Example #1
def _generate_scan_jobs_for_server_scan(
    server_scan_request: ServerScanRequest,
) -> Tuple[Dict[ScanCommandType, List[ScanJob]], ScanCommandErrorsDict]:
    all_scan_jobs_per_scan_cmd: Dict[ScanCommandType, List[ScanJob]] = {}
    scan_command_errors_during_queuing = {}
    for scan_cmd in server_scan_request.scan_commands:
        implementation_cls = ScanCommandsRepository.get_implementation_cls(scan_cmd)
        scan_cmd_extra_args = server_scan_request.scan_commands_extra_arguments.get(scan_cmd)  # type: ignore

        try:
            jobs_for_scan_cmd = implementation_cls.scan_jobs_for_scan_command(
                server_info=server_scan_request.server_info, extra_arguments=scan_cmd_extra_args
            )
            all_scan_jobs_per_scan_cmd[scan_cmd] = jobs_for_scan_cmd
        # Process exceptions and instantly "complete" the scan command if the call to create the jobs failed
        except ScanCommandWrongUsageError as e:
            error = ScanCommandError(
                reason=ScanCommandErrorReasonEnum.WRONG_USAGE, exception_trace=TracebackException.from_exception(e)
            )
            scan_command_errors_during_queuing[scan_cmd] = error
        except Exception as e:
            error = ScanCommandError(
                reason=ScanCommandErrorReasonEnum.BUG_IN_SSLYZE, exception_trace=TracebackException.from_exception(e),
            )
            scan_command_errors_during_queuing[scan_cmd] = error

    return all_scan_jobs_per_scan_cmd, scan_command_errors_during_queuing
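The pattern above, capturing each failure as a TracebackException so it can be stored alongside results and rendered later, reduces to a small sketch (the names below are made up for illustration and are not SSLyze APIs):

from traceback import TracebackException

def run_job(name: str) -> None:
    raise ValueError(f"job {name!r} could not be queued")

errors = {}
for name in ("job_a", "job_b"):
    try:
        run_job(name)
    except Exception as e:
        # Snapshot the traceback as data; it stays formattable after the
        # except block exits and the live traceback is gone.
        errors[name] = TracebackException.from_exception(e)

for name, trace in errors.items():
    print(f"--- {name} ---")
    print("".join(trace.format()))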
Example #2
def main(args: argparse.Namespace) -> None:
    """
    Main program
    :param args: arguments from CLI
    :return: None
    """
    try:
        encrypt_ec2_class = Encryptor(
            region=args.region,
            instances=args.instances,
            key=args.key,
            discard_source=args.discard_source_volume,
            discard_snapshot=args.discard_snapshot,
            after_start=args.after_start,
            force_stop=args.force_stop,
            one_key=args.one_key)

        if args.disable_async:
            encrypt_ec2_class.start_encryption()
        else:
            encrypt_ec2_class.start_encryptions()

    except (EndpointConnectionError, ValueError) as error:
        LOGGER.error(f'Problem with your AWS region? ({error})')
        print("".join(TracebackException.from_exception(error).format()))
        sys.exit(1)

    except (ClientError, TypeError) as error:
        LOGGER.error(f'Problem with the instance ({error})')
        print("".join(TracebackException.from_exception(error).format()))
        sys.exit(1)
Example #3
    def queue_scan(self, server_scan: ServerScanRequest) -> None:
        """Queue a server scan.
        """
        # Only one scan per server can be submitted
        if server_scan.server_info in self._pending_server_scan_results:
            raise ValueError(
                f"Already submitted a scan for server {server_scan.server_info.server_location}"
            )
        self._queued_server_scans.append(server_scan)
        self._pending_server_scan_results[server_scan.server_info] = {}
        self._pending_server_scan_errors[server_scan.server_info] = {}

        # Assign the server to scan to a thread pool
        server_scans_count = len(self._queued_server_scans)
        thread_pools_count = len(self._all_thread_pools)
        thread_pool_index_to_pick = server_scans_count % thread_pools_count
        thread_pool_for_server = self._all_thread_pools[
            thread_pool_index_to_pick]
        self._server_to_thread_pool[
            server_scan.server_info] = thread_pool_for_server

        # Convert each scan command within the server scan request into jobs
        for scan_cmd in server_scan.scan_commands:
            implementation_cls = ScanCommandsRepository.get_implementation_cls(
                scan_cmd)
            scan_cmd_extra_args = server_scan.scan_commands_extra_arguments.get(
                scan_cmd)  # type: ignore

            jobs_to_run = []
            try:
                jobs_to_run = implementation_cls.scan_jobs_for_scan_command(
                    server_info=server_scan.server_info,
                    extra_arguments=scan_cmd_extra_args)
            # Process exceptions and instantly "complete" the scan command if the call to create the jobs failed
            except ScanCommandWrongUsageError as e:
                error = ScanCommandError(
                    reason=ScanCommandErrorReasonEnum.WRONG_USAGE,
                    exception_trace=TracebackException.from_exception(e))
                self._pending_server_scan_errors[
                    server_scan.server_info][scan_cmd] = error
            except Exception as e:
                error = ScanCommandError(
                    reason=ScanCommandErrorReasonEnum.BUG_IN_SSLYZE,
                    exception_trace=TracebackException.from_exception(e),
                )
                self._pending_server_scan_errors[
                    server_scan.server_info][scan_cmd] = error

            # Schedule the jobs
            for job in jobs_to_run:
                future = thread_pool_for_server.submit(job.function_to_call,
                                                       *job.function_arguments)
                self._queued_future_to_server_and_scan_cmd[future] = (
                    server_scan.server_info, scan_cmd)
Example #4
    def queue_scan(self, server_scan: ServerScanRequest) -> None:
        """Queue a server scan.
        """
        already_queued_server_info = {
            queued_scan.server_scan_request.server_info for queued_scan in self._queued_server_scans
        }
        # Only one scan per server can be submitted
        if server_scan.server_info in already_queued_server_info:
            raise ValueError(f"Already submitted a scan for server {server_scan.server_info.server_location}")

        # Assign the server to scan to a thread pool
        assigned_thread_pool_index = self._get_assigned_thread_pool_index()
        assigned_thread_pool = self._thread_pools[assigned_thread_pool_index]

        # Convert each scan command within the server scan request into jobs
        queued_futures_per_scan_command: Dict[ScanCommandType, Set[Future]] = {}
        scan_command_errors_during_queuing = {}
        for scan_cmd in server_scan.scan_commands:
            implementation_cls = ScanCommandsRepository.get_implementation_cls(scan_cmd)
            scan_cmd_extra_args = server_scan.scan_commands_extra_arguments.get(scan_cmd)  # type: ignore

            jobs_to_run = []
            try:
                jobs_to_run = implementation_cls.scan_jobs_for_scan_command(
                    server_info=server_scan.server_info, extra_arguments=scan_cmd_extra_args
                )
            # Process exceptions and instantly "complete" the scan command if the call to create the jobs failed
            except ScanCommandWrongUsageError as e:
                error = ScanCommandError(
                    reason=ScanCommandErrorReasonEnum.WRONG_USAGE, exception_trace=TracebackException.from_exception(e)
                )
                scan_command_errors_during_queuing[scan_cmd] = error
            except Exception as e:
                error = ScanCommandError(
                    reason=ScanCommandErrorReasonEnum.BUG_IN_SSLYZE,
                    exception_trace=TracebackException.from_exception(e),
                )
                scan_command_errors_during_queuing[scan_cmd] = error

            # Schedule the jobs
            queued_futures_per_scan_command[scan_cmd] = set()
            for job in jobs_to_run:
                future = assigned_thread_pool.submit(job.function_to_call, *job.function_arguments)
                queued_futures_per_scan_command[scan_cmd].add(future)

        # Save everything as a queued scan
        self._queued_server_scans.append(
            _QueuedServerScan(
                server_scan_request=server_scan,
                queued_scan_jobs_per_scan_command=queued_futures_per_scan_command,
                queued_on_thread_pool_at_index=assigned_thread_pool_index,
                scan_command_errors_during_queuing=scan_command_errors_during_queuing,
            )
        )
Example #5
def _generate_result_for_completed_server_scan(completed_scan: _QueuedServerScan) -> ServerScanResult:
    server_scan_results: ScanCommandResultsDict = {}
    server_scan_errors: ScanCommandErrorsDict = {}

    # Group all the completed jobs per scan command
    scan_cmd_to_completed_jobs: Dict[ScanCommandType, List[CompletedScanJob]] = {
        scan_cmd: [] for scan_cmd in completed_scan.server_scan_request.scan_commands
    }
    for completed_job in completed_scan.completed_scan_jobs:
        scan_cmd_to_completed_jobs[completed_job.for_scan_command].append(completed_job)

    for scan_cmd, completed_scan_jobs in scan_cmd_to_completed_jobs.items():
        # Pass the completed scan jobs to the corresponding plugin implementation to generate a result
        scan_job_results_for_plugin = [
            ScanJobResult(_return_value=job.return_value, _exception=job.exception) for job in completed_scan_jobs
        ]
        server_info = completed_scan.server_scan_request.server_info
        plugin_implementation_cls = ScanCommandsRepository.get_implementation_cls(scan_cmd)
        try:
            result = plugin_implementation_cls.result_for_completed_scan_jobs(server_info, scan_job_results_for_plugin)
            server_scan_results[scan_cmd] = result

        # Process exceptions that may have been raised while the jobs were being completed
        except ClientCertificateRequested as e:
            error = ScanCommandError(
                reason=ScanCommandErrorReasonEnum.CLIENT_CERTIFICATE_NEEDED,
                exception_trace=TracebackException.from_exception(e),
            )
            server_scan_errors[scan_cmd] = error
        except (ConnectionToServerTimedOut, TlsHandshakeTimedOut) as e:
            error = ScanCommandError(
                reason=ScanCommandErrorReasonEnum.CONNECTIVITY_ISSUE,
                exception_trace=TracebackException.from_exception(e),
            )
            server_scan_errors[scan_cmd] = error
        except Exception as e:
            error = ScanCommandError(
                reason=ScanCommandErrorReasonEnum.BUG_IN_SSLYZE, exception_trace=TracebackException.from_exception(e),
            )
            server_scan_errors[scan_cmd] = error

    # Lastly, return the fully completed server scan
    server_scan_errors.update(completed_scan.scan_command_errors_during_queuing)
    server_scan_result = ServerScanResult(
        scan_commands_results=server_scan_results,
        scan_commands_errors=server_scan_errors,
        server_info=completed_scan.server_scan_request.server_info,
        scan_commands=completed_scan.server_scan_request.scan_commands,
        scan_commands_extra_arguments=completed_scan.server_scan_request.scan_commands_extra_arguments,
    )
    return server_scan_result
Example #6
def process_dask_tb(exc):
    """Process *exc* arising from :meth:`.Reporter.get`.

    Returns a tuple with 3 elements:

    - The key of the reporting computation.
    - The info key of the reporting computation.
    - A list of traceback.FrameSummary objects, without locals, for *only*
      frames that are not internal to dask.
    """
    key = task = None  # Info about the computation that triggered *exc*
    frames = []  # Frames for an abbreviated stacktrace

    try:
        # Get a traceback with captured locals
        tbe = TracebackException.from_exception(exc, capture_locals=True)
    except Exception:
        # Some exception occurred when capturing locals; proceed without
        tbe = TracebackException.from_exception(exc)

    # Iterate over frames from the base of the stack
    # Initial frames are internal to dask
    dask_internal = True
    for frame in tbe.stack:
        if frame.name == 'execute_task':
            # Current frame is the dask internal call to execute a task
            try:
                # Retrieve information about the key/task that triggered the
                # exception. These are not the raw values of variables, but
                # their string repr().
                key = frame.locals['key']
                task = frame.locals['task']
            except (TypeError, KeyError):  # pragma: no cover
                # No locals, or 'key' or 'task' not present
                pass

            # Subsequent frames are related to the exception
            dask_internal = False

        if not dask_internal:
            # Don't display the locals when printing the traceback
            frame.locals = None

            # Store the frame for printing the traceback
            frames.append(frame)

    # Omit a few dask internal calls below execute_task
    return key, task, frames[3:]
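A self-contained sketch of the two traceback features this function relies on: capture_locals=True stores each frame's locals as repr() strings, and setting frame.locals = None hides them again when the frames are formatted (the function below is hypothetical):

from traceback import TracebackException, format_list

def compute(x):
    doubled = x * 2
    raise RuntimeError("boom")

try:
    compute(21)
except RuntimeError as exc:
    tbe = TracebackException.from_exception(exc, capture_locals=True)
    for frame in tbe.stack:
        # frame.locals maps variable names to their repr() strings (or is None)
        if frame.locals and "doubled" in frame.locals:
            print("doubled was", frame.locals["doubled"])
        # Drop the locals so they do not appear in the printed traceback
        frame.locals = None
    print("".join(format_list(tbe.stack)))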
Example #7
File: utility.py  Project: zroorz/easywall
def format_exception(exc: Exception) -> str:
    """
    Convert an exception object to a readable string.

    [Data Types] str
    """
    return "".join(TracebackException.from_exception(exc).format())
Example #8
def build_error(request: Request, exc: Exception) -> Response:
    tbe = TracebackException.from_exception(exc)

    return PlainTextResponse(
        status_code=500,
        content="".join(tbe.format()),
    )
Example #9
def test_prefix_stack_trace_throws_correctly(keep_message, allow_list):
    """
    After logging, the library continues execution by rethrowing the error. The final
    error thrown is picked up for error reporting by AML. It should be consistent
    with the user's scrubbing choice. Verify that the scrubber preserves the exception
    type and correctly modifies the exception message.
    """
    file = io.StringIO()

    message = "This is the original exception message"
    e_type = ValueError

    @prefix_stack_trace(file, keep_message=keep_message, allow_list=allow_list)
    def function():
        raise e_type(message)

    with pytest.raises(e_type) as info:
        function()

    if keep_message is True or is_exception_allowed(
        TracebackException.from_exception(info.value), allow_list
    ):
        assert message in str(info.value)
    else:
        assert SCRUB_MESSAGE in str(info.value)

    assert PREFIX in str(info.value)
    assert info.type == e_type
Example #10
def wrap_process(job, messages, function, arguments):
    result = {"started": dt.now()}
    try:
        messages.put(
            message("{}, {}".format(*job), "Started.", result["started"]))
        for notification in function(**arguments):
            messages.put(message("{}, {}".format(*job), notification))
    except Exception as e:
        messages.put(
            message(
                "{}, {}".format(*job),
                "\n  "
                "  ".join(TracebackException.from_exception(e).format()),
            ))
        messages.put(message("{}, {}".format(*job), "Failed."))
        result["error"] = e
    finally:
        result["finished"] = dt.now()
        duration = result["finished"] - result["started"]
        hours = floor(duration.seconds / 3600)
        minutes = floor(duration.seconds / 60) - hours * 60
        seconds = duration.total_seconds() - hours * 3600 - minutes * 60
        messages.put(
            message(
                "{}, {}".format(*job),
                "Finished in {}{}{}".format(
                    "{}h, ".format(hours) if hours else "",
                    "{}m and ".format(minutes) if minutes or hours else "",
                    "{:.3f}s.".format(seconds),
                ),
                result["finished"],
            ))
Example #11
    async def handle_failure(self, supervisor, child: PID,
                             rs_stats: RestartStatistics, reason: Exception,
                             message: Any):
        directive = self._decider(child, reason)
        exp = "".join(TracebackException.from_exception(reason).format())

        if directive == SupervisorDirective.Resume:
            self._logger.info(
                f'Resuming {child.to_short_string()} Reason {exp}')
            await supervisor.resume_children(child)
        elif directive == SupervisorDirective.Restart:
            if self.should_stop(rs_stats):
                self._logger.info(
                    f'Stopping {child.to_short_string()} Reason {exp}')
                await supervisor.stop_children(*supervisor.children())
            else:
                self._logger.info(
                    f'Restarting {child.to_short_string()} Reason {exp}')
                await supervisor.restart_children(reason,
                                                  supervisor.children())
        elif directive == SupervisorDirective.Stop:
            self._logger.info(
                f'Stopping {child.to_short_string()} Reason {exp}')
            await supervisor.stop_children(child)

        elif directive == SupervisorDirective.Escalate:
            await supervisor.escalate_failure(reason, message)
        else:
            raise ValueError('Argument Out Of Range')
Example #12
def _generate_scan_jobs_for_server_scan(
    server_scan_request: ServerScanRequest,
    server_connectivity_result: ServerTlsProbingResult,
) -> Tuple[Dict[ScanCommand, List[ScanJob]], Dict[ScanCommand,
                                                  ScanCommandAttempt]]:
    all_scan_jobs_per_scan_cmd: Dict[ScanCommand, List[ScanJob]] = {}
    scan_command_errors_during_queuing: Dict[ScanCommand,
                                             ScanCommandAttempt] = {}
    for scan_cmd in server_scan_request.scan_commands:
        implementation_cls = ScanCommandsRepository.get_implementation_cls(
            scan_cmd)
        scan_cmd_extra_args = getattr(
            server_scan_request.scan_commands_extra_arguments, scan_cmd, None)

        try:
            jobs_for_scan_cmd = implementation_cls.scan_jobs_for_scan_command(
                server_info=ServerConnectivityInfo(
                    server_location=server_scan_request.server_location,
                    network_configuration=server_scan_request.network_configuration,
                    tls_probing_result=server_connectivity_result,
                ),
                extra_arguments=scan_cmd_extra_args,
            )
            all_scan_jobs_per_scan_cmd[scan_cmd] = jobs_for_scan_cmd

        # Process exceptions and instantly "complete" the scan command if the call to create the jobs failed
        except ScanCommandWrongUsageError as e:
            scan_command_attempt_cls = get_scan_command_attempt_cls(scan_cmd)
            errored_attempt = scan_command_attempt_cls(
                status=ScanCommandAttemptStatusEnum.ERROR,
                error_reason=ScanCommandErrorReasonEnum.WRONG_USAGE,
                error_trace=TracebackException.from_exception(e),
                result=None,
            )
            scan_command_errors_during_queuing[scan_cmd] = errored_attempt
        except Exception as e:
            scan_command_attempt_cls = get_scan_command_attempt_cls(scan_cmd)
            errored_attempt = scan_command_attempt_cls(
                status=ScanCommandAttemptStatusEnum.ERROR,
                error_reason=ScanCommandErrorReasonEnum.BUG_IN_SSLYZE,
                error_trace=TracebackException.from_exception(e),
                result=None,
            )
            scan_command_errors_during_queuing[scan_cmd] = errored_attempt

    return all_scan_jobs_per_scan_cmd, scan_command_errors_during_queuing
Example #13
    def __build_node_metadata(self, initial_node_name):
        self.__reset()
        self.__nodes_that_never_ran.add(initial_node_name)

        while not QUIT_EVENT.is_set():
            jobmsg = _("{b} ({n} nodes, {r} reactors, {e} runs)").format(
                b=bold(_("running metadata reactors")),
                n=len(self.__nodes_that_never_ran) +
                len(self.__nodes_that_ran_at_least_once),
                r=len(self.__reactor_changes),
                e=self.__reactors_run,
            )
            try:
                with io.job(jobmsg):
                    # Control flow here is a bit iffy. The functions in this block often raise
                    # _StartOver in order to aggressively process new nodes first etc.
                    # Each method represents a distinct stage of metadata processing that checks
                    # for nodes in certain states as described below.

                    # This checks for newly discovered nodes that haven't seen any processing at
                    # all so far. It is important that we run them as early as possible, so their
                    # static metadata becomes available to other nodes and we recursively discover
                    # additional nodes as quickly as possible.
                    self.__run_new_nodes()
                    # At this point, we have run all relevant nodes at least once.

                    # Nodes become "triggered" when they previously looked something up from a
                    # different node and that second node changed. In this method, we try to figure
                    # out if the change on the node we depend on actually has any effect on the
                    # depending node.
                    self.__run_triggered_nodes()

                    # In this stage, we run all unstable nodes to the point where everything is
                    # stable again, except for those reactors that depend on other nodes.
                    self.__run_unstable_nodes()

                    # The final step is to make sure nothing changes when we run reactors with
                    # dependencies on other nodes. If anything changes, we need to start over so
                    # local-only reactors on a node can react to changes caused by reactors looking
                    # at other nodes.
                    self.__run_nodes_with_deps()

                    # if we get here, we're done!
                    break

            except _StartOver:
                continue

        if self.__keyerrors and not QUIT_EVENT.is_set():
            msg = _("These metadata reactors raised a KeyError "
                    "even after all other reactors were done:")
            for source, exc in sorted(self.__keyerrors.items()):
                node_name, reactor = source
                msg += f"\n\n  {node_name} {reactor}\n\n"
                for line in TracebackException.from_exception(exc).format():
                    msg += "    " + line
            raise MetadataPersistentKeyError(msg)

        io.debug("metadata generation for selected nodes finished")
Example #14
    def exec(self):
        command = getattr(self, '_command_{}'.format(self.args.command))
        logging.info('Starting "{}" command...'.format(self.args.command))
        try:
            command()
        except Exception as e:
            tbe = TracebackException.from_exception(e)
            logging.critical(' '.join(list(tbe.format())))
            raise e
        logging.info('All done - terminating')
Example #15
    def __formatError(self, req: Request, ex: Exception) -> Iterator[XMLNode]:
        '''Yields HTML informing the user of the given exception.
        '''
        yield xhtml.p(
            class_='notice')['An error occurred while generating this page.']
        if req.displayTracebacks:
            tb = TracebackException.from_exception(ex)
            yield xhtml.pre[tb.format()]
        else:
            yield xhtml.p['Details were written to the server log.']
Example #16
def format_exception(e):
    """
    Convert an exception to a human-readable message and stacktrace
    """
    tb = TracebackException.from_exception(e)
    stack = []
    for raw in tb.format():
        stack += [s.strip() for s in raw.split("\n")]
    msg = "(%s) %s" % ((type(e).__name__, e))
    trace = filter(lambda s: s, map(indent, [s for s in stack[:-1]]))
    return "{}\n".format(msg) + "\n".join(trace)
Example #17
File: handle.py  Project: edoakes/lfd2-bot
async def handle(context: Optional[Context], exception: BaseException):
    if isinstance(exception, UsageException):
        await exception.notice()
        return

    if isinstance(exception.__cause__, UsageException):
        await exception.__cause__.notice()
        return

    if exception.__cause__ is not None:
        trace = TracebackException.from_exception(exception.__cause__)
    else:
        trace = TracebackException.from_exception(exception)

    stack = "".join(trace.format())
    if context is not None:
        await context.send("There was an error during your command :worried:")
        logger.error('Command: "%s"\n%s', context.message.content, stack)
        return

    logger.error("Command: <unkown>\n%s", stack)
Example #18
    async def _generate_log(error_id: str, request: Request,
                            exc: Exception) -> None:
        traceback_exception = TracebackException.from_exception(exc)
        logger.error(msg=str(exc),
                     extra={
                         'uuid': error_id,
                         'status_code': HTTP_500_INTERNAL_SERVER_ERROR,
                         'request.method': request.method,
                         'request.path': request.url.path,
                         'request.headers': dict(request.headers),
                         'traceback': ''.join(traceback_exception.format())
                     })
Example #19
    def __str__(self):
        from traceback import (
            TracebackException,
            format_exception_only,
            format_list,
        )

        # Move the cause to a non-private attribute
        self.cause = self.__cause__

        # Suppress automatic printing of the cause
        self.__cause__ = None

        info = None  # Information about the call that triggered the exception
        frames = []  # Frames for an abbreviated stacktrace
        dask_internal = True  # Flag if the frame is internal to dask

        # Iterate over frames from the base of the stack
        tb = TracebackException.from_exception(self.cause, capture_locals=True)
        for frame in tb.stack:
            if frame.name == 'execute_task':
                # Current frame is the dask internal call to execute a task

                # Retrieve information about the key/task that triggered the
                # exception. These are not the raw values of variables, but
                # their string repr().
                info = {name: frame.locals[name] for name in ('key', 'task')}

                # Remaining frames are related to the exception
                dask_internal = False

            if not dask_internal:
                # Don't display the locals when printing the traceback
                frame.locals = None

                # Store the frame for printing the traceback
                frames.append(frame)

        # Assemble the exception printout

        # Reporter information for debugging
        lines = [
            'when computing {key}, using\n\n{task}\n\n'.format(**info),
            'Use Reporter.describe(...) to trace the computation.\n\n',
            'Computation traceback:\n',
        ]
        # Traceback; omitting a few dask internal calls below execute_task
        lines.extend(format_list(frames[3:]))
        # Type and message of the original exception
        lines.extend(format_exception_only(self.cause.__class__, self.cause))

        return ''.join(lines)
Example #20
    def when_calling_endpoint(self):
        method = self.request_data['method'].lower()
        path = self.request_data['endpoint']
        data = self.request_data.get('body')
        api = getattr(self._client, method)
        try:
            self.actual_response = api(
                path=path,
                data=json_stringify(data),
                content_type='application/json',
            )
        except Exception as e:
            self.actual_exception = TracebackException.from_exception(e)
Example #21
File: utils.py  Project: danfossi/FQM
def log_error(error):
    ''' Utility to log an error to the `errors.log` file.

    Parameters
    ----------
        error: Error instance
            error that we want to log.
    '''
    log_file = absolute_path('errors.log')

    not os.path.isfile(log_file) and os.system(f'touch {log_file}')
    with open(log_file, 'a') as file:
        file.write(f'{"#" * 5} {datetime.now()} {"#" * 5}\n')
        file.write(''.join(TracebackException.from_exception(error).format()))
Example #22
    def __str__(self) -> str:
        result = []
        if self.error:
            result.append(f"{self.error}")
        if self.container is not None:
            result.append(
                f"in {self.container.__class__.__module__}.{self.container.__class__.__name__}"
            )
        if self.ctx is not None:
            result.append(f"\nCONTEXT: {self.ctx!r}")
        if self.exc is not None and isinstance(self.exc, Exception):
            _exc = ''.join(
                TracebackException.from_exception(self.exc).format())
            result.append(f"\nTRACE: {_exc}")
        return ' '.join(result)
Example #23
        def callback_wrapper(msg):
            msg = json.loads(msg)
            try:
                ret_value = callback(msg) or ""
                resp = {"type": msg["type"], "status": 200, "data": ret_value}
            except Exception as e:
                resp = {
                    "type": msg["type"],
                    "status": e.code if isinstance(e, WsError) else 500,
                    "data": str(e)
                }
                if e.__cause__:
                    t = TracebackException.from_exception(e.__cause__)
                    resp["traceback"] = "".join(t.format())

            return json.dumps(resp).encode("utf-8")
Example #24
    def exception(self, exc):
        """
        Helper to report an exception traceback from its object
        """
        traceback = TracebackException.from_exception(exc)
        formatted = ''.join(traceback.format())
        log.error(formatted)

        if formatted in self._last_exceptions:
            log.debug('Exception already logged')
            return

        # Retain the formatted exception in memory to avoid looping
        self._last_exceptions.append(formatted)
        self._loop.call_later(self.EXCEPTION_TTL, self._forget_exception,
                              formatted)

        self.send_report('exception', {'traceback': formatted})
Example #25
def log(title, message="", exception=None):
    """
    Logs a message, which will show up in CloudWatch Logs.

    """

    parts = [str(title)]
    for key, value in log.context.items():
        parts.append(key)
        parts.append(value)
    if message != "":
        parts.append(str(message))
    value = " ".join(parts)
    if exception:
        value += "\n"
        value += "\n".join(
            TracebackException.from_exception(exception).format())
    print(value.replace("\n", "\r"))
Example #26
def run_put_result_in_queue(func: Callable, gui_queue: queue.Queue, *args,
                            **kwargs):
    """
    A worker thread that communicates with the GUI through a queue
    This thread can block for as long as it wants and the GUI will not be affected
    :param func: Function to run
    :param gui_queue: (queue.Queue) Queue to communicate back to GUI that task is completed
    :return:
    """
    try:
        value = func(*args, **kwargs)
        result = Result(True, value=value)
        gui_queue.put(result)  # put a message into queue for GUI
    except Exception as e:
        te = TracebackException.from_exception(e)
        result = Result(False, exception=te)
        gui_queue.put(result)
        raise e
Example #27
    def exception(self, exc):
        """
        Helper to report an exception traceback from its object
        """
        traceback = TracebackException.from_exception(exc)
        formatted = ''.join(traceback.format())
        log.error(formatted)

        if formatted in self._last_exceptions:
            log.debug('Exception already logged')
            return

        # Retain the formatted exception in memory to avoid looping
        self._last_exceptions.append(formatted)
        self._loop.call_later(
            self.EXCEPTION_TTL, self._forget_exception, formatted
        )

        self.send_report('exception', {'traceback': formatted})
Example #28
        def server_connectivity_test_error_callback(
            server_scan_request: ServerScanRequest, connectivity_error: ConnectionToServerFailed
        ) -> None:
            for inner_observer in self._observers:
                inner_observer.server_connectivity_test_error(server_scan_request, connectivity_error)

            # Since the server is not reachable, there is nothing else to do
            server_scan_results_queue.put(
                ServerScanResult(
                    uuid=server_scan_request.uuid,
                    server_location=server_scan_request.server_location,
                    network_configuration=server_scan_request.network_configuration,
                    connectivity_status=ServerConnectivityStatusEnum.ERROR,
                    connectivity_error_trace=TracebackException.from_exception(connectivity_error),
                    connectivity_result=None,
                    scan_status=ServerScanStatusEnum.ERROR_NO_CONNECTIVITY,
                    scan_result=None,
                )
            )
Example #29
def print_prefixed_stack_trace_and_raise(
    file: TextIO = sys.stderr,
    prefix: str = PREFIX,
    scrub_message: str = SCRUB_MESSAGE,
    keep_message: bool = False,
    allow_list: list = [],
    add_timestamp: bool = False,
    err: Optional[BaseException] = None,
) -> None:
    """
    Print the current exception and stack trace to `file` (usually client
    standard error), prefixing the stack trace with `prefix`.
    Args:
        keep_message (bool): if True, don't scrub message. If false, scrub (unless
            allowed).
        allow_list (list): exception allow_list. Ignored if keep_message is True. If
            empty, all messages will be scrubbed.
        err: the error that was thrown. None accepted for backwards compatibility.
    """
    if err is None:
        err = sys.exc_info()[1]
    scrubbed_err = scrub_exception(err, scrub_message, prefix, keep_message,
                                   allow_list)

    tb_exception = TracebackException.from_exception(
        scrubbed_err)  # type: ignore

    for execution in tb_exception.format():
        if "return function(*func_args, **func_kwargs)" in execution:
            # Do not show the stack trace for our decorator.
            continue
        for line in execution.splitlines():
            if add_timestamp:
                current_time = time.strftime("%Y-%m-%d %H:%M:%S",
                                             time.localtime())
                print(f"{prefix} {current_time} {line}", file=file)
            else:
                print(f"{prefix} {line}", file=file)

    raise scrubbed_err  # type: ignore
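The inner splitlines() loop is needed because each string yielded by TracebackException.format() can span several lines; a stripped-down sketch of the same prefixing idea (the helper name and prefix are hypothetical):

import sys
from traceback import TracebackException

def print_prefixed(exc: BaseException, prefix: str = "TRACE>", file=sys.stderr) -> None:
    for chunk in TracebackException.from_exception(exc).format():
        # A single chunk may hold the frame header plus the source line,
        # so split it before adding the prefix.
        for line in chunk.splitlines():
            print(f"{prefix} {line}", file=file)

try:
    {}["missing"]
except KeyError as e:
    print_prefixed(e)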
Example #30
    async def dispatch(self, request: Request,
                       call_next: RequestResponseEndpoint) -> Response:
        try:
            response = await call_next(request)
        except Exception as e:
            tb = TracebackException.from_exception(e)
            logger.error(msg=str(e),
                         extra={
                             'status_code': HTTP_500_INTERNAL_SERVER_ERROR,
                             'request.method': request.method,
                             'request.path': request.url.path,
                             'request.headers': dict(request.headers),
                             'traceback': ''.join(tb.format())
                         })
            return JSONResponse(
                {
                    'detail': [{
                        'type': f'Unexpected error: [{type(e).__name__}]',
                        'msg': str(e)
                    }]
                },
                status_code=HTTP_400_BAD_REQUEST)

        return response
Example #31
def is_exception_allowed(exception: Union[BaseException, TracebackException],
                         allow_list: list) -> bool:
    """
    Check if the message is allowed, either by `allow_list` or by `default_allow_list`.

    Args:
        exception (TracebackException): the exception to test
        allow_list (list): list of regex expressions. If any expression matches
            the exception name or message, it will be considered allowed.

    Returns:
        bool: True if message is allowed, False otherwise.
    """
    if not isinstance(exception, TracebackException):
        exception = TracebackException.from_exception(exception)

    # An empty allow_list means only the default_allow_list is consulted
    for expr in allow_list + default_allow_list:
        if re.search(expr, getattr(exception, "_str", ""), re.IGNORECASE):
            return True
        if re.search(expr, getattr(exception.exc_type, "__name__", ""),
                     re.IGNORECASE):
            return True
    return False
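This helper reaches into the private _str attribute and exc_type to get at the message and type name; the public way to obtain a "Type: message" summary from a TracebackException is format_exception_only(), as in this small sketch:

from traceback import TracebackException

try:
    raise ValueError("disk quota exceeded")
except ValueError as e:
    tbe = TracebackException.from_exception(e)
    # Yields lines such as "ValueError: disk quota exceeded\n"
    summary = "".join(tbe.format_exception_only())
    print(summary, end="")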
Example #32
def _retrieve_and_analyze_http_response(server_info: ServerConnectivityInfo) -> HttpHeadersScanResult:
    # Send HTTP requests until we no longer receive an HTTP redirection, allowing at most 4 redirections
    _logger.info(f"Retrieving HTTP headers from {server_info}")
    redirections_count = 0
    next_location_path: Optional[str] = "/"
    http_error_trace = None

    while next_location_path and redirections_count < 4:
        _logger.info(f"Sending HTTP request to {next_location_path}")
        http_path_redirected_to = next_location_path

        # Perform the TLS handshake
        ssl_connection = server_info.get_preconfigured_tls_connection()
        ssl_connection.connect()

        try:
            # Send an HTTP GET request to the server
            ssl_connection.ssl_client.write(
                HttpRequestGenerator.get_request(
                    host=server_info.network_configuration.tls_server_name_indication, path=next_location_path
                )
            )
            http_response = HttpResponseParser.parse_from_ssl_connection(ssl_connection.ssl_client)

        except (OSError, NotAValidHttpResponseError, SslError) as e:
            # The server closed/rejected the connection, or didn't return a valid HTTP response
            http_error_trace = TracebackException.from_exception(e)

        finally:
            ssl_connection.close()

        if http_error_trace:
            break

        # Handle redirection if there is one
        next_location_path = _detect_http_redirection(
            http_response=http_response,
            server_host_name=server_info.network_configuration.tls_server_name_indication,
            server_port=server_info.server_location.port,
        )
        redirections_count += 1

    # Prepare the results
    initial_http_request = HttpRequestGenerator.get_request(
        host=server_info.network_configuration.tls_server_name_indication, path="/"
    ).decode("ascii")

    if http_error_trace:
        # If the server errored when receiving an HTTP request, return the error as the result
        return HttpHeadersScanResult(
            http_request_sent=initial_http_request,
            http_error_trace=http_error_trace,
            http_path_redirected_to=None,
            strict_transport_security_header=None,
            expect_ct_header=None,
        )
    else:
        # If no HTTP error happened, parse and return each header
        return HttpHeadersScanResult(
            http_request_sent=initial_http_request,
            http_path_redirected_to=http_path_redirected_to,
            http_error_trace=None,
            strict_transport_security_header=_parse_hsts_header_from_http_response(http_response),
            expect_ct_header=_parse_expect_ct_header_from_http_response(http_response),
        )
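A minimal sketch of the result-carrying pattern used here, with hypothetical names: instead of letting the network error propagate, the error is stored as a TracebackException on the result object and the caller decides whether and how to render it.

from dataclasses import dataclass
from traceback import TracebackException
from typing import Optional

@dataclass
class FetchResult:
    url: str
    error_trace: Optional[TracebackException] = None  # populated only on failure

def fetch(url: str) -> FetchResult:
    try:
        raise OSError("connection reset by peer")  # stand-in for the real I/O call
    except OSError as e:
        return FetchResult(url=url, error_trace=TracebackException.from_exception(e))

result = fetch("https://example.com")
if result.error_trace is not None:
    print("".join(result.error_trace.format()))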