Example #1
    def test_start_catches_base_exceptions(self):
        worker1 = self.create_worker()
        worker1.blueprint.state = RUN
        stc = MockStep()
        stc.start.side_effect = WorkerTerminate()
        worker1.steps = [stc]
        worker1.start()
        stc.start.assert_called_with(worker1)
        assert stc.terminate.call_count

        worker2 = self.create_worker()
        worker2.blueprint.state = RUN
        sec = MockStep()
        sec.start.side_effect = WorkerShutdown()
        sec.terminate = None
        worker2.steps = [sec]
        worker2.start()
        assert sec.stop.call_count
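Note: the test depends on a MockStep helper that is not shown in this snippet. A minimal sketch of what such a step double could look like, assuming unittest.mock.Mock (the actual helper in the Celery test suite may differ):

from unittest.mock import Mock

def MockStep(step=None):
    # Hypothetical stand-in bootstep: every attribute (start, stop,
    # terminate, ...) is a Mock, so the worker can call them and the
    # test can assert on call counts afterwards.
    step = Mock(name='step') if step is None else step
    step.blueprint = Mock(name='step.blueprint')
    step.blueprint.name = 'MockNS'
    step.name = 'MockStep({0})'.format(id(step))
    return step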
Example #2
 def start(self):
     blueprint = self.blueprint
     while blueprint.state not in STOP_CONDITIONS:
         maybe_shutdown()
         if self.restart_count:
             try:
                 self._restart_state.step()
             except RestartFreqExceeded as exc:
                 crit('Frequent restarts detected: %r', exc, exc_info=1)
                 sleep(1)
         self.restart_count += 1
         try:
             blueprint.start(self)
         except self.connection_errors as exc:
             # If we're not retrying connections, we need to properly shutdown or terminate
             # the Celery main process instead of abruptly aborting the process without any cleanup.
             is_connection_loss_on_startup = self.restart_count == 0
             connection_retry_type = self._get_connection_retry_type(
                 is_connection_loss_on_startup)
             connection_retry = self.app.conf[connection_retry_type]
             if not connection_retry:
                 crit(
                     f"Retrying to {'establish' if is_connection_loss_on_startup else 're-establish'} "
                     f"a connection to the message broker after a connection loss has "
                     f"been disabled (app.conf.{connection_retry_type}=False). Shutting down..."
                 )
                 raise WorkerShutdown(1) from exc
             if isinstance(exc, OSError) and exc.errno == errno.EMFILE:
                 crit("Too many open files. Aborting...")
                 raise WorkerTerminate(1) from exc
             maybe_shutdown()
             if blueprint.state not in STOP_CONDITIONS:
                 if self.connection:
                     self.on_connection_error_after_connected(exc)
                 else:
                     self.on_connection_error_before_connected(exc)
                 self.on_close()
                 blueprint.restart(self)
Example #3
def maybe_shutdown():
    if should_stop is not None and should_stop is not False:
        raise WorkerShutdown(should_stop)
    elif should_terminate is not None and should_terminate is not False:
        raise WorkerTerminate(should_terminate)
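maybe_shutdown only raises once some other code has set the module-level should_stop / should_terminate flags. A rough, hypothetical sketch of how a signal handler might flip those flags (the real Celery handlers in celery.apps.worker are more involved):

import signal
from celery.worker import state

def _warm_shutdown(signum, frame):
    # Request a warm shutdown: maybe_shutdown() will raise
    # WorkerShutdown(0) on its next call (0 == EX_OK).
    state.should_stop = 0

def _cold_shutdown(signum, frame):
    # Request a cold shutdown: maybe_shutdown() raises WorkerTerminate instead.
    state.should_terminate = 0

signal.signal(signal.SIGTERM, _warm_shutdown)
signal.signal(signal.SIGQUIT, _cold_shutdown)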
Example #4
def asynloop(obj,
             connection,
             consumer,
             blueprint,
             hub,
             qos,
             heartbeat,
             clock,
             hbrate=2.0,
             RUN=RUN):
    """Non-blocking event loop consuming messages until connection is lost,
    or shutdown is requested."""
    update_qos = qos.update
    hbtick = connection.heartbeat_check
    errors = connection.connection_errors
    heartbeat = connection.get_heartbeat_interval()  # negotiated

    on_task_received = obj.create_task_handler()

    if heartbeat and connection.supports_heartbeats:
        hub.call_repeatedly(heartbeat / hbrate, hbtick, hbrate)

    consumer.on_message = on_task_received
    consumer.consume()
    obj.on_ready()
    obj.controller.register_with_event_loop(hub)
    obj.register_with_event_loop(hub)

    # did_start_ok will verify that pool processes were able to start,
    # but this will only work the first time we start, as
    # maxtasksperchild will mess up metrics.
    if not obj.restart_count and not obj.pool.did_start_ok():
        raise WorkerLostError('Could not start worker processes')

    # consumer.consume() may have prefetched up to our
    # limit - drain an event so we are in a clean state
    # prior to starting our event loop.
    if connection.transport.driver_type == 'amqp':
        hub.call_soon(_quick_drain, connection)

    # FIXME: Use loop.run_forever
    # Tried and works, but no time to test properly before release.
    hub.propagate_errors = errors
    loop = hub.create_loop()

    try:
        while blueprint.state == RUN and obj.connection:
            # shutdown if signal handlers told us to.
            should_stop, should_terminate = (
                state.should_stop,
                state.should_terminate,
            )
            # False == EX_OK, so must use is not False
            if should_stop is not None and should_stop is not False:
                raise WorkerShutdown(should_stop)
            elif should_terminate is not None and should_terminate is not False:
                raise WorkerTerminate(should_terminate)

            # We only update QoS when there is no more messages to read.
            # This groups together qos calls, and makes sure that remote
            # control commands will be prioritized over task messages.
            if qos.prev != qos.value:
                update_qos()

            try:
                next(loop)
            except StopIteration:
                loop = hub.create_loop()
    finally:
        try:
            hub.reset()
        except Exception as exc:
            error(
                'Error cleaning up after event loop: %r',
                exc,
                exc_info=1,
            )
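The loop above schedules a _quick_drain callback that is not shown. A minimal sketch of such a helper, assuming it simply drains one prefetched event with a short timeout and ignores "nothing to read" errors (the actual Celery implementation may differ):

import errno

def _quick_drain(connection, timeout=0.1):
    try:
        # Drain at most one prefetched message so the event loop starts
        # from a clean state.
        connection.drain_events(timeout=timeout)
    except Exception as exc:
        exc_errno = getattr(exc, 'errno', None)
        # Re-raise anything that is not an EAGAIN-style "would block" error.
        if exc_errno is not None and exc_errno != errno.EAGAIN:
            raise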
Example #5
def static_analysis_task(apk_name, username):
    try:
        logger.info("Retrieving APK.")
        urlretrieve(config.WEBAPP_URL + '/apk/' + apk_name,
                    INPUT_APK_DIR + apk_name + ".apk")

        logger.info('Disassembling APK.')
        crashed_on_disass = False
        try:
            disassembled_path = disassemble_apk(apk_name)
        except Exception:
            crashed_on_disass = True
            static_analysis_results = StaticAnalysisResults(
                apk_name, None, None, None, [])

        internet_perm = True
        if not crashed_on_disass:
            logger.info('Disassembled APK. Now statically analysing app.')
            package, internet_perm = requires_internet_permission(
                disassembled_path)
            if internet_perm:
                static_analysis_results = StaticAnalyzer().analyze_statically(
                    disassembled_path, apk_name)
                apk_analysis = ApkAnalysis(apk_name)
                activities = apk_analysis.get_all_activities_results()

                static_analysis_results = as_combined_static_analysis_results(
                    static_analysis_results,
                    static_analysis_results.result_list + activities)
            else:
                static_analysis_results = StaticAnalysisResults(
                    apk_name, package, None, None, [])

        logger.info(
            'Analysed app statically, now getting scenarios for dynamic analysis and sending html.'
        )
        current_user = Session.query(User).filter(
            User.username == username).one()
        html = templates_service.render_static_analysis_results(
            static_analysis_results,
            current_user,
            crashed_on_disass=crashed_on_disass,
            internet_perm=internet_perm)
        send_html(html, username)

        if crashed_on_disass:
            logger.error(
                "Static analysis crashed during disassembly of APK. No further analysis"
            )
            return

        if not internet_perm:
            logger.info(
                "App does not request internet permission. No further static dynamic analysis"
            )
            return

        scenario_datas = scenario_service.get_all_of_user(
            static_analysis_results, current_user)
        if not scenario_datas:
            logger.info('No scenarios for dynamic analysis.')
            return

        html = templates_service.render_scenario_datas(scenario_datas)
        send_html(html, username)

        logger.info('Sent html to socket. Now generating smart input for app.')
        smart_input_results = apk_analysis.get_smart_input()
        logger.info('Generated smart input for app.')

        if scenario_service.has_activities_to_select(static_analysis_results,
                                                     current_user):
            logger.info(
                "Saving static analysis and smart input results for activity selection later."
            )
            Session.add(static_analysis_results)
            smart_input_results_json = {
                clazz: [tf.__json__() for tf in tfs]
                for clazz, tfs in smart_input_results.items()
            }
            smart_input_results_db = SmartInputResult(
                apk_filename=apk_name, result=smart_input_results_json)
            Session.add(smart_input_results_db)
            Session.commit()

        logger.info('Starting dynamic analysis tasks.')
        for scenario_data in scenario_datas:
            try:
                if not scenario_data.is_selected_activities():
                    celery.send_task('dynamic_analysis_task',
                                     args=[
                                         static_analysis_results.apk_filename,
                                         scenario_data, smart_input_results,
                                         username
                                     ])
            except Exception:
                logger.exception("Can't send dynamic analysis tasks")
                raise WorkerTerminate()

    except WorkerTerminate as e:
        raise e
    except Exception as e:
        logger.exception("Static analysis crashed")
        if 'current_user' not in locals():
            current_user = Session.query(User).filter(
                User.username == username).one()
        if 'static_analysis_results' not in locals():
            static_analysis_results = StaticAnalysisResults(
                apk_name, None, None, None, [])
        html = templates_service.render_static_analysis_results(
            static_analysis_results, current_user, crashed=True)
        send_html(html, username)
    finally:
        if os.path.isfile(INPUT_APK_DIR + apk_name + ".apk"):
            os.remove(INPUT_APK_DIR + apk_name + ".apk")
Example #6
def send_html(html, username):
    try:
        socketio.emit('html', {'html': html}, room=username)
    except Exception:
        logger.exception("Can't send html")
        raise WorkerTerminate()
Example #7
def analyze_dynamically(apk_name, scenarios, smart_input_results, smart_input_assignment, socketio, current_user):
    logger.info('Setting up analysis environment.')

    emulator_id = None
    apk_path = INPUT_APK_DIR + apk_name + ".apk"
    installed = False
    failed_results = []
    global timed_out
    timed_out = False
    try:
        # ==== Setup ====
        emulator_id = DeviceManager.get_emulator(scenarios.min_sdk_version, scenarios.target_sdk_version)

        start_adb_as_root(emulator_id)
        install_apk(emulator_id, apk_path)
        installed = True

        start_app(emulator_id, scenarios.package)

        # skip possible welcome screens at first time
        press_enter(emulator_id)
        press_enter(emulator_id)
        # ==== ===== ====

        time.sleep(1)

        logger.info('Analysing activities of scenario.')
        run_scenarios(
            scenarios,
            smart_input_results,
            smart_input_assignment,
            emulator_id,
            socketio,
            current_user)

    except SoftTimeLimitExceeded:
        logger.exception("Timed out")
        failed_results = [LogAnalysisResult(DynamicAnalysisResult(s, timed_out=True)) for s in scenarios.scenario_list]
        timed_out = True
    except WorkerTerminate as e:
        raise e
    except Exception:
        logger.exception("Crash during setup")
        failed_results = [LogAnalysisResult(DynamicAnalysisResult(s, crashed_on_setup=True))
                          for s in scenarios.scenario_list]
    finally:
        # ==== Shutdown ====
        if installed:
            try:
                uninstall_apk(emulator_id, scenarios.package)
            except Exception:
                pass

        DeviceManager.shutdown_emulator()

        time.sleep(5) # wait for emulator to be shut down
        # ==== ======== ====

    if failed_results:
        html = templates_service.render_log_analysis_results(failed_results)
        try:
            socketio.emit('html', {'html': html}, room=current_user.username)
        except Exception:
            logger.exception("Can't send html")
            raise WorkerTerminate()

    return timed_out
Example #8
 def test_apply_target__raises_WorkerTerminate(self):
     target = Mock(name='target')
     target.side_effect = WorkerTerminate()
     with self.assertRaises(WorkerTerminate):
         apply_target(target)
Example #9
def maybe_shutdown():
    """Shutdown if flags have been set."""
    if should_stop is not None and should_stop is not False:
        raise WorkerShutdown(should_stop)
    elif should_terminate is not None and should_terminate is not False:
        raise WorkerTerminate(should_terminate)
Example #10
def maybe_shutdown():
    if should_stop:
        raise WorkerShutdown()
    elif should_terminate:
        raise WorkerTerminate()
Example #11
def asynloop(obj,
             connection,
             consumer,
             blueprint,
             hub,
             qos,
             heartbeat,
             clock,
             hbrate=2.0,
             RUN=RUN):
    """Non-blocking event loop consuming messages until connection is lost,
    or shutdown is requested."""
    update_qos = qos.update
    readers, writers = hub.readers, hub.writers
    hbtick = connection.heartbeat_check
    errors = connection.connection_errors
    hub_add, hub_remove = hub.add, hub.remove

    on_task_received = obj.create_task_handler()

    if heartbeat and connection.supports_heartbeats:
        hub.call_repeatedly(heartbeat / hbrate, hbtick, hbrate)

    consumer.callbacks = [on_task_received]
    consumer.consume()
    obj.on_ready()
    obj.controller.register_with_event_loop(hub)
    obj.register_with_event_loop(hub)

    # did_start_ok will verify that pool processes were able to start,
    # but this will only work the first time we start, as
    # maxtasksperchild will mess up metrics.
    if not obj.restart_count and not obj.pool.did_start_ok():
        raise WorkerLostError('Could not start worker processes')

    # FIXME: Use loop.run_forever
    # Tried and works, but no time to test properly before release.
    hub.propagate_errors = errors
    loop = hub.create_loop()

    try:
        while blueprint.state == RUN and obj.connection:
            # shutdown if signal handlers told us to.
            if state.should_stop:
                raise WorkerShutdown()
            elif state.should_terminate:
                raise WorkerTerminate()

            # We only update QoS when there is no more messages to read.
            # This groups together qos calls, and makes sure that remote
            # control commands will be prioritized over task messages.
            if qos.prev != qos.value:
                update_qos()

            try:
                next(loop)
            except StopIteration:
                loop = hub.create_loop()
    finally:
        try:
            hub.reset()
        except Exception as exc:
            error(
                'Error cleaning up after event loop: %r',
                exc,
                exc_info=1,
            )