Example #1
    def test_poll_result(self):
        with self._result_context() as (results, backend, Message):
            # FFWD's to the latest state.
            state_messages = [
                Message(status=states.RECEIVED, seq=1),
                Message(status=states.STARTED, seq=2),
                Message(status=states.FAILURE, seq=3),
            ]
            for state_message in state_messages:
                results.put(state_message)
            r1 = backend.get_task_meta(uuid())
            self.assertDictContainsSubset({"status": states.FAILURE, "seq": 3}, r1, "FFWDs to the last state")

            # Caches last known state.
            results.put(Message())
            tid = uuid()
            backend.get_task_meta(tid)
            self.assertIn(tid, backend._cache, "Caches last known state")

            self.assertTrue(state_messages[-1].requeued)

            # Returns cache if no new states.
            results.queue.clear()
            assert not results.qsize()
            backend._cache[tid] = "hello"
            self.assertEqual(backend.get_task_meta(tid), "hello", "Returns cache if no new states")
Example #2
    def test_on_timeout(self):

        class MockLogger(object):

            def __init__(self):
                self.warnings = []
                self.errors = []

            def warning(self, msg, *args, **kwargs):
                self.warnings.append(msg % args)

            def error(self, msg, *args, **kwargs):
                self.errors.append(msg % args)

        tw = TaskRequest(mytask.name, uuid(), [1], {"f": "x"})
        tw.logger = MockLogger()
        tw.on_timeout(soft=True, timeout=1337)
        self.assertIn("Soft time limit (1337s) exceeded",
                      tw.logger.warnings[0])
        tw.on_timeout(soft=False, timeout=1337)
        self.assertIn("Hard time limit (1337s) exceeded", tw.logger.errors[0])
        self.assertEqual(mytask.backend.get_status(tw.id),
                         states.FAILURE)

        mytask.ignore_result = True
        try:
            tw = TaskRequest(mytask.name, uuid(), [1], {"f": "x"})
            tw.logger = MockLogger()
            tw.on_timeout(soft=True, timeout=1336)
            self.assertEqual(mytask.backend.get_status(tw.id),
                             states.PENDING)
        finally:
            mytask.ignore_result = False
Example #3
 def test_iter_native(self):
     backend = SimpleBackend()
     subtasks = [AsyncResult(uuid(), backend=backend)
                     for i in range(10)]
     ts = GroupResult(uuid(), subtasks)
     backend.ids = [subtask.id for subtask in subtasks]
     self.assertEqual(len(list(ts.iter_native())), 10)
Example #4
 def test_forget(self):
     subs = [MockAsyncResultSuccess(uuid()),
             MockAsyncResultSuccess(uuid())]
     ts = GroupResult(uuid(), subs)
     ts.forget()
     for sub in subs:
         self.assertTrue(sub.forgotten)
Example #5
 def test_iterate_eager(self):
     ar1 = EagerResult(uuid(), 42, states.SUCCESS)
     ar2 = EagerResult(uuid(), 42, states.SUCCESS)
     ts = GroupResult(uuid(), [ar1, ar2])
     it = iter(ts)
     self.assertEqual(next(it), 42)
     self.assertEqual(next(it), 42)
Example #6
 def test_iterate_yields(self):
     ar = MockAsyncResultSuccess(uuid())
     ar2 = MockAsyncResultSuccess(uuid())
     ts = GroupResult(uuid(), [ar, ar2])
     it = iter(ts)
     self.assertEqual(next(it), 42)
     self.assertEqual(next(it), 42)
Example #7
 def test_join_timeout(self):
     ar = MockAsyncResultSuccess(uuid())
     ar2 = MockAsyncResultSuccess(uuid())
     ar3 = AsyncResult(uuid())
     ts = GroupResult(uuid(), [ar, ar2, ar3])
     with self.assertRaises(TimeoutError):
         ts.join(timeout=0.0000001)
Example #8
    def test_on_failure_WorkerLostError(self):

        def get_ei():
            try:
                raise WorkerLostError('do re mi')
            except WorkerLostError:
                return ExceptionInfo()

        job = TaskRequest(
            self.mytask.name, uuid(), [1], {'f': 'x'}, app=self.app,
        )
        exc_info = get_ei()
        job.on_failure(exc_info)
        self.assertEqual(
            self.mytask.backend.get_status(job.id), states.FAILURE,
        )

        self.mytask.ignore_result = True
        exc_info = get_ei()
        job = TaskRequest(
            self.mytask.name, uuid(), [1], {'f': 'x'}, app=self.app,
        )
        job.on_failure(exc_info)
        self.assertEqual(
            self.mytask.backend.get_status(job.id), states.PENDING,
        )
Example #9
    def test_poll_result(self):
        with self._result_context() as (results, backend, Message):
            tid = uuid()
            # FFWD's to the latest state.
            state_messages = [
                Message(task_id=tid, status=states.RECEIVED, seq=1),
                Message(task_id=tid, status=states.STARTED, seq=2),
                Message(task_id=tid, status=states.FAILURE, seq=3),
            ]
            for state_message in state_messages:
                results.put(state_message)
            r1 = backend.get_task_meta(tid)
            self.assertDictContainsSubset(
                {'status': states.FAILURE, 'seq': 3}, r1,
                'FFWDs to the last state',
            )

            # Caches last known state.
            tid = uuid()
            results.put(Message(task_id=tid))
            backend.get_task_meta(tid)
            self.assertIn(tid, backend._cache, 'Caches last known state')

            self.assertTrue(state_messages[-1].requeued)

            # Returns cache if no new states.
            results.queue.clear()
            assert not results.qsize()
            backend._cache[tid] = 'hello'
            self.assertEqual(
                backend.get_task_meta(tid), 'hello',
                'Returns cache if no new states',
            )
Example #10
 def test_with_parent(self):
     x = self.app.AsyncResult(uuid())
     x.parent = self.app.AsyncResult(uuid())
     y = result_from_tuple(x.as_tuple(), self.app)
     self.assertEqual(y, x)
     self.assertEqual(y.parent, x.parent)
     self.assertIsInstance(y.parent, AsyncResult)
Example #11
 def test_join_native(self):
     backend = SimpleBackend()
     subtasks = [self.app.AsyncResult(uuid(), backend=backend) for i in range(10)]
     ts = self.app.GroupResult(uuid(), subtasks)
     backend.ids = [subtask.id for subtask in subtasks]
     res = ts.join_native()
     self.assertEqual(res, list(range(10)))
Example #12
    def test_get_many(self):
        b = self.create_backend(max_cached_results=10)

        tids = []
        for i in range(10):
            tid = uuid()
            b.store_result(tid, i, states.SUCCESS)
            tids.append(tid)

        res = list(b.get_many(tids, timeout=1))
        expected_results = [
            (task_id, {
                'status': states.SUCCESS,
                'result': i,
                'traceback': None,
                'task_id': task_id,
                'children': None,
            })
            for i, task_id in enumerate(tids)
        ]
        self.assertEqual(sorted(res), sorted(expected_results))
        self.assertDictEqual(b._cache[res[0][0]], res[0][1])
        cached_res = list(b.get_many(tids, timeout=1))
        self.assertEqual(sorted(cached_res), sorted(expected_results))

        # times out when not ready in cache (this shouldn't happen)
        b._cache[res[0][0]]['status'] = states.RETRY
        with self.assertRaises(socket.timeout):
            list(b.get_many(tids, timeout=0.01))

        # times out when result not yet ready
        with self.assertRaises(socket.timeout):
            tids = [uuid()]
            b.store_result(tids[0], i, states.PENDING)
            list(b.get_many(tids, timeout=0.01))
Example #13
 def test_with_parent(self):
     x = self.app.AsyncResult(uuid())
     x.parent = self.app.AsyncResult(uuid())
     y = from_serializable(x.serializable(), self.app)
     self.assertEqual(y, x)
     self.assertEqual(y.parent, x.parent)
     self.assertIsInstance(y.parent, AsyncResult)
Example #14
 def test_iterate_eager(self):
     ar1 = EagerResult(uuid(), 42, states.SUCCESS)
     ar2 = EagerResult(uuid(), 42, states.SUCCESS)
     ts = self.app.GroupResult(uuid(), [ar1, ar2])
     it = ts.iterate()
     self.assertEqual(next(it), 42)
     self.assertEqual(next(it), 42)
Example #15
 def test_iterate_yields(self):
     ar = MockAsyncResultSuccess(uuid(), app=self.app)
     ar2 = MockAsyncResultSuccess(uuid(), app=self.app)
     ts = self.app.GroupResult(uuid(), [ar, ar2])
     it = ts.iterate()
     self.assertEqual(next(it), 42)
     self.assertEqual(next(it), 42)
Example #16
    def test_on_chord_part_return(self, setresult):
        tb = CacheBackend(backend='memory://', app=self.app)

        deps = Mock()
        deps.__len__ = Mock()
        deps.__len__.return_value = 2
        setresult.restore.return_value = deps
        task = Mock()
        task.name = 'foobarbaz'
        try:
            self.app.tasks['foobarbaz'] = task
            task.request.chord = subtask(task)

            gid, res = uuid(), [AsyncResult(uuid()) for _ in range(3)]
            task.request.group = gid
            tb.on_chord_apply(gid, {}, result=res)

            self.assertFalse(deps.join_native.called)
            tb.on_chord_part_return(task)
            self.assertFalse(deps.join_native.called)

            tb.on_chord_part_return(task)
            deps.join_native.assert_called_with(propagate=True)
            deps.delete.assert_called_with()

        finally:
            self.app.tasks.pop('foobarbaz')
Example #17
    def test_poll_result_for_json_serializer(self):
        with self._result_context(serializer="json") as (results, backend, Message):
            tid = uuid()
            # FFWD's to the latest state.
            state_messages = [
                Message(task_id=tid, status=states.RECEIVED, seq=1),
                Message(task_id=tid, status=states.STARTED, seq=2),
                Message(
                    task_id=tid,
                    status=states.FAILURE,
                    seq=3,
                    result={"exc_type": "RuntimeError", "exc_message": "Mock"},
                ),
            ]
            for state_message in state_messages:
                results.put(state_message)
            r1 = backend.get_task_meta(tid)
            self.assertDictContainsSubset({"status": states.FAILURE, "seq": 3}, r1, "FFWDs to the last state")
            self.assertEqual(type(r1["result"]).__name__, "RuntimeError")
            self.assertEqual(str(r1["result"]), "Mock")

            # Caches last known state.
            tid = uuid()
            results.put(Message(task_id=tid))
            backend.get_task_meta(tid)
            self.assertIn(tid, backend._cache, "Caches last known state")

            self.assertTrue(state_messages[-1].requeued)

            # Returns cache if no new states.
            results.queue.clear()
            assert not results.qsize()
            backend._cache[tid] = "hello"
            self.assertEqual(backend.get_task_meta(tid), "hello", "Returns cache if no new states")
Example #18
    def test_iterate_respects_subpolling_interval(self):
        r1 = self.app.AsyncResult(uuid())
        r2 = self.app.AsyncResult(uuid())
        backend = r1.backend = r2.backend = Mock()
        backend.subpolling_interval = 10

        ready = r1.ready = r2.ready = Mock()

        def se(*args, **kwargs):
            ready.side_effect = KeyError()
            return False
        ready.return_value = False
        ready.side_effect = se

        x = self.app.ResultSet([r1, r2])
        with self.dummy_copy():
            with patch('celery.result.time') as _time:
                with self.assertPendingDeprecation():
                    with self.assertRaises(KeyError):
                        list(x.iterate())
                _time.sleep.assert_called_with(10)

            backend.subpolling_interval = 0
            with patch('celery.result.time') as _time:
                with self.assertPendingDeprecation():
                    with self.assertRaises(KeyError):
                        ready.return_value = False
                        ready.side_effect = se
                        list(x.iterate())
                    self.assertFalse(_time.sleep.called)
Example #19
 def test_iterate_raises(self):
     ar = MockAsyncResultFailure(uuid(), app=self.app)
     ts = self.app.GroupResult(uuid(), [ar])
     with self.assertPendingDeprecation():
         it = ts.iterate()
     with self.assertRaises(KeyError):
         next(it)
Example #20
    def test_marked_as_started(self):

        class Backend(mytask.backend.__class__):
            _started = []

            def store_result(self, tid, meta, state):
                if state == states.STARTED:
                    self._started.append(tid)

        prev, mytask.backend = mytask.backend, Backend()
        mytask.track_started = True

        try:
            tid = uuid()
            jail(tid, mytask.name, [2], {})
            self.assertIn(tid, Backend._started)

            mytask.ignore_result = True
            tid = uuid()
            jail(tid, mytask.name, [2], {})
            self.assertNotIn(tid, Backend._started)
        finally:
            mytask.backend = prev
            mytask.track_started = False
            mytask.ignore_result = False
Example #21
 def test_join_native_raises(self):
     ts = self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())])
     ts.iter_native = Mock()
     ts.iter_native.return_value = iter([
         (uuid(), {'status': states.FAILURE, 'result': KeyError()})
     ])
     with self.assertRaises(KeyError):
         ts.join_native(propagate=True)
Example #22
    def test_sets_store_errors(self):
        self.mytask.ignore_result = True
        job = TaskRequest(self.mytask.name, uuid(), [1], {"f": "x"}, app=self.app)
        self.assertFalse(job.store_errors)

        self.mytask.store_errors_even_if_ignored = True
        job = TaskRequest(self.mytask.name, uuid(), [1], {"f": "x"}, app=self.app)
        self.assertTrue(job.store_errors)
Example #23
 def test_save_restore_delete_group(self):
     tid = uuid()
     tsr = GroupResult(tid, [AsyncResult(uuid()) for _ in range(10)])
     self.b.save_group(tid, tsr)
     self.b.restore_group(tid)
     self.assertEqual(self.b.restore_group(tid), tsr)
     self.b.delete_group(tid)
     self.assertIsNone(self.b.restore_group(tid))
Example #24
 def test_join_native(self):
     backend = SimpleBackend()
     subtasks = [AsyncResult(uuid(), backend=backend)
                     for i in range(10)]
     ts = TaskSetResult(uuid(), subtasks)
     backend.ids = [subtask.task_id for subtask in subtasks]
     res = ts.join_native()
     self.assertEqual(res, list(range(10)))
Example #25
 def test_iter_native(self):
     backend = SimpleBackend()
     results = [self.app.AsyncResult(uuid(), backend=backend)
                for i in range(10)]
     ts = self.app.GroupResult(uuid(), results)
     ts.app.backend = backend
     backend.ids = [result.id for result in results]
     self.assertEqual(len(list(ts.iter_native())), 10)
Example #26
 def prepare(self, options, tasks, **kwargs):
     r = []
     options["taskset_id"] = group_id = \
             options.setdefault("task_id", uuid())
     for task in tasks:
         tid = task.options.setdefault("task_id", uuid())
         task.options["taskset_id"] = group_id
         r.append(self.AsyncResult(tid))
     return tasks, self.app.TaskSetResult(group_id, r)
Example #27
    def test_get_many(self):
        data = {uuid(): 'foo', uuid(): 'bar', uuid(): 'baz'}

        tb = FilesystemBackend(app=self.app, url=self.url)
        for key, value in data.items():
            tb.mark_as_done(key, value)

        for key, result in tb.get_many(data.keys()):
            self.assertEqual(result['result'], data[key])
Example #28
 def test_eq_ne(self):
     ts = self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())])
     ts2 = self.app.GroupResult(ts.id, ts.results)
     ts3 = self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())])
     ts4 = self.app.GroupResult(ts.id, [self.app.AsyncResult(uuid())])
     self.assertEqual(ts, ts2)
     self.assertNotEqual(ts, ts3)
     self.assertNotEqual(ts, ts4)
     self.assertNotEqual(ts, object())
Example #29
 def test_save_restore_delete_taskset(self):
     tid = uuid()
     tsr = TaskSetResult(tid, [AsyncResult(uuid()) for _ in range(10)])
     self.b.save_taskset(tid, tsr)
     self.b.restore_taskset(tid)
     self.assertEqual(self.b.restore_taskset(tid), tsr)
     self.b.delete_taskset(tid)
     self.assertIsNone(self.b.restore_taskset(tid))
Example #30
    def test_get_children(self):
        tid = uuid()
        x = AsyncResult(tid)
        child = [AsyncResult(uuid()).serializable() for i in range(10)]
        x.backend._cache[tid] = {'children': child}
        self.assertTrue(x.children)
        self.assertEqual(len(x.children), 10)

        x.backend._cache[tid] = {'result': None}
        self.assertIsNone(x.children)
Example #31
 def setup(self):
     self.task = self.app.AsyncResult(uuid())
Example #32
 def test_iter_native_when_empty_group(self):
     ts = self.app.GroupResult(uuid(), [])
     self.assertListEqual(list(ts.iter_native()), [])
Example #33
 def test_AsyncResult(self):
     task_id = uuid()
     result = retry_task.AsyncResult(task_id)
     self.assertEqual(result.backend, retry_task.backend)
     self.assertEqual(result.id, task_id)
Example #34
 def test_mark_as_done_writes_file(self):
     tb = FilesystemBackend(app=self.app, url=self.url)
     tb.mark_as_done(uuid(), 42)
     self.assertEqual(len(os.listdir(self.directory)), 1)
Example #35
 def setup(self):
     self.size = 10
     self.ts = self.app.GroupResult(
         uuid(),
         make_mock_group(self.app, self.size),
     )
Example #36
    def apply(self,
              args=None,
              kwargs=None,
              link=None,
              link_error=None,
              **options):
        """Execute this task locally, by blocking until the task returns.

        :param args: positional arguments passed on to the task.
        :param kwargs: keyword arguments passed on to the task.
        :keyword throw: Re-raise task exceptions.  Defaults to
                        the :setting:`CELERY_EAGER_PROPAGATES_EXCEPTIONS`
                        setting.

        :rtype: :class:`celery.result.EagerResult`

        """
        # trace imports Task, so need to import inline.
        from celery.app.trace import eager_trace_task

        app = self._get_app()
        args = args or ()
        # add 'self' if this is a bound method.
        if self.__self__ is not None:
            args = (self.__self__, ) + tuple(args)
        kwargs = kwargs or {}
        task_id = options.get('task_id') or uuid()
        retries = options.get('retries', 0)
        throw = app.either('CELERY_EAGER_PROPAGATES_EXCEPTIONS',
                           options.pop('throw', None))

        # Make sure we get the task instance, not class.
        task = app._tasks[self.name]

        request = {
            'id': task_id,
            'retries': retries,
            'is_eager': True,
            'logfile': options.get('logfile'),
            'loglevel': options.get('loglevel', 0),
            'callbacks': maybe_list(link),
            'errbacks': maybe_list(link_error),
            'delivery_info': {
                'is_eager': True
            }
        }
        if self.accept_magic_kwargs:
            default_kwargs = {
                'task_name': task.name,
                'task_id': task_id,
                'task_retries': retries,
                'task_is_eager': True,
                'logfile': options.get('logfile'),
                'loglevel': options.get('loglevel', 0),
                'delivery_info': {
                    'is_eager': True
                }
            }
            supported_keys = fun_takes_kwargs(task.run, default_kwargs)
            extend_with = dict((key, val) for key, val in items(default_kwargs)
                               if key in supported_keys)
            kwargs.update(extend_with)

        tb = None
        retval, info = eager_trace_task(task,
                                        task_id,
                                        args,
                                        kwargs,
                                        app=self._get_app(),
                                        request=request,
                                        propagate=throw)
        if isinstance(retval, ExceptionInfo):
            retval, tb = retval.exception, retval.traceback
        state = states.SUCCESS if info is None else info.state
        return EagerResult(task_id, retval, state, traceback=tb)
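A hedged usage sketch of the above: apply() executes the task body inline and wraps the outcome in an EagerResult. The add task and the 'demo' app here are hypothetical, used only for illustration:

from celery import Celery

app = Celery('demo')

@app.task
def add(x, y):
    return x + y

# Runs in the current process, blocking until add() returns.
res = add.apply(args=(2, 2))
assert res.get() == 4           # the value is already available locally
assert res.state == 'SUCCESS'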
Example #37
 def test_mark_as_started(self):
     tb = DatabaseBackend(self.uri, app=self.app)
     tid = uuid()
     tb.mark_as_started(tid)
     self.assertEqual(tb.get_state(tid), states.STARTED)
Example #38
 def test_reduce(self):
     ts = self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())])
     fun, args = ts.__reduce__()
     ts2 = fun(*args)
     self.assertEqual(ts2.id, ts.id)
     self.assertEqual(ts, ts2)
Example #39
 def apply(self, taskset_id=None):
     """Applies the TaskSet locally by blocking until all tasks return."""
     setid = taskset_id or uuid()
     return self.app.TaskSetResult(setid, self._sync_results(setid))
Example #40
 def test_mark_as_revoked(self):
     tb = DatabaseBackend(self.uri, app=self.app)
     tid = uuid()
     tb.mark_as_revoked(tid)
     self.assertEqual(tb.get_state(tid), states.REVOKED)
Example #41
 def test_poll_no_messages(self):
     b = self.create_backend()
     self.assertState(b.get_task_meta(uuid()), states.PENDING)
Example #42
 def test_repair_uuid(self):
     from celery.backends.amqp import repair_uuid
     for i in range(10):
         tid = uuid()
         self.assertEqual(repair_uuid(tid.replace('-', '')), tid)
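Given the round-trip this test asserts, repair_uuid presumably just re-inserts the dashes stripped from the 32-character hex form; a minimal sketch, not necessarily the actual celery.backends.amqp implementation:

def repair_uuid(s):
    # Restore the canonical 8-4-4-4-12 grouping of a dashless uuid.
    return '-'.join([s[:8], s[8:12], s[12:16], s[16:20], s[20:]])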
Example #43
 def send_task(self, name=None, args=None, kwargs=None, **options):
     self.sent.append({'name': name,
                       'args': args,
                       'kwargs': kwargs,
                       'options': options})
     return self.app.AsyncResult(uuid())
Example #44
 def test_children_is_results(self):
     ts = self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())])
     self.assertIs(ts.children, ts.results)
Example #45
 def test_compat(self):
     uid = uuid()
     x = result_from_tuple([uid, []], app=self.app)
     self.assertEqual(x.id, uid)
Example #46
 def test_repr(self):
     self.assertTrue(
         repr(self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())])))
Example #47
 def test_named_taskset(self):
     prefix = 'test_named_taskset-'
     ts = TaskSet([return_True_task.subtask([1])])
     res = ts.apply(taskset_id=prefix + uuid())
     self.assertTrue(res.taskset_id.startswith(prefix))
Example #48
 def test_is_pickleable(self):
     ts = self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())])
     self.assertEqual(pickle.loads(pickle.dumps(ts)), ts)
     ts2 = self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())])
     self.assertEqual(pickle.loads(pickle.dumps(ts2)), ts2)
Example #49
 def test_put__get(self):
     b = buckets.TaskBucket(task_registry=self.registry)
     job = MockJob(uuid(), TaskA.name, ['theqbf'], {'foo': 'bar'})
     b.put(job)
     self.assertEqual(b.get(), job)
Example #50
def mock_task(name, state, result):
    return dict(id=uuid(), name=name, state=state, result=result)
Example #51
 def setuids(self, uids):
     uids = self.tA, self.tB, self.tC = uids or [uuid(), uuid(), uuid()]
     return uids
Example #52
 def test_AsyncResult(self):
     x = self.app.AsyncResult(uuid())
     self.assertEqual(x, result_from_tuple(x.as_tuple(), self.app))
     self.assertEqual(x, result_from_tuple(x, self.app))
Example #53
def populate_commands_vhost_http_https_only(vhost, workspace, simulation, output_base_dir, config_file=None):
    workspace_mode = lib.db.get_workspace_mode(workspace)[0][0]
    # Pull all in-scope vhosts that have not been submitted.
    celery_path = sys.path[0]
    config, supported_services = config_parser.read_config_ini(config_file)
    task_id_list = []
    populated_command_list = []
    total_tasks_num = 0
    vhost = str(vhost)
    vhost_ip = db.get_vhost_ip(vhost, workspace)[0][0]
    host_dir = output_base_dir + vhost
    host_data_dir = host_dir + "/celerystalkOutput/"
    # Creates something like /pentest/10.0.0.1, /pentest/10.0.0.2, etc.
    utils.create_dir_structure(vhost, host_dir)
    # Next two lines create the file that will contain each command that was executed. This is not the audit log,
    # but a log of commands that can easily be copy/pasted if you need to run them again.
    summary_file_name = host_data_dir + "ScanSummary.log"
    summary_file = open(summary_file_name, 'a')

    scannable_vhost = vhost
    ip = db.get_vhost_ip(scannable_vhost, workspace)[0][0]

    if workspace_mode == "vapt":
        db_scanned_services = db.get_all_services_for_ip(ip, workspace)
    elif workspace_mode == "bb":
        db_scanned_services = db.get_all_services_for_ip(vhost, workspace)

    # Run the chain on each service and then update the db as submitted.
    for (ip, scanned_service_port, scanned_service_protocol, scanned_service_name, product, version, extra_info) in db_scanned_services:
        scan_output_base_file_name = output_base_dir + "/" + ip + "/celerystalkOutput/" + scannable_vhost + "_" +  str(scanned_service_port) + "_" + scanned_service_protocol + "_"
        host_dir = output_base_dir + "/" + ip

        # TODO: This duplicates the parse-config/submit-jobs-to-celery code and might introduce a bug; call that function here instead.
        for section in config.sections():
            if (section == "http") or (section == "https"):
                if section == scanned_service_name:
                    for (cmd_name, cmd) in config.items(section):
                        outfile = scan_output_base_file_name + cmd_name
                        populated_command = cmd.replace("[TARGET]", scannable_vhost).replace("[PORT]",
                            str(scanned_service_port)).replace("[OUTPUT]", outfile).replace("/[PATH]", "")
                        populated_command = replace_user_config_options(config_file, populated_command)

                        if simulation:
                            # debug - sends jobs to celery, but with a # in front of every one.
                            populated_command = "#" + populated_command

                        # Grab a UUID from celery.utils so it can be assigned to the task at init,
                        # which allows passing the same id to all of the tasks in the chain.

                        task_id = uuid()
                        populated_command_list.append((cmd_name, populated_command, vhost, outfile + ".txt",
                                                       workspace, task_id, scanned_service_port, scanned_service_name, scanned_service_protocol))

    if not simulation:
        db.update_vhosts_submitted(vhost_ip, vhost, workspace, 1)
    return populated_command_list
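Since this function only builds the command list, here is a hedged sketch of how a caller might submit it, mirroring the chain pattern in the later examples; tasks.run_cmd, utils.create_task and celery_path are assumed from that code:

for (cmd_name, populated_command, vhost, outfile, workspace, task_id,
     port, service_name, proto) in populated_command_list:
    utils.create_task(cmd_name, populated_command, vhost, outfile, workspace, task_id)
    # .set(task_id=task_id) binds the pre-generated uuid() to the Celery task.
    tasks.run_cmd.si(cmd_name, populated_command, celery_path, task_id) \
        .set(task_id=task_id).apply_async()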
Example #54
    def apply_async(self,
                    args=None,
                    kwargs=None,
                    task_id=None,
                    producer=None,
                    link=None,
                    link_error=None,
                    **options):
        """Apply tasks asynchronously by sending a message.

        :keyword args: The positional arguments to pass on to the
                       task (a :class:`list` or :class:`tuple`).

        :keyword kwargs: The keyword arguments to pass on to the
                         task (a :class:`dict`)

        :keyword countdown: Number of seconds into the future that the
                            task should execute. Defaults to immediate
                            execution.

        :keyword eta: A :class:`~datetime.datetime` object describing
                      the absolute time and date of when the task should
                      be executed.  May not be specified if `countdown`
                      is also supplied.

        :keyword expires: Either a :class:`int`, describing the number of
                          seconds, or a :class:`~datetime.datetime` object
                          that describes the absolute time and date of when
                          the task should expire.  The task will not be
                          executed after the expiration time.

        :keyword connection: Re-use existing broker connection instead
                             of establishing a new one.

        :keyword retry: If enabled sending of the task message will be retried
                        in the event of connection loss or failure.  Default
                        is taken from the :setting:`CELERY_TASK_PUBLISH_RETRY`
                        setting.  Note you need to handle the
                        producer/connection manually for this to work.

        :keyword retry_policy:  Override the retry policy used.  See the
                                :setting:`CELERY_TASK_PUBLISH_RETRY` setting.

        :keyword routing_key: Custom routing key used to route the task to a
                              worker server. If in combination with a
                              ``queue`` argument only used to specify custom
                              routing keys to topic exchanges.

        :keyword queue: The queue to route the task to.  This must be a key
                        present in :setting:`CELERY_QUEUES`, or
                        :setting:`CELERY_CREATE_MISSING_QUEUES` must be
                        enabled.  See :ref:`guide-routing` for more
                        information.

        :keyword exchange: Named custom exchange to send the task to.
                           Usually not used in combination with the ``queue``
                           argument.

        :keyword priority: The task priority, a number between 0 and 9.
                           Defaults to the :attr:`priority` attribute.

        :keyword serializer: A string identifying the default
                             serialization method to use.  Can be `pickle`,
                             `json`, `yaml`, `msgpack` or any custom
                             serialization method that has been registered
                             with :mod:`kombu.serialization.registry`.
                             Defaults to the :attr:`serializer` attribute.

        :keyword compression: A string identifying the compression method
                              to use.  Can be one of ``zlib``, ``bzip2``,
                              or any custom compression methods registered with
                              :func:`kombu.compression.register`. Defaults to
                              the :setting:`CELERY_MESSAGE_COMPRESSION`
                              setting.
        :keyword link: A single, or a list of tasks to apply if the
                       task exits successfully.
        :keyword link_error: A single, or a list of tasks to apply
                      if an error occurs while executing the task.

        :keyword producer: :class:`~@amqp.TaskProducer` instance to use.
        :keyword add_to_parent: If set to True (default) and the task
            is applied while executing another task, then the result
            will be appended to the parent tasks ``request.children``
            attribute.  Trailing can also be disabled by default using the
            :attr:`trail` attribute
        :keyword publisher: Deprecated alias to ``producer``.

        Also supports all keyword arguments supported by
        :meth:`kombu.Producer.publish`.

        .. note::
            If the :setting:`CELERY_ALWAYS_EAGER` setting is set, it will
            be replaced by a local :func:`apply` call instead.

        """
        app = self._get_app()
        if app.conf.CELERY_ALWAYS_EAGER:
            return self.apply(args,
                              kwargs,
                              task_id=task_id or uuid(),
                              link=link,
                              link_error=link_error,
                              **options)
        # add 'self' if this is a "task_method".
        if self.__self__ is not None:
            args = args if isinstance(args, tuple) else tuple(args or ())
            args = (self.__self__, ) + args
        return app.send_task(self.name,
                             args,
                             kwargs,
                             task_id=task_id,
                             producer=producer,
                             link=link,
                             link_error=link_error,
                             result_cls=self.AsyncResult,
                             **dict(self._get_exec_options(), **options))
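A hedged usage sketch (the add task is hypothetical and a broker is assumed to be configured): apply_async returns an AsyncResult immediately, with the keywords documented above passed as options:

# Send the task to the 'default' queue, to run 10 seconds from now,
# expiring if no worker picks it up within two minutes.
result = add.apply_async(args=(2, 2), countdown=10,
                         queue='default', expires=120)
print(result.id)                 # the task id (uuid() when not supplied)
value = result.get(timeout=30)   # blocks until a worker stores the result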
Example #55
 def setup(self):
     self.ts = self.app.GroupResult(
         uuid(),
         [self.app.AsyncResult(uuid()),
          self.app.AsyncResult(uuid())])
Example #56
def process_url(url, workspace, output_dir, arguments, config_file=None):
    celery_path = sys.path[0]
    config, supported_services = config_parser.read_config_ini(config_file)
    task_id_list = []
    urls_to_screenshot = []
    simulation = arguments["--simulation"]

    try:
        parsed_url = urlparse.urlparse(url)
        scheme = parsed_url[0]
        if not scheme:
            print("\n[!] URL parameter (-u) requires that you specify the scheme (http:// or https://)\n")
            exit()
        if ":" in parsed_url[1]:
            vhost, port = parsed_url[1].split(':')
        else:
            vhost = parsed_url[1]
            if scheme == "http":
                port = 80
            elif scheme == "https":
                port = 443
        path = parsed_url[2].replace("//", "/")
    except Exception:
        print("\n[!] Could not parse URL: {0}\n".format(url))
        exit()

    try:
        ip = socket.gethostbyname(vhost)
    except socket.error:
        print("Error getting IP")
        ip = False


    db_ip_tuple = lib.db.get_vhost_ip(vhost, workspace)
    if db_ip_tuple:
        db_ip = db_ip_tuple[0][0]
        if db_ip != ip:
            lib.db.update_vhost_ip(ip, vhost, workspace)


    proto = "tcp"
    vhost_explicitly_out_of_scope = lib.db.is_vhost_explicitly_out_of_scope(vhost, workspace)
    if not vhost_explicitly_out_of_scope:  # and if the vhost is not explicitly out of scope
        if not ip:
            exit()
        elif ip == vhost:
            scan_output_base_file_dir = output_dir + "/" + ip + "/celerystalkOutput/" + ip + "_" + str(
                port) + "_" + proto + "_"
        else:
            scan_output_base_file_dir = output_dir + "/" + ip + "/celerystalkOutput/" + vhost + "_" + str(
                port) + "_" + proto + "_"

        host_dir = output_dir + "/" + ip
        host_data_dir = host_dir + "/celerystalkOutput/"
        # Creates something like /pentest/10.0.0.1, /pentest/10.0.0.2, etc.
        utils.create_dir_structure(ip, host_dir)
        # Next two lines create the file that will contain each command that was executed. This is not the audit log,
        # but a log of commands that can easily be copy/pasted if you need to run them again.
        summary_file_name = host_data_dir + "ScanSummary.log"
        summary_file = open(summary_file_name, 'a')

        is_vhost_in_db = lib.db.is_vhost_in_db(vhost, workspace)
        if is_vhost_in_db:
            lib.db.update_vhosts_in_scope(ip, vhost, workspace, 1)
        else:
            db_vhost = (ip, vhost, 1, 0, 1, workspace)  # add it to the vhosts db and mark as in scope
            lib.db.create_vhost(db_vhost)


        # Only mark it as submitted if this is not a simulation.
        if not simulation:
            lib.db.update_vhosts_submitted(ip, vhost, workspace, 1)

        # Insert port/service combo into services table if it doesn't exist.
        db_service = db.get_service(ip, port, proto, workspace)
        if not db_service:
            db_string = (ip, port, proto, scheme, '', '', '', workspace)
            db.create_service(db_string)

        # Mark this host as in scope now.
        if not simulation:
            db.update_vhosts_submitted(vhost, vhost, workspace, 1)
    # I might want to keep this, but I think it is redundant if we have gobuster and photon screenshots...
        # Insert url into paths table and take screenshot
        # db_path = db.get_path(path, workspace)
        # if not db_path:
        #     url_screenshot_filename = scan_output_base_file_dir + url.replace("http", "").replace("https", "") \
        #         .replace("/", "_") \
        #         .replace("\\", "") \
        #         .replace(":", "_") + ".png"
        #     url_screenshot_filename = url_screenshot_filename.replace("__", "")
        #     db_path = (ip, port, url, 0, url_screenshot_filename, workspace)
        #     db.insert_new_path(db_path)
        #     # print("Found Url: " + str(url))
        #     urls_to_screenshot.append((url, url_screenshot_filename))
        #     if not simulation:
        #         task_id = uuid()
        #         command_name = "Screenshots"
        #         populated_command = "firefox-esr URL mode screenshot | {0} | {1}".format(vhost,scan_output_base_file_dir)
        #         utils.create_task(command_name, populated_command, vhost, scan_output_base_file_dir, workspace, task_id)
        #         result = tasks.cel_take_screenshot.delay(urls_to_screenshot,task_id,vhost,scan_output_base_file_dir, workspace,command_name,populated_command)
        #     # print(result)

        # TODO: This duplicates the parse-config/submit-jobs-to-celery code and might introduce a bug; call that function here instead.
        for section in config.sections():
            if (section == "http") or (section == "https"):
                if section == scheme:
                    for (cmd_name, cmd) in config.items(section):
                        path_for_filename = path.replace("/", "_")
                        outfile = scan_output_base_file_dir + path_for_filename + "_" + cmd_name
                        populated_command = cmd.replace("[TARGET]", vhost).replace("[PORT]", str(port)) \
                            .replace("[OUTPUT]", outfile).replace("/[PATH]", path)
                        populated_command = replace_user_config_options(config_file, populated_command)

                        if simulation:
                            # debug - sends jobs to celery, but with a # in front of every one.
                            populated_command = "#" + populated_command

                        # Grab a UUID from celery.utils so it can be assigned to the task at init,
                        # which allows passing the same id to all of the tasks in the chain.

                        task_id = uuid()
                        utils.create_task(cmd_name, populated_command, vhost, outfile + ".txt", workspace, task_id)
                        result = chain(
                            # insert a row into the database to mark the task as submitted. a subtask does not get tracked
                            # in celery the same way a task does, for instance, you can't find it in flower
                            # tasks.cel_create_task.subtask(args=(cmd_name,populated_command, vhost, outfile + ".txt", workspace, task_id)),

                            # run the command. run_task takes care of marking the task as started and then completed.
                            # The si tells run_cmd to ignore the data returned from a previous task
                            tasks.run_cmd.si(cmd_name, populated_command, celery_path, task_id).set(task_id=task_id),

                            # right now, every executed command gets sent to a generic post_process task that can do
                            # additional stuff based on the command that just ran.
                            tasks.post_process.si(cmd_name, populated_command, output_dir, workspace, vhost,
                                                  host_dir,
                                                  simulation, port, scheme, proto, celery_path),
                        )()  # .apply_async()

                        task_id_list.append(result.task_id)
                        host_audit_log = host_dir + "/" + "{0}_executed_commands.txt".format(ip)
                        f = open(host_audit_log, 'a')
                        f.write(populated_command + "\n\n")
                        f.close()
        print("[+] Submitted {0} tasks to queue.\n".format(len(task_id_list)))
    else:
        print("[!] {0} is explicitly marked as out of scope. Skipping...".format(vhost))
Example #57
 def test_revoke_from_resultset(self):
     r = self.app.GroupResult(uuid(),
                              [self.app.AsyncResult(x)
                               for x in [uuid() for i in range(10)]])
     r.revoke()
     self.assertIn('revoke', MockMailbox.sent)
Example #58
def populate_comamnds(vhost, workspace, simulation, output_base_dir, config_file=None):
    workspace_mode = lib.db.get_workspace_mode(workspace)[0][0]
    celery_path = sys.path[0]
    config, supported_services = config_parser.read_config_ini(config_file)
    task_id_list = []
    populated_command_list = []
    total_tasks_num = 0
    vhost = str(vhost)
    vhost_ip = db.get_vhost_ip(vhost, workspace)[0]
    host_dir = output_base_dir + vhost
    host_data_dir = host_dir + "/celerystalkOutput/"
    # Creates something like /pentest/10.0.0.1, /pentest/10.0.0.2, etc.
    utils.create_dir_structure(vhost, host_dir)
    # Next two lines create the file that will contain each command that was executed. This is not the audit log,
    # but a log of commands that can easily be copy/pasted if you need to run them again.
    summary_file_name = host_data_dir + "ScanSummary.log"
    summary_file = open(summary_file_name, 'a')

    # This is just a workaround until there is a real solution. Really, UDP scans should be done
    # for every host in the scanned host list: launch a quick UDP scan (top 100 ports).
    scan_output_base_host_filename = host_data_dir + vhost

    ###################################
    # If enabled in config, run a udp scan against the host.
    ###################################


    for (cmd_name, cmd) in config.items("nmap-commands"):

        if cmd_name == "udp_scan":
            outfile = scan_output_base_host_filename + "_" + cmd_name
            populated_command = cmd.replace("[TARGET]", vhost).replace("[OUTPUT]", outfile)
            populated_command = replace_user_config_options(config_file, populated_command)

            if simulation:
                populated_command = "#" + populated_command


            task_id = uuid()
            scanned_service_port = ""
            scanned_service_name = ""
            scanned_service_protocol = ""
            #utils.create_task(cmd_name, populated_command, vhost, outfile + ".txt", workspace, task_id)
            populated_command_list.append((cmd_name, populated_command, vhost, outfile + ".txt", workspace, task_id, scanned_service_port, scanned_service_name, scanned_service_protocol))

    if not simulation:
        db.update_vhosts_submitted(vhost_ip[0], vhost, workspace, 1)

    ###################################
    # Time to parse the services from the DB
    ###################################

    if workspace_mode == "vapt":
        db_services = db.get_all_services_for_ip(vhost_ip[0], workspace)
    elif workspace_mode == "bb":
        db_services = db.get_all_services_for_ip(vhost, workspace)

    for db_service in db_services:
        (ip, scanned_service_port, scanned_service_protocol, scanned_service_name, product, version, extra_info) = db_service

        scan_output_base_file_name = host_data_dir + vhost + "_" + str(
            scanned_service_port) + "_" + scanned_service_protocol + "_"

        # If the service name is not in the supported service list, give the user notice so they can add the service
        # and add some commands to the service. This is a major GAP right now. If the service is not in the config,
        # the script completely ignores it, which is not good!
        if scanned_service_name not in supported_services:
            print(
                "[!] Nmap reports {0}:{1} is running: [{2}]. There are no commands to run against {2} in config.ini.".format(
                    vhost, scanned_service_port, scanned_service_name))
            summary_file.write(
                "[!] Nmap reports {0}:{1} is running: [{2}]. There are no commands to run against {2} in config.ini\n".format(
                    vhost, scanned_service_port, scanned_service_name))
            # updated_port_scan = utils.nmap_follow_up_scan(vhost, scanned_service_port)
            # scanned_service_name = updated_port_scan.hosts[0]._services[0].service
            cmd_name = "nmap_service_scan"
            populated_command = 'nmap -sV -sC -Pn -p {0} -oN {1}_nmap_service_scan.txt {2}'.format(
                scanned_service_port, scan_output_base_file_name, vhost)
            populated_command = replace_user_config_options(config_file, populated_command)

            if simulation:
                populated_command = "#" + populated_command

            outfile = scan_output_base_file_name + "_nmap_service_scan.txt"

            task_id = uuid()
            populated_command_list.append((cmd_name, populated_command, vhost, outfile, workspace, task_id,scanned_service_port, scanned_service_name,scanned_service_protocol))
        else:
            for (key, val) in config.items("nmap-service-names"):
                services = val.split(",")
                for service in services:
                    if service == scanned_service_name:
                        mapped_service_name = key
                        # print(config.items(mapped_service_name))
                        for (cmd_name, cmd) in config.items(mapped_service_name):
                            outfile = scan_output_base_file_name + cmd_name
                            populated_command = cmd.replace("[TARGET]", vhost).replace("[PORT]", str(
                                scanned_service_port)).replace("[OUTPUT]", outfile).replace("/[PATH]",
                                                                                            "")
                            populated_command = replace_user_config_options(config_file, populated_command)

                            if simulation:
                                # debug - sends jobs to celery, but with a # in front of every one.
                                populated_command = "#" + populated_command

                            # Grab a UUID from celery.utils so it can be assigned to the task at init,
                            # which allows passing the same id to all of the tasks in the chain.

                            task_id = uuid()
                            populated_command_list.append((cmd_name, populated_command, vhost, outfile + ".txt",
                                                           workspace, task_id, scanned_service_port, scanned_service_name, scanned_service_protocol))
    return populated_command_list
Example #59
def process_db_services(output_base_dir, simulation, workspace, target=None, host=None, config_file=None):
    celery_path = sys.path[0]
    config, supported_services = config_parser.read_config_ini(config_file)
    task_id_list = []
    total_tasks_num = 0
    if host:
        target = db.get_vhost_ip(host)
    try:
        os.stat(output_base_dir)
    except OSError:
        print("[+] Output directory does not exist. Creating " + output_base_dir)
        os.makedirs(output_base_dir)
    #unique_hosts = db.get_unique_hosts(workspace)
    unique_unscanned_vhosts = db.get_inscope_unsubmitted_vhosts(workspace)
    for row in unique_unscanned_vhosts:

        vhost = row[0]
        vhost_ip = db.get_vhost_ip(vhost, workspace)[0]

        if (IPAddress(vhost_ip[0]) == target) or (target is None):
            host_dir = output_base_dir + vhost
            host_data_dir = host_dir + "/celerystalkOutput/"
            # Creates something like /pentest/10.0.0.1, /pentest/10.0.0.2, etc.
            utils.create_dir_structure(vhost, host_dir)
            #Next two lines create the file that will contain each command that was executed. This is not the audit log,
            #but a log of commands that can easily be copy/pasted if you need to run them again.
            summary_file_name = host_data_dir + "ScanSummary.log"
            summary_file = open(summary_file_name, 'a')

            # This is just a workaround until there is a real solution. Really, UDP scans should be done
            # for every host in the scanned host list: launch a quick UDP scan (top 100 ports).
            scan_output_base_host_filename = host_data_dir + vhost

            for (cmd_name, cmd) in config.items("nmap-commands"):
                if cmd_name == "udp_scan":
                    #print(cmd_name,cmd)
                    outfile = scan_output_base_host_filename + "_" + cmd_name
                    populated_command = cmd.replace("[TARGET]", vhost).replace("[OUTPUT]", outfile)
                    populated_command = replace_user_config_options(config_file, populated_command)

                    #print(cmd)

                    #cmd_name = "udp-top100"
                    #populated_command = 'nmap -sV -sC -Pn -sU --top-ports 100 -oN {0}_nmap_UDP_service_scan.txt -oX {0}_nmap_UDP_service_scan.xml {1}'.format(
                    #    scan_output_base_host_filename, vhost)
                    if simulation:
                        populated_command = "#" + populated_command
                    #outfile = scan_output_base_host_filename + "_nmap_UDP_service_scan.txt"
                    task_id = uuid()
                    utils.create_task(cmd_name, populated_command, vhost, outfile + ".txt", workspace, task_id)
                    result = chain(
                        # insert a row into the database to mark the task as submitted. a subtask does not get tracked
                        # in celery the same way a task does, for instance, you can't find it in flower
                        #tasks.cel_create_task.subtask(args=(cmd_name, populated_command, vhost, outfile + ".txt", workspace, task_id)),

                        # run the command. run_task takes care of marking the task as started and then completed.
                        # The si tells run_cmd to ignore the data returned from a previous task
                        tasks.run_cmd.si(cmd_name, populated_command, celery_path, task_id).set(task_id=task_id),

                    )()  # .apply_async()
                    task_id_list.append(result.task_id)


            if not simulation:
                db.update_vhosts_submitted(vhost, vhost, workspace, 1)

            db_services = db.get_all_services_for_ip(vhost_ip[0], workspace)

            for db_service in db_services:
                (ip, scanned_service_port, scanned_service_protocol, scanned_service_name, product, version, extra_info, workspace) = db_service

                scan_output_base_file_name = host_data_dir + vhost + "_" + str(scanned_service_port) + "_" + scanned_service_protocol + "_"

                #If the service name is not in the supported service list, give the user notice so they can add the service
                # and add some commands to the service. This is a major GAP right now. If the service is not in the config,
                # the script completely ignores it, which is not good!
                if scanned_service_name not in supported_services:
                    print("[!] Nmap reports {0}:{1} is running: [{2}]. There are no commands to run against {2} in config.ini.".format(vhost, scanned_service_port, scanned_service_name))
                    summary_file.write("[!] Nmap reports {0}:{1} is running: [{2}]. There are no commands to run against {2} in config.ini\n".format(vhost, scanned_service_port, scanned_service_name))
                    #updated_port_scan = utils.nmap_follow_up_scan(vhost, scanned_service_port)
                    #scanned_service_name = updated_port_scan.hosts[0]._services[0].service
                    cmd_name = "nmap_service_scan"
                    populated_command = 'nmap -sV -sC -Pn -p {0} -oN {1}_nmap_service_scan.txt {2}'.format(
                        scanned_service_port, scan_output_base_file_name, vhost)
                    populated_command = replace_user_config_options(config_file, populated_command)

                    if simulation:
                        populated_command = "#" + populated_command

                    outfile = scan_output_base_file_name + "_nmap_service_scan.txt"

                    task_id = uuid()
                    utils.create_task(cmd_name, populated_command, vhost, outfile, workspace, task_id)
                    result = chain(
                        # insert a row into the database to mark the task as submitted. a subtask does not get tracked
                        # in celery the same way a task does, for instance, you can't find it in flower
                        #tasks.cel_create_task.subtask(args=(cmd_name, populated_command, vhost, outfile , workspace, task_id)),

                        # run the command. run_task takes care of marking the task as started and then completed.
                        # The si tells run_cmd to ignore the data returned from a previous task
                        tasks.run_cmd.si(cmd_name, populated_command, celery_path, task_id).set(task_id=task_id),

                    )()  # .apply_async()

                    task_id_list.append(result.task_id)
                else:
                    parse_config_and_send_commands_to_celery(scanned_service_name, scanned_service_port, scan_output_base_file_name, config, simulation, output_base_dir, host_dir, workspace, task_id_list,vhost,scanned_service_protocol)
                #task_id_list = task_id_list + new_tasks_list
            summary_file.close()

            print("[+] Submitted {0} tasks to the queue.".format(len(task_id_list)))
            total_tasks_num = total_tasks_num + len(task_id_list)
            task_id_list = []
    print("\n\n[+] Summary:\tSubmitted {0} tasks to the [{1}] workspace.".format(total_tasks_num,workspace))
    print("[+]\t\tThere might be additional tasks added to the queue during post processing\n[+]")
    print("[+]\t\tTo keep an eye on things, run one of these commands: \n[+]")
    print("[+]\t\tcelerystalk query [watch]")
    print("[+]\t\tcelerystalk query brief [watch]")
    print("[+]\t\tcelerystalk query summary [watch]\n")