Example #1
    def on_timeout(self, soft, timeout):
        """Handler called if the task times out."""
        task_ready(self)
        if soft:
            warn('Soft time limit (%ss) exceeded for %s[%s]', soft, self.name,
                 self.id)
            exc = SoftTimeLimitExceeded(soft)
        else:
            error('Hard time limit (%ss) exceeded for %s[%s]', timeout,
                  self.name, self.id)
            exc = TimeLimitExceeded(timeout)

        self.task.backend.mark_as_failure(
            self.id,
            exc,
            request=self,
            store_result=self.store_errors,
        )

        if self.task.acks_late:
            self.acknowledge()
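
The handler above only fires for tasks that actually declare time limits. A minimal sketch, assuming a standard Celery app, of how the two limits are configured; do_work and cleanup are hypothetical placeholders and the broker URL is illustrative:

from celery import Celery
from celery.exceptions import SoftTimeLimitExceeded

app = Celery('demo', broker='redis://localhost:6379/0')  # illustrative broker URL

@app.task(bind=True, soft_time_limit=60, time_limit=90, acks_late=True)
def crunch(self):
    try:
        do_work()  # hypothetical long-running section
    except SoftTimeLimitExceeded:
        cleanup()  # soft limit fired: tidy up before the hard limit kills the process
        raise

With these settings the soft limit raises SoftTimeLimitExceeded inside the task after 60 seconds, while the hard limit terminates the worker process at 90 seconds; those are exactly the two branches the handler above distinguishes.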
Example #2
    def wrapper(*args, **kwargs):
        # Failure messages
        MSG_T_SOFT = '<=Soft Time Limit=> Task soft time limit exceeded. {}.{}'
        MSG_FAIL = '<=TestExecTasks=> Task fail "{}.{}" ! Error output: {}'

        # TODO: Remove debug logging

        try:
            return function(*args, **kwargs)
        except SoftTimeLimitExceeded:
            log.error("Task SoftTimeLimitExceeded: %s", (function, args, kwargs))
            TMail.t_lim(function, *args, **kwargs)
            raise SoftTimeLimitExceeded(MSG_T_SOFT.format(function.__module__, function.__name__))
        except WorkerLostError as e:
            log.error("Task WorkerLostError: %s", (function, e, args, kwargs))
            TMail.t_fail(function, e, *args, **kwargs)
            raise Exception(MSG_FAIL.format(function.__module__, function.__name__, e))
        except Exception as e:
            log.error("Task Exception: %s", (function, e, args, kwargs))
            TMail.t_fail(function, e, *args, **kwargs)
            raise Exception(MSG_FAIL.format(function.__module__, function.__name__, e))
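
The wrapper above closes over a free variable named function, so it is evidently the inner function of a decorator whose outer def was cut off by the snippet. A minimal sketch of an assumed enclosing decorator; the name task_guard is hypothetical, and TMail and log are app-specific helpers imported elsewhere in the source project:

import functools

def task_guard(function):  # hypothetical name; only the inner wrapper appears in the snippet
    @functools.wraps(function)
    def wrapper(*args, **kwargs):
        ...  # exception-handling body shown above
    return wrapper

With Celery, such a guard would typically be applied beneath the task decorator (@app.task above @task_guard) so that the wrapper runs inside the task body.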
Example #3
    def test_sql_json_soft_timeout(self):
        examples_db = get_example_database()
        if examples_db.backend == "sqlite":
            return

        self.login("admin")

        with mock.patch.object(examples_db.db_engine_spec,
                               "handle_cursor") as handle_cursor:
            handle_cursor.side_effect = SoftTimeLimitExceeded()
            data = self.run_sql("SELECT * FROM birth_names LIMIT 1", "1")

        assert data == {
            "errors": [
                {
                    "message": (
                        "The query was killed after 21600 seconds. It might be too complex, "
                        "or the database might be under heavy load."
                    ),
                    "error_type": SupersetErrorType.SQLLAB_TIMEOUT_ERROR,
                    "level": ErrorLevel.ERROR,
                    "extra": {
                        "issue_codes": [
                            {
                                "code": 1026,
                                "message": "Issue 1026 - Query is too complex and takes too long to run.",
                            },
                            {
                                "code": 1027,
                                "message": "Issue 1027 - The database is currently running too many queries.",
                            },
                        ]
                    },
                }
            ]
        }
Example #4
    def test_soft_timeout_load_chart_data_into_cache(
        self, mock_update_job, mock_run_command
    ):
        async_query_manager.init_app(app)
        user = security_manager.find_user("gamma")
        form_data = {}
        job_metadata = {
            "channel_id": str(uuid4()),
            "job_id": str(uuid4()),
            "user_id": user.id,
            "status": "pending",
            "errors": [],
        }
        errors = ["A timeout occurred while loading chart data"]

        with pytest.raises(SoftTimeLimitExceeded):
            with mock.patch.object(
                async_queries, "ensure_user_is_set",
            ) as ensure_user_is_set:
                ensure_user_is_set.side_effect = SoftTimeLimitExceeded()
                load_chart_data_into_cache(job_metadata, form_data)
            ensure_user_is_set.assert_called_once_with(user.id)
            mock_update_job.assert_called_once_with(job_metadata, "error", errors=errors)
Example #5
def test_soft_timeout_alert(email_mock, create_alert_email_chart):
    """
    ExecuteReport Command: Test soft timeout on alert queries
    """
    from celery.exceptions import SoftTimeLimitExceeded
    from superset.reports.commands.exceptions import AlertQueryTimeout

    with patch.object(create_alert_email_chart.database.db_engine_spec,
                      "execute",
                      return_value=None) as execute_mock:
        execute_mock.side_effect = SoftTimeLimitExceeded()
        with pytest.raises(AlertQueryTimeout):
            AsyncExecuteReportScheduleCommand(create_alert_email_chart.id,
                                              datetime.utcnow()).run()

    notification_targets = get_target_from_report_schedule(
        create_alert_email_chart)
    # Assert the email smtp address, asserts a notification was sent with the error
    assert email_mock.call_args[0][0] == notification_targets[0]

    assert_log(ReportState.ERROR,
               error_message="A timeout occurred while executing the query.")
Example #6
    def test_softtimeout_exception(self, *mocks):
        self.app.config['MAX_TRANSMIT_RETRY_ATTEMPT'] = 4
        self.app.config['CELERY_TASK_ALWAYS_EAGER'] = False
        subscriber = {
            '_id': ObjectId('56c11bd78b84bb00b0a1905e'),
            'sequence_num_settings': {'max': 9999, 'min': 1},
            '_etag': 'f16b7eaa566f68b8d2561c811ec694bdf819784d',
            'is_active': True,
            'destinations': [{'delivery_type': 'email'}],
            'email': '*****@*****.**',
            'subscriber_type': 'digital',
            'name': 'Test',
        }

        self.app.data.insert('subscribers', [subscriber])

        items = [{'_id': ObjectId(), 'state': 'pending', 'item_id': 'item_1', 'item_version': 4,
                  'headline': 'pending headline', 'destination': {'delivery_type': 'email'},
                  'subscriber_id': subscriber['_id'], 'formatted_item': 'test'},
                 {'_id': ObjectId(), 'state': 'pending', 'item_id': 'item_2', 'item_version': 4,
                  'headline': 'pending headline 2', 'destination': {'delivery_type': 'email'},
                  'subscriber_id': subscriber['_id'], 'formatted_item': 'test'}
                 ]

        self.app.data.insert('publish_queue', items)

        fake_transmitter = MagicMock()
        fake_transmitter.transmit.side_effect = SoftTimeLimitExceeded()

        fake_transmitters_list = mocks[0]
        fake_transmitters_list.__getitem__.return_value = fake_transmitter

        with self.assertRaises(SoftTimeLimitExceeded):
            superdesk.publish.publish_content.transmit_subscriber_items(subscriber.get('_id'), False)
        failed_item = self.app.data.find_one('publish_queue', req=None, _id=items[0].get('_id'))
        self.assertEqual(failed_item['state'], 'retrying')
        pending_item = self.app.data.find_one('publish_queue', req=None, _id=items[1].get('_id'))
        self.assertEqual(pending_item['state'], 'pending')
        self.app.config['CELERY_TASK_ALWAYS_EAGER'] = True
Example #7
    def test_get_sql_results_soft_time_limit(self, mock_execute_sql_statement,
                                             mock_get_query):
        from celery.exceptions import SoftTimeLimitExceeded

        sql = """
            -- comment
            SET @value = 42;
            SELECT @value AS foo;
            -- comment
        """
        mock_get_query.side_effect = SoftTimeLimitExceeded()
        with pytest.raises(SqlLabTimeoutException) as excinfo:
            get_sql_results(
                1,
                sql,
                return_results=True,
                store_results=False,
            )
        assert (
            str(excinfo.value) ==
            "SQL Lab timeout. This environment's policy is to kill queries after 21600 seconds."
        )
Example #8
class TimeoutTest(TestCase):

    published_items = [{
        "_id": ObjectId("58006b8d1d41c88eace5179d"),
        "item_id": "1",
        "_created": utcnow(),
        "_updated": utcnow(),
        "queue_state": "pending",
        "state": "published",
        "operation": "publish"
    }]

    def setUp(self):
        with self.app.app_context():
            init_app(self.app)

    @mock.patch('apps.publish.enqueue.get_enqueue_service',
                side_effect=SoftTimeLimitExceeded())
    def test_soft_timeout_gets_re_queued(self, mock):
        self.app.data.insert('published', self.published_items)
        enqueue_published()
        published = self.app.data.find(PUBLISHED, None, None)
        self.assertEqual(published[0].get('queue_state'), 'pending')
Example #9
def runLab(self, j_id, uid, serv_id):
    try:
        serv = Server.objects.get(id=serv_id)
        server = serv.ip
        user = User.objects.get(id=uid)
        job_not_found = False
        task_Err = Error(owner=user, from_job_id=j_id, errors="")
        error_flag = False
        b = None
        try:
            job = Job.objects.get(owner=user, jid=j_id)
            job.status = 'RUNNING'
            job.hostname = serv.hostname
            job.task_id = self.request.id
            job.time_started = datetime.datetime.now()
            job.save()
        except Job.DoesNotExist:
            print("Job not found")
            job_not_found = True
            task_Err.errors += "Job not found"
            error_flag = True
            raise SoftTimeLimitExceeded()
        progress_recorder = ProgressRecorder(self)

        toReturn = ""
        try:
            path = "/home/perfserv/uploads/" + str(uid) + "/" + str(j_id) + "/"

            #print path
            config = open(path + "/config.txt", "r")
            progress_recorder.set_progress(1, 100)
            job.cur_action = "Setting Up"
            job.save()

            a = "ssh -i /home/perfserv/.ssh/id_rsa -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no perfuser@" + server + " \"rm -rf perflab-setup\""
            b = Popen(a, shell=True, stdout=PIPE, stderr=PIPE)
            b.wait()
            c = b.stdout.read()
            e = b.stderr.read().decode()
            progress_recorder.set_progress(2, 100)

            a = "ssh -i /home/perfserv/.ssh/id_rsa -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no perfuser@" + server + " \"cp -rf perflab-files perflab-setup\""
            b = Popen(a, shell=True, stdout=PIPE, stderr=PIPE)
            b.wait()
            c = b.stdout.read()
            progress_recorder.set_progress(3, 100)

            job.cur_action = "Scanning FilterMain"
            job.save()

            f = open(path + "FilterMain.cpp", "r")
            for line in f:
                if "unistd" in line:
                    task_Err.errors += "FilterMain.cpp:\nIllegal Library unistd\n"
                    task_Err.save()
                    error_flag = True
            if not error_flag:
                a = "scp -i /home/perfserv/.ssh/id_rsa -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no " + path + "FilterMain.cpp perfuser@" + server + ":~/perflab-setup/"
                b = Popen(a, shell=True, stdout=PIPE, stderr=PIPE)
                b.wait()
                c = b.stdout.read()
            progress_recorder.set_progress(4, 100)

            job.cur_action = "Sending Files"
            job.save()
            """For each file in config, check illegal lib, then copy to server"""

            for line in config:
                #print line
                line = line.split()
                if line[1] == "Y":
                    f = open(path + line[0], "r")
                    print(line[0])
                    for line2 in f:
                        if "unistd" in line2:
                            task_Err.errors += (line[0] + ": illegal unistd\n")
                            task_Err.save()
                            error_flag = True
                    if not error_flag:
                        a = "scp -i /home/perfserv/.ssh/id_rsa -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no " + path + str(
                            line[0]
                        ) + " perfuser@" + server + ":~/perflab-setup/"
                        print(a)
                        b = Popen(a, shell=True, stdout=PIPE, stderr=PIPE)
                        b.wait()
                        c = b.stdout.read()
            if error_flag:
                job.status = "ERROR"
                task_Err.save()
                raise SoftTimeLimitExceeded()
            progress_recorder.set_progress(5, 100)

            job.cur_action = "Compiling..."
            job.save()

            a = "ssh -i /home/perfserv/.ssh/id_rsa -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no perfuser@" + server + " \"cd perflab-setup/ ; make filter\""
            b = Popen(a, shell=True, stdout=PIPE, stderr=PIPE)
            b.wait()
            c = b.stdout.read()
            e = b.stderr.read().decode()
            if len(e) > 0:
                if "Error" in e:
                    task_Err.errors = "Failed at Make:\n"
                    task_Err.errors += e
                    task_Err.save()
                    raise SoftTimeLimitExceeded()
                #print e
                if not "ECDSA" in e:
                    task_Err.errors = "Failed at Make:\n"
                    task_Err.errors += e
                    task_Err.save()
                    raise SoftTimeLimitExceeded()
            #print c
            progress_recorder.set_progress(10, 100)
            job.cur_action = "Running Gauss"
            job.save()

            status = 10.0
            tests = 5
            increment = (100.0 - status) / (4.0 * float(tests))
            scores = []
            #GAUSS
            gauss = []
            count = 0
            while count < tests:
                a = "ssh -i /home/perfserv/.ssh/id_rsa -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no perfuser@" + server + " \"cd perflab-setup/ ; ./gauss.sh\""
                print(a)
                b = Popen(a, shell=True, stdout=PIPE, stderr=PIPE)
                b.wait()
                c = b.stdout.read()
                line = c.split()
                try:
                    print(line)
                    score = float(line[-1])
                    print(score)
                    if 0 < score <= 9000:  # Check for and ignore odd scores
                        scores = scores + [score]
                        gauss = gauss + [score]
                        status = status + increment
                        count = count + 1
                        progress_recorder.set_progress(status, 100)

                except:
                    task_Err.errors += "Gauss:\n " + str(
                        sys.exc_info()) + " " + serv.hostname + " " + str(
                            c) + "\n" + str(a) + "\n" + str(
                                b.stderr.read()) + "\n"
                    task_Err.save()
                    raise SoftTimeLimitExceeded()
            job.cur_action = "Running Avg"
            job.save()
            #AVG
            count = 0
            avg = []
            while count < tests:
                a = "ssh -i /home/perfserv/.ssh/id_rsa -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no perfuser@" + server + " \"cd perflab-setup/ ; ./avg.sh\""
                b = Popen(a, shell=True, stdout=PIPE, stderr=PIPE)
                b.wait()
                c = b.stdout.read()
                line = c.split()
                try:
                    score = float(line[-1])
                    if 0 < score <= 9000:  # Check for and ignore odd scores
                        scores = scores + [score]
                        avg = avg + [score]
                        status = status + increment
                        count = count + 1
                        progress_recorder.set_progress(status, 100)

                except:
                    task_Err.errors += "Avg:\n " + str(
                        sys.exc_info()) + " " + serv.hostname + " " + str(
                            c) + "\n" + str(a) + "\n" + str(
                                b.stderr.read()) + "\n"
                    task_Err.save()
                    raise SoftTimeLimitExceeded()

            job.cur_action = "Running HLine"
            job.save()
            #HLINE
            count = 0
            hline = []
            while count < tests:
                a = "ssh -i /home/perfserv/.ssh/id_rsa -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no perfuser@" + server + " \"cd perflab-setup/ ; ./hline.sh\""
                b = Popen(a, shell=True, stdout=PIPE, stderr=PIPE)
                b.wait()
                c = b.stdout.read()
                e = b.stderr.read().decode()
                line = c.split()
                try:
                    score = float(line[-1])
                    if 0 < score <= 9000:  # Check for and ignore odd scores
                        scores = scores + [score]
                        hline = hline + [score]
                        status = status + increment
                        count = count + 1
                        progress_recorder.set_progress(status, 100)

                except:
                    task_Err.errors += "Hline:\n " + str(
                        sys.exc_info()) + " " + serv.hostname + " " + str(
                            c) + "\n" + str(a) + "\n" + str(
                                b.stderr.read()) + "\n"
                    task_Err.save()
                    error_flag = True
                    raise SoftTimeLimitExceeded()
            job.cur_action = "Running Emboss"
            job.save()
            #EMBOSS
            count = 0
            emboss = []
            while count < tests:
                a = "ssh -i /home/perfserv/.ssh/id_rsa -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no perfuser@" + server + " \"cd perflab-setup/ ; ./emboss.sh\""
                b = Popen(a, shell=True, stdout=PIPE, stderr=PIPE)
                b.wait()
                c = b.stdout.read().decode()

                line = c.split()
                #print c
                try:
                    score = float(line[-1])
                    if 0 < score <= 9000:  # Check for and ignore odd scores
                        scores = scores + [score]
                        emboss = emboss + [score]
                        status = status + increment
                        count = count + 1
                        progress_recorder.set_progress(status, 100)

                except:
                    #print e
                    task_Err.errors += "emboss " + str(
                        sys.exc_info()) + " " + serv.hostname + "\n"
                    task_Err.save()
                    error_flag = True
                    raise SoftTimeLimitExceeded()

            scores.sort()
            #print scores

            job.status = "SCORING"
            job.cur_action = "Moving numbers around..."
            job.save()
            toReturn += "gauss:\n"
            for g in gauss:
                toReturn += str(g) + "..\n "
            toReturn += "\navg:\n"
            for a in avg:
                toReturn += str(a) + "..\n "
            toReturn += "\nhline:\n"
            for h in hline:
                toReturn += str(h) + "..\n "
            toReturn += "\nemboss:\n"
            for e in emboss:
                toReturn += str(e) + "..\n "
            toReturn += "\nScores are:\n"
            count = 0
            for s in scores:
                if count < 4:
                    toReturn += str(int(s)) + " "
                    count += 1
                else:
                    toReturn += str(int(s)) + "\n"
                    count = 0
            cpe = scores[int((len(scores) + 1) / 2)]
            toReturn += "\nmedian CPE is " + str(int(cpe)) + "\n"
            if cpe > 4000:
                score = 0
            else:
                #score = math.log(6000-cpe) * 46.93012749-305.91731341
                score = 119.653 * math.exp(-0.001196 * cpe)
                if score > 110:
                    score = 110
            score = int(score)
            toReturn += "\nResulting score is " + str(score)
            job.status = "COMPLETE \nScore: " + str(score)
            job.cur_action = "Score:" + str(score)
            job.save()
        except:
            task_Err.errors += "\nUnexpected error: " + str(sys.exc_info())
            task_Err.save()
            error_flag = True
            raise SoftTimeLimitExceeded()
        newAttempt = Attempt(owner=user,
                             note_field=job.note_field,
                             score=score,
                             result_out=toReturn,
                             time_stamp=job.time_created)
        newAttempt.save()
        if task_Err.id is not None:
            task_Err.delete()
        job.deletable = True
        job.save()
        red.incr('server')
        progress_recorder.set_progress(100, 100)
        return "runLab Completed successfully"
    except SoftTimeLimitExceeded:
        if error_flag:
            err_block = task_Err.errors
            task_Err.errors = "A team of flying monkeys has been dispatched. When they show up (eventually), show them this log.\n"
            task_Err.errors += err_block
            task_Err.save()
            job.cur_action = "ERROR"
            job.status = "ERROR"
            job.save()
        if b is not None:
            b.kill()
        a = "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no perfuser@" + server + " \"killall -u perfuser;\""
        b = Popen(a, shell=True, stdout=PIPE, stderr=PIPE)
        b.wait()
        if not job_not_found:
            job.deletable = True
            job.save()
        #print(b.stdout.read())
        serv.inUse = False
        serv.uID = -1
        red.incr('server')
        if error_flag:
            self.update_state(state='FAILURE')
            return "Task failed with errors"
        else:
            return "Task Stopped by user"
    except Exception as e:
        print(e)
Example #10
def soft_timeout_sighandler(signum, frame):
    raise SoftTimeLimitExceeded()
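
This is the mechanism behind soft limits on POSIX systems: the worker arms a timer, and a signal handler raises the exception inside the running task. Celery delivers the soft limit via SIGUSR1 (compare the revoke(..., signal="SIGUSR1") call in Example #15). A minimal self-contained sketch that drives the handler by hand instead of waiting for a real timer:

import os
import signal

from celery.exceptions import SoftTimeLimitExceeded

def soft_timeout_sighandler(signum, frame):
    raise SoftTimeLimitExceeded()

signal.signal(signal.SIGUSR1, soft_timeout_sighandler)

try:
    os.kill(os.getpid(), signal.SIGUSR1)  # stand-in for the worker's timer expiring
except SoftTimeLimitExceeded:
    print('soft limit handler fired')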
Example #11
def process_data(search_term, client_ip, browser, proxy, test):
    celery.current_task.update_state(state=states.STARTED, meta={'progress': 'downloading data...'})
    try:
        if proxy != '':
            prox = Proxy()
            prox.proxy_type = ProxyType.MANUAL
            prox.http_proxy = proxy
            prox.socks_proxy = proxy
            prox.ssl_proxy = proxy
        if browser == 'Chrome':
            capabilities = DesiredCapabilities.CHROME
            try:
                prox.add_to_capabilities(capabilities)
            except NameError:
                pass
            host = 'selenium-chrome'
        elif browser == 'Firefox':
            capabilities = DesiredCapabilities.FIREFOX
            try:
                prox.add_to_capabilities(capabilities)
            except NameError:
                pass
            host = 'selenium-firefox'
        driver = webdriver.Remote(command_executor=f'http://{host}:4444/wd/hub',
                                  desired_capabilities=capabilities)
        page_source = get_from_google(driver, search_term, test)
        soup = BeautifulSoup(page_source, "html5lib")
        num = soup.select('#resultStats')[0].getText()
        num = num.replace('\xa0', '')
        num = int(re.findall(r'\s\d+', num)[0])
        processed_data = [num, ]
        results = soup.find_all('div', class_='g')
        rank = 1
        for item in results:
            link = item.a.get('href')
            try:
                title = item.h3.getText()
            except AttributeError:
                continue
            text_field = item.find(class_='st')
            try:
                text_field.span.replace_with(' ')
            except AttributeError:
                pass
            try:
                text = text_field.getText()
            except AttributeError:
                continue
            processed_data.append({'title': title,
                                   'link': link,
                                   'text': text,
                                   'rank': rank,
                                   })
            rank += 1
        num_results = processed_data[0]
        popular_words = count_words(processed_data)
        q, created = Query.objects.update_or_create(text=search_term, defaults=dict(
                                                    popular_words=popular_words, num_results=num_results,
                                                    client_ip=client_ip, browser=browser))
        if not created:
            Link.objects.filter(qu=q).all().delete()
        bulk = [Link(qu=q, title=item['title'], link=item['link'],
                     description=item['text'], position=item['rank']) for item in processed_data[1:]]
        Link.objects.bulk_create(bulk)
    except SoftTimeLimitExceeded:
        # The soft limit can fire before the driver exists, so guard the cleanup.
        if 'driver' in locals():
            driver.close()
        raise SoftTimeLimitExceeded('Timeout limit!')
    return "Success!"
Example #12
    def test_softtimeout_exception(self, *mocks):
        self.app.config["MAX_TRANSMIT_RETRY_ATTEMPT"] = 4
        self.app.config["CELERY_TASK_ALWAYS_EAGER"] = False
        subscriber = {
            "_id": ObjectId("56c11bd78b84bb00b0a1905e"),
            "sequence_num_settings": {
                "max": 9999,
                "min": 1
            },
            "_etag": "f16b7eaa566f68b8d2561c811ec694bdf819784d",
            "is_active": True,
            "destinations": [{
                "delivery_type": "email"
            }],
            "email": "*****@*****.**",
            "subscriber_type": "digital",
            "name": "Test",
        }

        self.app.data.insert("subscribers", [subscriber])

        items = [
            {
                "_id": ObjectId(),
                "state": "pending",
                "item_id": "item_1",
                "item_version": 4,
                "headline": "pending headline",
                "destination": {
                    "delivery_type": "email"
                },
                "subscriber_id": subscriber["_id"],
                "formatted_item": "test",
            },
            {
                "_id": ObjectId(),
                "state": "pending",
                "item_id": "item_2",
                "item_version": 4,
                "headline": "pending headline 2",
                "destination": {
                    "delivery_type": "email"
                },
                "subscriber_id": subscriber["_id"],
                "formatted_item": "test",
            },
        ]

        self.app.data.insert("publish_queue", items)

        fake_transmitter = MagicMock()
        fake_transmitter.transmit.side_effect = SoftTimeLimitExceeded()

        fake_transmitters_list = mocks[0]
        fake_transmitters_list.__getitem__.return_value = fake_transmitter

        with self.assertRaises(SoftTimeLimitExceeded):
            superdesk.publish.publish_content.transmit_subscriber_items(
                subscriber.get("_id"), False)
        failed_item = self.app.data.find_one("publish_queue",
                                             req=None,
                                             _id=items[0].get("_id"))
        self.assertEqual(failed_item["state"], "retrying")
        pending_item = self.app.data.find_one("publish_queue",
                                              req=None,
                                              _id=items[1].get("_id"))
        self.assertEqual(pending_item["state"], "pending")
        self.app.config["CELERY_TASK_ALWAYS_EAGER"] = True
Example #13
    async def trace_task(uuid, args, kwargs, request=None):
        # R      - is the possibly prepared return value.
        # I      - is the Info object.
        # T      - runtime
        # Rstr   - textual representation of return value
        # retval - is the always unmodified return value.
        # state  - is the resulting task state.

        # This function is very long because we've unrolled all the calls
        # for performance reasons, and because the function is so long
        # we want the main variables (I and R) to stand out visually from
        # the rest of the variables, so breaking PEP8 is worth it ;)
        R = I = T = Rstr = retval = state = None
        task_request = None
        time_start = monotonic()
        try:
            try:
                kwargs.items
            except AttributeError:
                raise InvalidTaskError(
                    'Task keyword arguments is not a mapping')
            push_task(task)
            task_request = trace.Context(
                request or {},
                args=args,
                called_directly=False,
                kwargs=kwargs,
            )
            root_id = task_request.root_id or uuid
            task_priority = task_request.delivery_info.get('priority') if \
                inherit_parent_priority else None
            push_request(task_request)
            try:
                # -*- PRE -*-
                if prerun_receivers:
                    send_prerun(sender=task,
                                task_id=uuid,
                                task=task,
                                args=args,
                                kwargs=kwargs)
                loader_task_init(uuid, task)
                if track_started:
                    store_result(
                        uuid,
                        {
                            'pid': pid,
                            'hostname': hostname
                        },
                        STARTED,
                        request=task_request,
                    )

                # -*- TRACE -*-
                try:
                    coro = fun(*args, **kwargs)
                    coro_task = asyncio.create_task(coro)

                    waiter = asyncio.wait(
                        [coro_task],
                        timeout=task.soft_time_limit,
                    )

                    waiter_task = asyncio.create_task(waiter)

                    try:
                        await waiter_task

                        if coro_task.done():
                            R = retval = coro_task.result()
                        else:
                            R = retval = await coro_utils.send_exception(
                                coro,
                                SoftTimeLimitExceeded(),
                            )
                            await coro_utils.await_anyway(coro_task)

                    except asyncio.CancelledError as exc:
                        waiter_task.cancel()
                        coro_task.cancel()
                        waiter.close()
                        coro.close()
                        exc = CeleryTimeoutError(exc)
                        exc = str(exc)
                        I, R, state, retval = on_error(task_request, exc, uuid)

                    except SoftRevoked:
                        R = retval = await coro_utils.send_exception(
                            coro,
                            SoftTimeLimitExceeded(),
                        )
                        await coro_utils.await_anyway(coro_task)

                    state = SUCCESS
                except Reject as exc:
                    waiter_task.cancel()
                    coro_task.cancel()
                    try:
                        await coro_utils.await_anyway(coro_task)
                    except asyncio.CancelledError:
                        pass

                    I = Info(REJECTED, exc)
                    R = ExceptionInfo(internal=True)
                    state, retval = I.state, I.retval
                    I.handle_reject(task, task_request)
                except Ignore as exc:
                    I = Info(IGNORED, exc)
                    R = ExceptionInfo(internal=True)
                    state, retval = I.state, I.retval
                    I.handle_ignore(task, task_request)
                except Retry as exc:
                    I, R, state, retval = on_error(task_request,
                                                   exc,
                                                   uuid,
                                                   RETRY,
                                                   call_errbacks=False)
                except SoftTimeLimitExceeded as exc:
                    I, R, state, retval = on_error(task_request, exc, uuid)
                except Exception as exc:
                    coro.close()
                    await coro_utils.await_anyway(waiter_task)
                    I, R, state, retval = on_error(task_request, exc, uuid)
                except BaseException:
                    raise
                else:
                    try:
                        # callback tasks must be applied before the result is
                        # stored, so that result.children is populated.

                        # groups are called inline and will store trail
                        # separately, so need to call them separately
                        # so that the trail's not added multiple times :(
                        # (Issue #1936)
                        callbacks = task.request.callbacks
                        if callbacks:
                            if len(task.request.callbacks) > 1:
                                sigs, groups = [], []
                                for sig in callbacks:
                                    sig = signature(sig, app=app)
                                    if isinstance(sig, group):
                                        groups.append(sig)
                                    else:
                                        sigs.append(sig)
                                for group_ in groups:
                                    group_.apply_async((retval, ),
                                                       parent_id=uuid,
                                                       root_id=root_id,
                                                       priority=task_priority)
                                if sigs:
                                    group(sigs, app=app).apply_async(
                                        (retval, ),
                                        parent_id=uuid,
                                        root_id=root_id,
                                        priority=task_priority)
                            else:
                                signature(callbacks[0], app=app).apply_async(
                                    (retval, ),
                                    parent_id=uuid,
                                    root_id=root_id,
                                    priority=task_priority)

                        # execute first task in chain
                        chain = task_request.chain
                        if chain:
                            _chsig = signature(chain.pop(), app=app)
                            _chsig.apply_async((retval, ),
                                               chain=chain,
                                               parent_id=uuid,
                                               root_id=root_id,
                                               priority=task_priority)
                        mark_as_done(
                            uuid,
                            retval,
                            task_request,
                            publish_result,
                        )
                    except EncodeError as exc:
                        I, R, state, retval = on_error(task_request, exc, uuid)
                    else:
                        Rstr = saferepr(R, resultrepr_maxsize)
                        T = monotonic() - time_start
                        if task_on_success:
                            task_on_success(retval, uuid, args, kwargs)
                        if success_receivers:
                            send_success(sender=task, result=retval)
                        if _does_info:
                            task_name = trace.get_task_name(task_request, name)
                            trace.info(
                                trace.LOG_SUCCESS, {
                                    'id': uuid,
                                    'name': task_name,
                                    'return_value': Rstr,
                                    'runtime': T,
                                })

                # -* POST *-
                if state not in trace.IGNORE_STATES:
                    if task_after_return:
                        task_after_return(
                            state,
                            retval,
                            uuid,
                            args,
                            kwargs,
                            None,
                        )
            finally:
                await coro_utils.await_anyway(coro_task)
                try:
                    if postrun_receivers:
                        send_postrun(sender=task,
                                     task_id=uuid,
                                     task=task,
                                     args=args,
                                     kwargs=kwargs,
                                     retval=retval,
                                     state=state)
                finally:
                    pop_task()
                    pop_request()
                    if not eager:
                        try:
                            backend_cleanup()
                            loader_cleanup()
                        except (KeyboardInterrupt, SystemExit, MemoryError):
                            raise
                        except Exception as exc:
                            logger.error('Process cleanup failed: %r',
                                         exc,
                                         exc_info=True)
        except MemoryError:
            raise
        except Exception as exc:
            if eager:
                raise
            R = trace.report_internal_error(task, exc)
            if task_request is not None:
                I, _, _, _ = on_error(task_request, exc, uuid)
        return trace_ok_t(R, I, T, Rstr)
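
coro_utils.send_exception above is project-specific, but the underlying idea is the coroutine .send()/.throw() protocol: the exception is injected at the coroutine's current suspension point, so task code can catch SoftTimeLimitExceeded like any other exception. A minimal sketch driving a coroutine by hand, no event loop required:

import asyncio

from celery.exceptions import SoftTimeLimitExceeded

async def work():
    try:
        while True:
            await asyncio.sleep(0)  # suspension point where the exception can land
    except SoftTimeLimitExceeded:
        return 'soft limit hit'

coro = work()
coro.send(None)  # advance to the first suspension point
try:
    coro.throw(SoftTimeLimitExceeded())  # inject the exception at the await
except StopIteration as stop:
    print(stop.value)  # -> 'soft limit hit'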
Example #14
class TestTasks(CreateConnectionsMixin, TestCase):
    _mock_execute = 'openwisp_controller.connection.base.models.AbstractCommand.execute'
    _mock_connect = (
        'openwisp_controller.connection.base.models.AbstractDeviceConnection.connect'
    )

    @mock.patch('logging.Logger.warning')
    @mock.patch('time.sleep')
    def test_update_config_missing_config(self, mocked_sleep, mocked_warning):
        pk = self._create_device().pk
        tasks.update_config.delay(pk)
        mocked_warning.assert_called_with(
            f'update_config("{pk}") failed: Device has no config.')
        mocked_sleep.assert_called_once()

    @mock.patch('logging.Logger.warning')
    @mock.patch('time.sleep')
    def test_update_config_missing_device(self, mocked_sleep, mocked_warning):
        pk = uuid.uuid4()
        tasks.update_config.delay(pk)
        mocked_warning.assert_called_with(
            f'update_config("{pk}") failed: Device matching query does not exist.'
        )
        mocked_sleep.assert_called_once()

    @mock.patch('logging.Logger.warning')
    def test_launch_command_missing(self, mocked_warning):
        pk = uuid.uuid4()
        tasks.launch_command.delay(pk)
        mocked_warning.assert_called_with(
            f'launch_command("{pk}") failed: Command matching query does not exist.'
        )

    @mock.patch(_mock_execute, side_effect=SoftTimeLimitExceeded())
    @mock.patch(_mock_connect, return_value=True)
    def test_launch_command_timeout(self, *args):
        dc = self._create_device_connection()
        command = Command(
            device=dc.device,
            connection=dc,
            type='custom',
            input={'command': '/usr/sbin/exotic_command'},
        )
        command.full_clean()
        command.save()
        # must call this explicitly because of the lack of transactions in this test case
        tasks.launch_command.delay(command.pk)
        command.refresh_from_db()
        self.assertEqual(command.status, 'failed')
        self.assertEqual(command.output,
                         'Background task time limit exceeded.\n')

    @mock.patch(_mock_execute, side_effect=RuntimeError('test error'))
    @mock.patch(_mock_connect, return_value=True)
    def test_launch_command_exception(self, *args):
        dc = self._create_device_connection()
        command = Command(
            device=dc.device,
            connection=dc,
            type='custom',
            input={'command': '/usr/sbin/exotic_command'},
        )
        command.full_clean()
        command.save()
        # must call this explicitly because of the lack of transactions in this test case
        with redirect_stderr(StringIO()) as stderr:
            tasks.launch_command.delay(command.pk)
            expected = f'An exception was raised while executing command {command.pk}'
            self.assertIn(expected, stderr.getvalue())
        command.refresh_from_db()
        self.assertEqual(command.status, 'failed')
        self.assertEqual(command.output, 'Internal system error: test error\n')
Example #15
class TestVideoTranscoder(Mixin, TestCase):
    @property
    def output_settings(self):
        return {
            "id": "1232",
            "input": "file://abc",
            "segmentLength": 10,
            "destination": "file:///abc",
            "file_name": "video.m3u8",
            "format": "HLS",
            "encryption": {
                "key": "ecd0d06eaf884d8226c33928e87efa33",
                "url":
                "https://demo.testpress.in/api/v2.4/encryption_key/abcdef/",
            },
            "output": {
                "name": "360p",
                "url":
                "s3://bucket_url/institute/demo/videos/transcoded/bunny",
                "local_path": "/abc/1232/360p",
                "video": {
                    "width": 360,
                    "height": 640,
                    "codec": "h264",
                    "bitrate": 500000
                },
                "audio": {
                    "codec": "aac",
                    "bitrate": "48000"
                },
            },
        }

    def setUp(self) -> None:
        self.output.settings = self.output_settings
        self.output.save()
        self.prepare_video_transcoder()

    def prepare_video_transcoder(self):
        self.video_transcoder = VideoTranscoderRunnable(
            job_id=self.output.job.id, output_id=self.output.id, task_id=13)
        self.video_transcoder.output = self.output
        self.video_transcoder.job = self.output.job

    @mock.patch("apps.jobs.runnables.ManifestGenerator")
    @mock.patch("apps.jobs.runnables.LumberjackController")
    @mock.patch("apps.executors.transcoder.FFMpegTranscoder")
    def test_runnable_should_run_ffmpeg_manager(self, mock_ffmpeg_manager,
                                                mock_controller,
                                                mock_manifest_generator):
        mock_ffmpeg_manager().check_status.return_value = Status.Finished
        mock_controller().check_status.return_value = Status.Finished
        self.video_transcoder.do_run()

        self.assertTrue(mock_ffmpeg_manager.called)

    def test_update_progress_should_update_progress_of_output_and_job(self):
        self.video_transcoder.update_progress(20)

        self.assertEqual(self.output.progress, 20)
        self.assertEqual(self.job.progress, 20)

    @mock.patch("apps.jobs.runnables.LumberjackController.start")
    @mock.patch("apps.jobs.models.app.control")
    def test_task_should_be_stopped_in_case_of_exception(
            self, mock_celery_control, mock_controller_start):
        self.create_output()
        with mock.patch.object(LumberjackController,
                               "check_status") as mocked_check_status:
            mocked_check_status.return_value = Status.Errored
            self.video_transcoder.do_run()

        mock_celery_control.revoke.assert_called_with(None,
                                                      terminate=True,
                                                      signal="SIGUSR1")
        self.assertEqual(self.video_transcoder.job.status, Job.ERROR)

    @mock.patch("apps.jobs.runnables.LumberjackController",
                **{"return_value.run.side_effect": SoftTimeLimitExceeded()})
    @mock.patch("apps.jobs.models.app.GroupResult")
    def test_task_should_stop_ffmpeg_process_in_case_of_soft_time_limit_exception(
            self, mock_group_result, mock_controller):
        mock_controller().check_status.side_effect = SoftTimeLimitExceeded()
        task_mock = mock.MagicMock()
        mock_group_result.restore.return_value = [task_mock]
        self.video_transcoder.do_run()

        self.assertEqual(self.video_transcoder.output.status, Output.CANCELLED)
        mock_controller().stop.assert_called()

    def test_complete_job_should_change_status_to_completed(self):
        self.video_transcoder.complete_job()

        self.job.refresh_from_db()
        self.assertEqual(self.job.status, Job.COMPLETED)

    @mock.patch("apps.jobs.runnables.ManifestGenerator")
    @mock.patch("apps.jobs.tasks.PostDataToWebhookTask")
    @mock.patch("apps.jobs.runnables.LumberjackController")
    def test_job_completion_status_should_should_be_notified_on_transcoding_completion(
            self, mock_controller, mock_webhook, mock_manifest_generator):
        mock_controller().check_status.return_value = Status.Finished
        self.job.webhook_url = "google.com"
        self.job.save()
        self.video_transcoder.do_run()

        self.job.refresh_from_db()
        self.assertEqual(self.job.status, Job.COMPLETED)
        mock_webhook.apply_async.assert_called_with(args=(JobSerializer(
            instance=self.job).data, "google.com"))

    @mock.patch("apps.jobs.runnables.ManifestGenerator")
    @mock.patch("apps.jobs.runnables.LumberjackController")
    def test_manifest_generator_should_be_called_on_transcoding_completion(
            self, mock_controller, mock_manifest_generator):
        mock_controller().check_status.return_value = Status.Finished
        self.video_transcoder.do_run()

        mock_manifest_generator.assert_called()

    @mock.patch("apps.jobs.runnables.ManifestGenerator")
    @mock.patch("apps.jobs.tasks.PostDataToWebhookTask")
    @mock.patch("apps.jobs.runnables.LumberjackController")
    @mock.patch("builtins.open",
                new_callable=mock_open,
                read_data="FFMpeg Log")
    def test_ffmpeg_log_should_get_stored_in_output_model(
            self, mocked_op, mock_controller, mock_webhook,
            mock_manifest_generator):
        mock_controller().check_status.return_value = Status.Finished
        self.video_transcoder.do_run()

        self.output.refresh_from_db()
        self.assertEquals("FFMpeg Log", self.output.log)