def test_different_request(self, mock_now):
     app.env.request = HttpRequest()
     assert cached_fn("cat") == "cat"
     request_finished.send(sender=WSGIHandler)
     app.env.request = HttpRequest()
     assert cached_fn("cat") == "cat"
     assert mock_now.call_count == 2
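The test above exercises a function whose results are cached only for the lifetime of a single request: after request_finished fires and a new request starts, the wrapped function runs again (hence mock_now.call_count == 2). A minimal sketch of such a per-request memoizer is shown below; per_request_cache and its internals are illustrative assumptions, not the project's actual cached_fn.

from django.core.signals import request_finished

def per_request_cache(func):
    # Illustrative per-request memoizer: values survive repeated calls within
    # one request and are discarded when request_finished fires.
    cache = {}

    def _clear(sender, **kwargs):
        cache.clear()

    # weak=False keeps the receiver alive as long as the wrapper exists.
    request_finished.connect(_clear, weak=False)

    def wrapper(arg):
        if arg not in cache:
            cache[arg] = func(arg)
        return cache[arg]

    return wrapper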
Example n. 2
    def create(self, request):
        """
        Creates and saves a cohort. Takes a JSON object in the request body to use as the cohort's filters.
        Authentication is required.
        Returns information about the saved cohort, including the number of patients and the number
        of samples in that cohort.
        """
        cursor = None
        db = None

        user = endpoints.get_current_user()
        user_email = user.email() if user else None

        if user_email is None:
            raise endpoints.UnauthorizedException(
                "Authentication failed. Try signing in to {} to register "
                "with the web application.".format(BASE_URL))

        django.setup()
        try:
            django_user = Django_User.objects.get(email=user_email)
        except (ObjectDoesNotExist, MultipleObjectsReturned) as e:
            logger.warn(e)
            request_finished.send(self)
            raise endpoints.NotFoundException("%s does not have an entry in the user database." % user_email)
Example n. 3
    def list(self, unused_request):
        """
        Returns information about cohorts a user has either READER or OWNER permission on.
        Authentication is required. Optionally takes a cohort id as a parameter to
        only list information about one cohort.
        """
        user_email = None
        cursor = None
        db = None

        if endpoints.get_current_user() is not None:
            user_email = endpoints.get_current_user().email()

        if user_email is None:
            raise endpoints.UnauthorizedException(
                "Authentication failed. Try signing in to {} to register "
                "with the web application.".format(BASE_URL))

        django.setup()
        try:
            user_id = Django_User.objects.get(email=user_email).id
        except (ObjectDoesNotExist, MultipleObjectsReturned) as e:
            logger.warn(e)
            request_finished.send(self)
            raise endpoints.NotFoundException("%s does not have an entry in the user database." % user_email)
Example n. 4
    def get(self, request):
        '''
        Returns the dbGaP authorization status of the user.
        '''
        user_email = None

        if endpoints.get_current_user() is not None:
            user_email = endpoints.get_current_user().email()

        if user_email is None:
            raise endpoints.UnauthorizedException("Authentication unsuccessful.")

        # this checks the controlled-access google group
        am_dbgap_authorized = is_dbgap_authorized(user_email)

        if not am_dbgap_authorized:
            return ReturnJSON(message="{} is not on the controlled-access google group.".format(user_email),
                              dbGaP_authorized=False)

        django.setup()

        # all the following five situations should never happen

        # 1. check to make sure they have an entry in auth_user
        try:
            django_user = Django_User.objects.get(email=user_email)
        except (ObjectDoesNotExist, MultipleObjectsReturned) as e:
            logger.error("Email {} is in {} group but did not have a unique entry in auth_user table. Error: {}"
                         .format(user_email, CONTROLLED_ACL_GOOGLE_GROUP, e))
            request_finished.send(self)
            raise endpoints.NotFoundException("{} is in the controlled-access google group "
                                              "but does not have an entry in the user database."
                                              .format(user_email))
Example n. 5
    def googlegenomics(self, request):
        """
        Returns a list of Google Genomics dataset and readgroupset ids associated with
        all the samples in a specified cohort.
        Authentication is required. User must have either READER or OWNER permissions on the cohort.
        """
        cursor = None
        db = None
        user_email = None
        cohort_id = request.get_assigned_value('cohort_id')

        if endpoints.get_current_user() is not None:
            user_email = endpoints.get_current_user().email()

        if user_email is None:
            raise endpoints.UnauthorizedException(
                "Authentication failed. Try signing in to {} to register with the web application."
                    .format(BASE_URL))

        django.setup()
        try:
            user_id = Django_User.objects.get(email=user_email).id
            Django_Cohort.objects.get(id=cohort_id)
            Cohort_Perms.objects.get(cohort_id=cohort_id, user_id=user_id)
        except (ObjectDoesNotExist, MultipleObjectsReturned) as e:
            logger.warn(e)
            err_msg = "Error retrieving cohort {} for user {}: {}".format(cohort_id, user_email, e)
            if 'Cohort_Perms' in str(e):
                err_msg = "User {} does not have permissions on cohort {}. Error: {}" \
                    .format(user_email, cohort_id, e)
            request_finished.send(self)
            raise endpoints.UnauthorizedException(err_msg)
Example n. 6
 def resolve(self, request, handler):
     try:
         request_started.send(sender=__name__)
         return self.query.query(request,
                                 handler.request[1].getsockname()[0])
     finally:
         request_finished.send(sender=__name__)
Example n. 7
def close_gevent_db_connection() -> None:
    """
    Clean up gevent db connections. Check `in_atomic_block` to avoid breaking the tests (Django `TestCase` wraps each
    test inside an atomic block that rolls back at the end of the test).
    https://github.com/jneight/django-db-geventpool#using-orm-when-not-serving-requests
    """
    if not connection.in_atomic_block:
        request_finished.send(sender="greenlet")
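A minimal sketch (assumptions throughout, not part of django-db-geventpool) of how a cleanup like this is typically applied: each greenlet job is wrapped so the pooled connection is returned when the job ends, mirroring the wrapper examples further down.

import functools

from django.core.signals import request_finished
from django.db import connection

def close_db_connection_when_done(func):
    # Hypothetical decorator for code running outside the request/response
    # cycle (greenlets, management commands, cron jobs).
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        finally:
            # Skip cleanup inside an atomic block (e.g. Django TestCase);
            # otherwise signal that the "request" ended so the pool reclaims
            # this greenlet's connection.
            if not connection.in_atomic_block:
                request_finished.send(sender="greenlet")
    return wrapper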
Example n. 8
    def test_api(self):
        base_count = ModelDictModel.objects.count()

        mydict = ModelDict(ModelDictModel, key='key', value='value')
        mydict['foo'] = 'bar'
        self.assertEquals(mydict['foo'], 'bar')
        self.assertEquals(
            ModelDictModel.objects.values_list('value',
                                               flat=True).get(key='foo'),
            'bar')
        self.assertEquals(ModelDictModel.objects.count(), base_count + 1)
        mydict['foo'] = 'bar2'
        self.assertEquals(mydict['foo'], 'bar2')
        self.assertEquals(
            ModelDictModel.objects.values_list('value',
                                               flat=True).get(key='foo'),
            'bar2')
        self.assertEquals(ModelDictModel.objects.count(), base_count + 1)
        mydict['foo2'] = 'bar'
        self.assertEquals(mydict['foo2'], 'bar')
        self.assertEquals(
            ModelDictModel.objects.values_list('value',
                                               flat=True).get(key='foo2'),
            'bar')
        self.assertEquals(ModelDictModel.objects.count(), base_count + 2)
        del mydict['foo2']
        self.assertRaises(KeyError, mydict.__getitem__, 'foo2')
        self.assertFalse(ModelDictModel.objects.filter(key='foo2').exists())
        self.assertEquals(ModelDictModel.objects.count(), base_count + 1)

        ModelDictModel.objects.create(key='foo3', value='hello')

        self.assertEquals(mydict['foo3'], 'hello')
        self.assertTrue(
            ModelDictModel.objects.filter(key='foo3').exists(), True)
        self.assertEquals(ModelDictModel.objects.count(), base_count + 2)

        request_finished.send(sender=self)

        self.assertEquals(mydict._last_checked_for_remote_changes, None)

        # These should still error because even though the cache repopulates (local cache)
        # the remote cache pool does not
        # self.assertRaises(KeyError, mydict.__getitem__, 'foo3')
        # self.assertTrue(ModelDictModel.objects.filter(key='foo3').exists())
        # self.assertEquals(ModelDictModel.objects.count(), base_count + 2)

        self.assertEquals(mydict['foo'], 'bar2')
        self.assertEquals(
            ModelDictModel.objects.values_list('value',
                                               flat=True).get(key='foo'),
            'bar2')
        self.assertEquals(ModelDictModel.objects.count(), base_count + 2)

        self.assertEquals(mydict.pop('foo'), 'bar2')
        self.assertEquals(mydict.pop('foo', None), None)
        self.assertFalse(ModelDictModel.objects.filter(key='foo').exists())
        self.assertEquals(ModelDictModel.objects.count(), base_count + 1)
Example n. 9
 def handle(self, socket, address):
     """ Just overwrite the handle to have database lock per connection """
     log.info('Incoming connection from {}:{}'.format(
         address[0], address[1]))
     request_started.send(
         sender='transactional-edge-{}-{}'.format(address[0], address[1]))
     super().handle(socket, address)
     request_finished.send(
         sender='transactional-edge-{}-{}'.format(address[0], address[1]))
Example n. 10
 def push_messages(self):
     try:
         request_started.send(None, environ=None)
         self._push_messages()
     except Exception as e:
         self.logger.exception(e)
         raise
     finally:
         request_finished.send(None)
Example n. 12
    def test_request_finished(self, _mock):
        with self.settings(CELERY_ALWAYS_EAGER=False):
            test_task.delay()
        self._verify_task_filled()

        request_finished.send(sender=self)
        self._verify_task_empty()

        # Assert the original `apply_async` called.
        assert _mock.called
Example n. 13
 def cleanup(self):
     """
     This feels pretty icky. Basically this needs to be here to ensure that
     django db connections are cleaned up and properly closed after each
     request/response cycle - which is what django does behind the scenes (
     thus we can use django's machinery to close the connection for us here
     by just firing the `request_finished` signal). To leave this out means
     that we end up with an idle connection per server thread (I think).
     """
     request_finished.send(sender=self)
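The docstring above relies on Django's own signal wiring: django.db registers close_old_connections on the request signals at import time, which is why firing request_finished is enough to release the thread's idle connection. The snippet below paraphrases that wiring for reference; it is not code from the example's project.

from django.core import signals
from django.db import close_old_connections, reset_queries

# Approximation of what django.db does on import: connection cleanup is a
# plain signal receiver, so any code that sends request_finished (like the
# cleanup() above) gets stale connections closed for free.
signals.request_started.connect(reset_queries)
signals.request_started.connect(close_old_connections)
signals.request_finished.connect(close_old_connections)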
Example n. 15
    def test_request_finished(self, _mock):
        request_started.send(sender=self)
        test_task.delay()
        self._verify_task_filled()

        request_finished.send(sender=self)
        self._verify_task_empty()

        # Assert the original `apply_async` called.
        assert _mock.called, (
            'Expected PostRequestTask.original_apply_async call')
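These tests exercise a "post request task" pattern: calls to delay() made while a request is in flight are held in a queue and only handed to Celery once request_finished fires. A rough sketch of the idea follows; the helper names and queue layout are assumptions, not the real django-post-request-task internals.

from django.core.signals import request_started, request_finished

_task_queue = []  # (task, args, kwargs) tuples captured during the request

def _start_queuing(sender, **kwargs):
    # A new request begins: drop anything left over from a previous cycle.
    del _task_queue[:]

def _send_queued_tasks(sender, **kwargs):
    # The request is done: hand every captured task to Celery for real.
    # An overridden Task.apply_async would have appended to _task_queue
    # instead of sending immediately.
    while _task_queue:
        task, args, kwargs_ = _task_queue.pop(0)
        task.original_apply_async(args=args, kwargs=kwargs_)

request_started.connect(_start_queuing, weak=False)
request_finished.connect(_send_queued_tasks, weak=False)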
Example n. 16
    def test_signals(self):
        actrack.log(self.user, 'tests', related=self.project)

        self.assertEqual(len(Action.objects.all()), 0)

        request_finished.send(None)

        self.assertEqual(len(Action.objects.all()), 1)
        created_action = Action.objects.all()[0]
        self.assertEqual(list(created_action.related.all()), [self.project])
        self.assertEqual(created_action.verb, 'tests')
        self.assertEqual(created_action.actor, self.user)
Example n. 17
    def test_task_applied_once_request_finished(self,
                                                original_apply_async_mock):
        request_started.send(sender=self)
        test_task.delay()
        self._verify_one_task_queued()

        request_finished.send(sender=self)
        self._verify_task_empty()

        # Assert the original `apply_async` called.
        self.assertEqual(original_apply_async_mock.call_count, 1,
                         'Expected PostRequestTask.original_apply_async call')
Example n. 18
    def test_task_applied_once_request_finished(
            self, original_apply_async_mock):
        request_started.send(sender=self)
        test_task.delay()
        self._verify_one_task_queued()

        request_finished.send(sender=self)
        self._verify_task_empty()

        # Assert the original `apply_async` called.
        self.assertEqual(
            original_apply_async_mock.call_count, 1,
            'Expected PostRequestTask.original_apply_async call')
Example n. 19
    def am_i_dbgap_authorized(self, request):
        '''
        Returns information about the user.
        :param token: Optional. Access token with email scope to verify user's google identity.
        :return: ReturnJSON with msg string indicating presence or absence on the controlled-access list.
        '''
        print('Called ' + sys._getframe().f_code.co_name, file=sys.stderr)
        user_email = None

        if endpoints.get_current_user() is not None:
            user_email = endpoints.get_current_user().email()

        # users have the option of pasting the access token in the query string
        # or in the 'token' field in the api explorer
        # but this is not required
        access_token = request.__getattribute__('token')
        if access_token:
            user_email = get_user_email_from_token(access_token)

        if user_email:
            # this checks the controlled-access google group
            am_dbgap_authorized = is_dbgap_authorized(user_email)

            if not am_dbgap_authorized:
                return ReturnJSON(msg="You are not on the controlled-access google group.")

            django.setup()
            # all the following five situations should never happen

            # 1. check to make sure they have an entry in auth_user
            try:
                django_user = Django_User.objects.get(email=user_email)
            except (ObjectDoesNotExist, MultipleObjectsReturned) as e:
                logger.error("Email {} is in {} group but did not have a unique entry in auth_user table. Error: {}"
                             .format(user_email, CONTROLLED_ACL_GOOGLE_GROUP, e))
                request_finished.send(self)
                raise endpoints.NotFoundException("{} is in the controlled-access google group "
                                                  "but does not have an entry in the user database."
                                                  .format(user_email))

            # 2. check to make sure they have an entry in accounts_nih_user
            try:
                nih_user = NIH_User.objects.get(user_id=django_user.id)
            except (ObjectDoesNotExist, MultipleObjectsReturned) as e:
                logger.error("Email {} is in {} group but did not have a unique entry in "
                             "accounts_nih_user table. Error: {}"
                             .format(user_email, CONTROLLED_ACL_GOOGLE_GROUP, e))
                raise endpoints.NotFoundException("{} is in the controlled-access google group "
                                                  "but does not have an entry in the nih_user database."
                                                  .format(user_email))
Example n. 20
    def test_api(self):
        base_count = ModelDictModel.objects.count()

        mydict = ModelDict(ModelDictModel, key='key', value='value')
        mydict['foo'] = 'bar'
        assert mydict['foo'] == 'bar'
        assert ModelDictModel.objects.values_list(
            'value', flat=True).get(key='foo') == 'bar'
        assert ModelDictModel.objects.count() == base_count + 1
        mydict['foo'] = 'bar2'
        assert mydict['foo'] == 'bar2'
        assert ModelDictModel.objects.values_list(
            'value', flat=True).get(key='foo') == 'bar2'
        assert ModelDictModel.objects.count() == base_count + 1
        mydict['foo2'] = 'bar'
        assert mydict['foo2'] == 'bar'
        assert ModelDictModel.objects.values_list(
            'value', flat=True).get(key='foo2') == 'bar'
        assert ModelDictModel.objects.count() == base_count + 2
        del mydict['foo2']
        with pytest.raises(KeyError):
            mydict.__getitem__('foo2')
        assert not ModelDictModel.objects.filter(key='foo2').exists()
        assert ModelDictModel.objects.count() == base_count + 1

        ModelDictModel.objects.create(key='foo3', value='hello')

        assert mydict['foo3'] == 'hello'
        assert ModelDictModel.objects.filter(key='foo3').exists(), True
        assert ModelDictModel.objects.count() == base_count + 2

        request_finished.send(sender=self)

        assert mydict._last_checked_for_remote_changes == 0.0

        # These should still error because even though the cache repopulates (local cache)
        # the remote cache pool does not
        # self.assertRaises(KeyError, mydict.__getitem__, 'foo3')
        # self.assertTrue(ModelDictModel.objects.filter(key='foo3').exists())
        # self.assertEquals(ModelDictModel.objects.count(), base_count + 2)

        assert mydict['foo'] == 'bar2'
        assert ModelDictModel.objects.values_list(
            'value', flat=True).get(key='foo') == 'bar2'
        assert ModelDictModel.objects.count() == base_count + 2

        assert mydict.pop('foo') == 'bar2'
        assert mydict.pop('foo', None) is None
        assert not ModelDictModel.objects.filter(key='foo').exists()
        assert ModelDictModel.objects.count() == base_count + 1
Example n. 21
    def test_modeldict_expirey(self):
        base_count = ModelDictModel.objects.count()
        
        mydict = ModelDict(ModelDictModel, key='key', value='value')

        self.assertEquals(mydict._cache, None)
        
        mydict['test_modeldict_expirey'] = 'hello'

        self.assertEquals(len(mydict._cache), base_count + 1)
        self.assertEquals(mydict['test_modeldict_expirey'], 'hello')

        self.client.get('/')
        
        self.assertEquals(mydict._cache, None)
        self.assertEquals(mydict['test_modeldict_expirey'], 'hello')
        self.assertEquals(len(mydict._cache), base_count + 1)
        
        request_finished.send(sender=self)

        self.assertEquals(mydict._cache, None)
        self.assertEquals(mydict['test_modeldict_expirey'], 'hello')
        self.assertEquals(len(mydict._cache), base_count + 1)

    # def test_modeldict_counts(self):
    #     # TODO: 
    #     mydict = ModelDict(ModelDictModel, key='key', value='value')
    #     mydict['test_1'] = 'foo'
    #     mydict['test_2'] = 'bar'
    #     del mydict
    #     request_finished.send(sender=self)
    # 
    #     mydict = ModelDict(ModelDictModel, key='key', value='value')
    #     # First and only cache.get() here.
    #     self.assertEqual(mydict['test_1'], 'foo')
    #     self.assertEqual(mydict['test_2'], 'bar')
    #     self.assertEqual(mydict['test_1'], 'foo')
    # 
    #     request_finished.send(sender=self)
    #     # Should not be another cache.get().
    #     self.assertEqual(mydict['test_1'], 'foo')
    #     self.assertEqual(mydict['test_2'], 'bar')
    #     self.assertEqual(mydict['test_1'], 'foo')
    # 
    #     self.assertEqual(cache._gets[c.get_key('ModelDict:ModelDictModel:key')], 1)
    #     self.assertEqual(cache._gets[c.get_key('ModelDict.last_updated:ModelDictModel:key')], 2)
Example n. 22
def worker_int(worker):
    request_finished.send(sender="greenlet")
    worker.log.info("worker received INT or QUIT signal")

    ## get traceback info
    import threading, sys, traceback
    id2name = dict([(th.ident, th.name) for th in threading.enumerate()])
    code = []
    for threadId, stack in sys._current_frames().items():
        code.append("\n# Thread: %s(%d)" %
                    (id2name.get(threadId, ""), threadId))
        for filename, lineno, name, line in traceback.extract_stack(stack):
            code.append('File: "%s", line %d, in %s' %
                        (filename, lineno, name))
            if line:
                code.append("  %s" % (line.strip()))
    worker.log.debug("\n".join(code))
Example n. 23
    def test_api(self):
        base_count = ModelDictModel.objects.count()

        mydict = ModelDict(ModelDictModel, key='key', value='value')
        mydict['foo'] = 'bar'
        assert mydict['foo'] == 'bar'
        assert ModelDictModel.objects.values_list('value', flat=True).get(key='foo') == 'bar'
        assert ModelDictModel.objects.count() == base_count + 1
        mydict['foo'] = 'bar2'
        assert mydict['foo'] == 'bar2'
        assert ModelDictModel.objects.values_list('value', flat=True).get(key='foo') == 'bar2'
        assert ModelDictModel.objects.count() == base_count + 1
        mydict['foo2'] = 'bar'
        assert mydict['foo2'] == 'bar'
        assert ModelDictModel.objects.values_list('value', flat=True).get(key='foo2') == 'bar'
        assert ModelDictModel.objects.count() == base_count + 2
        del mydict['foo2']
        with pytest.raises(KeyError):
            mydict.__getitem__('foo2')
        assert not ModelDictModel.objects.filter(key='foo2').exists()
        assert ModelDictModel.objects.count() == base_count + 1

        ModelDictModel.objects.create(key='foo3', value='hello')

        assert mydict['foo3'] == 'hello'
        assert ModelDictModel.objects.filter(key='foo3').exists(), True
        assert ModelDictModel.objects.count() == base_count + 2

        request_finished.send(sender=self)

        assert mydict._last_checked_for_remote_changes == 0.0

        # These should still error because even though the cache repopulates (local cache)
        # the remote cache pool does not
        # self.assertRaises(KeyError, mydict.__getitem__, 'foo3')
        # self.assertTrue(ModelDictModel.objects.filter(key='foo3').exists())
        # self.assertEquals(ModelDictModel.objects.count(), base_count + 2)

        assert mydict['foo'] == 'bar2'
        assert ModelDictModel.objects.values_list('value', flat=True).get(key='foo') == 'bar2'
        assert ModelDictModel.objects.count() == base_count + 2

        assert mydict.pop('foo') == 'bar2'
        assert mydict.pop('foo', None) is None
        assert not ModelDictModel.objects.filter(key='foo').exists()
        assert ModelDictModel.objects.count() == base_count + 1
Example n. 24
    def test_api(self):
        base_count = ModelDictModel.objects.count()

        mydict = ModelDict(ModelDictModel, key='key', value='value')
        mydict['foo'] = 'bar'
        self.assertEquals(mydict['foo'], 'bar')
        self.assertEquals(ModelDictModel.objects.values_list('value', flat=True).get(key='foo'), 'bar')
        self.assertEquals(ModelDictModel.objects.count(), base_count + 1)
        mydict['foo'] = 'bar2'
        self.assertEquals(mydict['foo'], 'bar2')
        self.assertEquals(ModelDictModel.objects.values_list('value', flat=True).get(key='foo'), 'bar2')
        self.assertEquals(ModelDictModel.objects.count(), base_count + 1)
        mydict['foo2'] = 'bar'
        self.assertEquals(mydict['foo2'], 'bar')
        self.assertEquals(ModelDictModel.objects.values_list('value', flat=True).get(key='foo2'), 'bar')
        self.assertEquals(ModelDictModel.objects.count(), base_count + 2)
        del mydict['foo2']
        self.assertRaises(KeyError, mydict.__getitem__, 'foo2')
        self.assertFalse(ModelDictModel.objects.filter(key='foo2').exists())
        self.assertEquals(ModelDictModel.objects.count(), base_count + 1)

        ModelDictModel.objects.create(key='foo3', value='hello')

        self.assertEquals(mydict['foo3'], 'hello')
        self.assertTrue(ModelDictModel.objects.filter(key='foo3').exists(), True)
        self.assertEquals(ModelDictModel.objects.count(), base_count + 2)

        request_finished.send(sender=self)

        self.assertEquals(mydict._last_checked_for_remote_changes, None)

        # These should still error because even though the cache repopulates (local cache)
        # the remote cache pool does not
        # self.assertRaises(KeyError, mydict.__getitem__, 'foo3')
        # self.assertTrue(ModelDictModel.objects.filter(key='foo3').exists())
        # self.assertEquals(ModelDictModel.objects.count(), base_count + 2)

        self.assertEquals(mydict['foo'], 'bar2')
        self.assertEquals(ModelDictModel.objects.values_list('value', flat=True).get(key='foo'), 'bar2')
        self.assertEquals(ModelDictModel.objects.count(), base_count + 2)

        self.assertEquals(mydict.pop('foo'), 'bar2')
        self.assertEquals(mydict.pop('foo', None), None)
        self.assertFalse(ModelDictModel.objects.filter(key='foo').exists())
        self.assertEquals(ModelDictModel.objects.count(), base_count + 1)
Example n. 25
 def check_records(records):
     try:
         request_started.send(sender=__name__)
         records_map = {}
         for record in records:
             records_map[record.pk] = record
             record.incr_recent_query_times(1)
         for monitor in _get_monitors(records):
             record = records_map[monitor.record_id]
             if monitor.mtype == 1:  # tcp
                 checker = TcpChecker(record, monitor)
             elif monitor.mtype == 2:
                 checker = HttpChecker(record, monitor)
             thread = threading.Thread(target=checker)
             thread.daemon = True
             thread.start()
     finally:
         request_finished.send(sender=__name__)
Example n. 26
 def test_context_cache_cleared_after_request(self):
     """ The context cache should be cleared between requests. """
     CachingTestModel.objects.create(field1="test")
     with sleuth.watch("google.appengine.api.datastore.Query.Run") as query:
         CachingTestModel.objects.get(field1="test")
         self.assertEqual(query.call_count, 0)
         # Now start a new request, which should clear the cache
         request_started.send(HttpRequest(), keep_disabled_flags=True)
         CachingTestModel.objects.get(field1="test")
         self.assertEqual(query.call_count, 1)
         # Now do another call, which should use the cache (because it would have been
         # populated by the previous call)
         CachingTestModel.objects.get(field1="test")
         self.assertEqual(query.call_count, 1)
         # Now clear the cache again by *finishing* a request
         request_finished.send(HttpRequest(), keep_disabled_flags=True)
         CachingTestModel.objects.get(field1="test")
         self.assertEqual(query.call_count, 2)
Example n. 28
    def test_modeldict_expirey(self):
        base_count = ModelDictModel.objects.count()

        mydict = ModelDict(ModelDictModel, key='key', value='value')

        assert mydict._local_cache == {}

        mydict['test_modeldict_expirey'] = 'hello'

        assert len(mydict._local_cache) == base_count + 1
        assert mydict['test_modeldict_expirey'] == 'hello'

        self.client.get('/')

        assert mydict._last_checked_for_remote_changes == 0.0
        assert mydict['test_modeldict_expirey'] == 'hello'
        assert len(mydict._local_cache) == base_count + 1

        request_finished.send(sender=self)

        assert mydict._last_checked_for_remote_changes == 0.0
        assert mydict['test_modeldict_expirey'] == 'hello'
        assert len(mydict._local_cache) == base_count + 1
Example n. 29
    def test_modeldict_expirey(self):
        base_count = ModelDictModel.objects.count()

        mydict = ModelDict(ModelDictModel, key='key', value='value')

        self.assertEquals(mydict._cache, None)

        mydict['test_modeldict_expirey'] = 'hello'

        self.assertEquals(len(mydict._cache), base_count + 1)
        self.assertEquals(mydict['test_modeldict_expirey'], 'hello')

        self.client.get('/')

        self.assertEquals(mydict._cache, None)
        self.assertEquals(mydict['test_modeldict_expirey'], 'hello')
        self.assertEquals(len(mydict._cache), base_count + 1)

        request_finished.send(sender=self)

        self.assertEquals(mydict._cache, None)
        self.assertEquals(mydict['test_modeldict_expirey'], 'hello')
        self.assertEquals(len(mydict._cache), base_count + 1)
Example n. 30
    def test_modeldict_expirey(self):
        base_count = ModelDictModel.objects.count()

        mydict = ModelDict(ModelDictModel, key='key', value='value')

        self.assertEquals(mydict._local_cache, None)

        mydict['test_modeldict_expirey'] = 'hello'

        self.assertEquals(len(mydict._local_cache), base_count + 1)
        self.assertEquals(mydict['test_modeldict_expirey'], 'hello')

        self.client.get('/')

        self.assertEquals(mydict._last_checked_for_remote_changes, None)
        self.assertEquals(mydict['test_modeldict_expirey'], 'hello')
        self.assertEquals(len(mydict._local_cache), base_count + 1)

        request_finished.send(sender=self)

        self.assertEquals(mydict._last_checked_for_remote_changes, None)
        self.assertEquals(mydict['test_modeldict_expirey'], 'hello')
        self.assertEquals(len(mydict._local_cache), base_count + 1)
Example n. 31
    def delete(self, request):
        """
        Deletes a cohort. User must have owner permissions on the cohort.
        """
        user_email = None

        if endpoints.get_current_user() is not None:
            user_email = endpoints.get_current_user().email()

        cohort_id = request.get_assigned_value('cohort_id')

        if user_email is None:
            raise endpoints.UnauthorizedException(
                "Authentication failed. Try signing in to {} to register with the web application."
                    .format(BASE_URL))

        django.setup()
        try:
            django_user = Django_User.objects.get(email=user_email)
            user_id = django_user.id
        except (ObjectDoesNotExist, MultipleObjectsReturned) as e:
            logger.warn(e)
            request_finished.send(self)
            raise endpoints.NotFoundException("%s does not have an entry in the user database." % user_email)
Example n. 32
 def wrapper(*args, **kwargs):
     result = f(*args, **kwargs)
     request_finished.send(sender="greenlet")
     return result
Example n. 33
                cursor.close()
                db.close()
                if len(metadata_sample_rows) == 0:
                    msg = "Aliquot barcode {} not found in the database.".format(aliquot_barcode)
                    logger.info(msg)
                else:
                    msg = "No annotations found for aliquot barcode {}".format(aliquot_barcode)
                    if item_type_name is not None:
                        msg += " and item type name {}. Item type name must be one of the following: " \
                               "'Patient', 'Aliquot', 'Analyte', 'Shipped Portion', 'Portion', 'Slide', 'Sample'.".format(item_type_name)
                    logger.info(msg)
                raise endpoints.NotFoundException(msg)

            items = []
            for row in rows:
                constructor_dict = build_constructor_dict_for_message(MetadataAnnotationItem(), row)
                items.append(MetadataAnnotationItem(**constructor_dict))

            return MetadataAnnotationList(items=items, count=len(items))

        except (IndexError, TypeError) as e:
            logger.info("Aliquot {} not found. Error: {}".format(aliquot_barcode, e))
            raise endpoints.NotFoundException("Aliquot {} not found.".format(aliquot_barcode))
        except MySQLdb.ProgrammingError as e:
            logger.warn("Error retrieving aliquot data: {}".format(e))
            raise endpoints.BadRequestException("Error retrieving aliquot data: {}".format(e))
        finally:
            if cursor: cursor.close()
            if db and db.open: db.close()
            request_finished.send(self)
Example n. 34
 def tearDown(self):
     # remove the request, trigger the signal to clear the cache, restore receivers
     app.env.request = None
     request_finished.send(sender=WSGIHandler)
     request_finished.receivers = self.original_receivers
     super().tearDown()
Example n. 35
 def wrapper(*args, **kwargs):
     try:
         return f(*args, **kwargs)
     finally:
         request_finished.send(sender='greenlet')
Example n. 36
 def wrapper(*args, **kwargs):
     try:
         return f(*args, **kwargs)
     finally:
         request_finished.send(sender='greenlet')
Example n. 37
def worker_exit(server, worker):
    request_finished.send(sender="greenlet")
    worker.log.info("worker exit signal")
Example n. 38
    def __call__(self, body, message):
        """
        Handle a vanilla AMQP message, called by the Celery framework.

        Raising an exception in this method will crash the Celery worker. Ensure
        that all Exceptions are caught and messages acknowledged or rejected
        as they are processed.

        Args:
            body (Any): the message content, which has been deserialized by Kombu
            message (kombu.message.Message)

        Returns:
            None
        """
        retry_count = self.retry_count(message)

        try:
            _logger.debug(
                'Received: (key={routing_key}, retry_count={retry_count})'.
                format(
                    routing_key=self.routing_key,
                    retry_count=retry_count,
                ))
            self.func(body)

        except Exception as e:
            if isinstance(e, PermanentFailure):
                self.archive(
                    body, message,
                    "Task '{routing_key}' raised '{cls}, {error}'\n"
                    "{traceback}".format(
                        routing_key=self.routing_key,
                        cls=e.__class__.__name__,
                        error=e,
                        traceback=traceback.format_exc(),
                    ))
            elif retry_count >= settings.MAX_RETRIES:
                self.archive(
                    body, message,
                    "Task '{routing_key}' ran out of retries ({retries}) on exception "
                    "'{cls}, {error}'\n"
                    "{traceback}".format(
                        routing_key=self.routing_key,
                        retries=retry_count,
                        cls=e.__class__.__name__,
                        error=e,
                        traceback=traceback.format_exc(),
                    ))
            else:
                self.retry(
                    body, message,
                    "Task '{routing_key}' raised the exception '{cls}, {error}', but there are "
                    "{retries} retries left\n"
                    "{traceback}".format(
                        routing_key=self.routing_key,
                        retries=settings.MAX_RETRIES - retry_count,
                        cls=e.__class__.__name__,
                        error=e,
                        traceback=traceback.format_exc(),
                    ))
        else:
            message.ack()
            _logger.debug(
                "Task '{routing_key}' processed and ack() sent".format(
                    routing_key=self.routing_key))

        finally:
            if settings.USE_DJANGO:
                # avoid various problems with db connections, due to long-lived
                # worker not automatically participating in Django request lifecycle
                request_finished.send(sender="AMQPRetryHandler")

            if not message.acknowledged:
                message.requeue()
                _logger.critical(
                    "Messages for task '{routing_key}' are not sending an ack() or a reject(). "
                    "This needs attention. Assuming some kind of error and requeueing the "
                    "message.".format(routing_key=self.routing_key))
Example n. 39
def worker_abort(worker):
    request_finished.send(sender="greenlet")
    worker.log.info("worker received SIGABRT signal")