Ejemplo n.º 1
0
    def test_clear_global_state(self):
        """After uninstall(), logging's private module-level handler
        registries must no longer reference the capture handler."""
        from logging import _handlerList, _handlers

        handler = LogCapture()
        handler.uninstall()
        # Check both registries, in the same order as before.
        for registry in (_handlers, _handlerList):
            self.assertFalse(handler in registry)
Ejemplo n.º 2
0
 def test_simple(self):
     """Only messages logged while the capture is installed are recorded."""
     root.info('before')          # not captured: capture not yet created
     capture = LogCapture()
     root.info('during')          # captured
     capture.uninstall()
     root.info('after')           # not captured: capture removed
     assert str(capture) == "root INFO\n  during"
Ejemplo n.º 3
0
class ExperimentRunningMock(test_integration_mock.GatewayCodeMock):

    """Environment fixture for experiment-running integration tests."""

    def setUp(self):
        super(ExperimentRunningMock, self).setUp()
        # Pre-create the experiment folders and capture gateway errors only.
        self.g_m._create_user_exp_folders(USER, EXP_ID)
        self.log_error = LogCapture('gateway_code', level=logging.ERROR)

    def tearDown(self):
        super(ExperimentRunningMock, self).tearDown()
        self.g_m._destroy_user_exp_folders(USER, EXP_ID)
        self.log_error.uninstall()

    @staticmethod
    def send_n_cmds(command, num_times, step=0.5):
        """Send `command` `num_times` times with `step` seconds between
        sends; return the list of joined answers (None for no answer)."""
        words = command.split()
        answers = []
        for _ in range(num_times):
            reply = OpenNodeConnection.send_one_command(words)
            answers.append(' '.join(reply) if reply is not None else None)
            time.sleep(step)
        return answers
Ejemplo n.º 4
0
    def test_atexit(self):
        """LogCapture registers an atexit hook that warns about captures
        still installed at interpreter shutdown."""
        m = Mock()
        with Replacer() as r:
            # Reset the marker (another test has probably set it) and
            # intercept atexit.register so we can observe the registration.
            r.replace('testfixtures.LogCapture.atexit_setup', False)
            r.replace('atexit.register', m.register)

            capture = LogCapture()

            # Creating the capture must register its atexit handler.
            compare([call.register(capture.atexit)], m.mock_calls)

            with catch_warnings(record=True) as w:
                capture.atexit()
                self.assertTrue(len(w), 1)
                expected_message = (
                    "LogCapture instances not uninstalled by shutdown, "
                    "loggers captured:\n"
                    "(None,)"
                )
                compare(str(w[0].message), expected_message)

            capture.uninstall()

            compare(set(), LogCapture.instances)

            # Running atexit again after uninstall must be a no-op.
            capture.atexit()
Ejemplo n.º 5
0
class UserSignupTests(TestCase):
    """A testpilot.newuser log event should be emitted on user signup."""

    def setUp(self):
        # Capture emitted records and build the formatter under test.
        self.handler = LogCapture()
        self.formatter = JsonLogFormatter(logger_name='testpilot.newuser')

        self.username = '******'
        self.password = '******'
        # NOTE(review): this literal was redacted and contains no %s
        # placeholder, so the '%' formatting raises TypeError at runtime;
        # restore the original format string (e.g. '%s@example.com').
        self.email = '*****@*****.**' % self.username

        self.user = User.objects.create_user(
            username=self.username,
            email=self.email,
            password=self.password)

        # Remove any profile for this user so the signal handling under
        # test starts from a clean state.
        UserProfile.objects.filter(user=self.user).delete()

    def tearDown(self):
        self.handler.uninstall()

    def test_newuser_log_event(self):
        """testpilot.newuser log event should be emitted on signup"""
        self.user.is_active = True
        user_signed_up.send(sender=self.user.__class__,
                            request=None,
                            user=self.user)

        # assertEqual, not the deprecated assertEquals alias.
        self.assertEqual(len(self.handler.records), 1)
        record = self.handler.records[0]

        details = json.loads(self.formatter.format(record))
        self.assertTrue('Fields' in details)

        fields = details['Fields']
        self.assertEqual(fields['uid'], self.user.id)
Ejemplo n.º 6
0
class TestElfTargetIsCompatibleWithNode(unittest.TestCase):
    """Test elftarget.is_compatible_with_node."""

    def setUp(self):
        # Fake node class advertising a 32-bit ARM ELF target.
        self.m3_class = mock.Mock()
        self.m3_class.ELF_TARGET = ('ELFCLASS32', 'EM_ARM')
        self.log = LogCapture('gateway_code', level=logging.DEBUG)

    def tearDown(self):
        self.log.uninstall()

    def test_m3_like_elf_check(self):
        """Test elftarget for an m3 like node."""
        # A matching elf firmware is accepted without logging.
        compatible = elftarget.is_compatible_with_node(
            firmware('idle_m3.elf'), self.m3_class)
        self.assertTrue(compatible)
        self.log.check()

        # A firmware for another target is rejected, silently.
        compatible = elftarget.is_compatible_with_node(
            firmware('node.z1'), self.m3_class)
        self.assertFalse(compatible)
        self.log.check()

        # A non-elf file is rejected with a warning.
        compatible = elftarget.is_compatible_with_node(
            firmware('wsn430_print_uids.hex'), self.m3_class)
        self.assertFalse(compatible)
        self.log.check(('gateway_code', 'WARNING',
                        'Invalid firmware: Not a valid elf file'))
Ejemplo n.º 7
0
class Test(unittest.TestCase):
    """Log-capture sanity check plus IR temperature sensor smoke tests."""

    def setUp(self):
        from testfixtures import LogCapture
        self.log_capture = LogCapture()
        self.log_group = "phidgeter.ir_temperature"
        self.lvl = "DEBUG"

    def tearDown(self):
        self.log_capture.uninstall()

    def test_log_captures(self):
        # verification of log matching functionality
        from logging import getLogger
        getLogger().info("a message")
        self.log_capture.check(("root", "INFO", "a message"))

    def test_sensor_is_available(self):
        """Sensor opens, reports a non-negative temperature and closes."""
        ir_temp = IRSensor()
        assert ir_temp.open_phidget() == True
        assert ir_temp.get_temperature() >= 0.0
        assert ir_temp.close_phidget() == True

    def test_sensor_is_room_temperature(self):
        """Sensor reads at least room temperature (requires hardware)."""
        ir_temp = IRSensor()
        assert ir_temp.open_phidget() == True
        # Parenthesized single-argument print works identically on
        # Python 2 and 3; the bare print statement was Python-2-only.
        print(ir_temp.get_temperature())
        assert ir_temp.get_temperature() >= 20.0
        assert ir_temp.close_phidget() == True
Ejemplo n.º 8
0
 def test_simple_manual_install(self):
     """With install=False the capture stays inert until install()."""
     capture = LogCapture(install=False)
     root.info('before')      # not captured: not yet installed
     capture.install()
     root.info('during')      # captured
     capture.uninstall()
     root.info('after')       # not captured: already removed
     assert str(capture) == "root INFO\n  during"
Ejemplo n.º 9
0
class TestStartExtension(TestCase):
    """Test the start extension."""

    def setUp(self):
        self.buildout_config = {
            'buildout': {'versions': 'versions'},
            'versions': {'A-Package': '1.0'},
        }
        self.i = Installer(versions=self.buildout_config['versions'])
        self.logging = LogCapture('zc.buildout.easy_install')
        # Run the extension, which installs our patches.
        start(self.buildout_config)
        # Another extension (e.g. mr.developer) may run after us and call
        # 'default_versions' again after tweaking the buildout versions;
        # we must cope with pins added that way too.
        self.buildout_config['versions']['extra-lower'] = '4.2'
        self.buildout_config['versions']['ExtraUpper'] = '4.2'
        default_versions(self.buildout_config['versions'])

    def tearDown(self):
        self.logging.uninstall()

    def _check(self, requirement, expected):
        """Constrain `requirement` and compare the result to `expected`."""
        parsed = tuple(parse_requirements(requirement))[0]
        constrained = self.i._constrain(parsed)
        self.failUnless(isinstance(constrained, Requirement))
        compare(expected, str(constrained))

    def test_normal(self):
        self._check('A-Package', 'A-Package==1.0')
        self.logging.check()

    def test_lowercase(self):
        self._check('a_package', 'a-package==1.0')
        self.logging.check()

    def test_uppercase(self):
        self._check('A_PACKAGE', 'A-PACKAGE==1.0')
        self.logging.check()

    def test_sanity(self):
        # A non-pinned package is still reported as such.
        self._check('B_Package', 'B-Package')
        self.logging.check()

    def test_extra(self):
        # A pin added after the extension ran is picked up as well.
        self._check('extra-lower', 'extra-lower==4.2')
        self.logging.check()

    def test_extra_lower(self):
        # ...but such a late pin is not lowercased.
        self._check('ExtraUpper', 'ExtraUpper')
        self.logging.check()
Ejemplo n.º 10
0
 def test_multiple_loggers(self):
     """A tuple of names captures exactly those loggers (and children)."""
     capture = LogCapture(('one.child', 'two'))
     # Log on every logger; only 'two' and 'one.child' are captured.
     for logger, message in ((root, '1'), (one, '2'),
                             (two, '3'), (child, '4')):
         logger.info(message)
     capture.uninstall()
     assert str(capture) == (
         "two INFO\n  3\n"
         "one.child INFO\n  4"
     )
class TestPickleSerializer(unittest.TestCase):
    """Publish a pickled payload and check queue state plus consumer logs."""

    layer = ZAMQP_FUNCTIONAL_TESTING

    def setUp(self):
        from testfixtures import LogCapture
        self.l = LogCapture("c.zamqp.tests")

    def tearDown(self):
        self.l.uninstall()

    def _assert_queue_depth(self, depth):
        """Assert 'my.picklequeue' currently holds `depth` messages."""
        rabbitctl = self.layer['rabbitctl']
        listing = rabbitctl('list_queues')[0].split("\n")
        self.assertIn("my.picklequeue\t%d" % depth, listing)

    def _testDeclareQueue(self):
        self._assert_queue_depth(0)

    def testDeclareQueue(self):
        runAsyncTest(self._testDeclareQueue)

    def _testDeclareQueueAgain(self):
        self._assert_queue_depth(0)

    def testDeclareQueueAgain(self):
        runAsyncTest(self._testDeclareQueueAgain)

    def _testPublishToQueue(self):
        self._assert_queue_depth(1)

    def _testPublishToQueueAndConsumeIt(self):
        self._assert_queue_depth(0)

    def testPublishToQueueAndConsumeIt(self):
        runAsyncTest(self._testDeclareQueue)

        from zope.component import getUtility
        from collective.zamqp.interfaces import IProducer
        producer = getUtility(IProducer, name="my.picklequeue")
        producer.publish({"key": "value"})

        runAsyncTest(self._testPublishToQueue)
        runAsyncTest(self._testPublishToQueueAndConsumeIt)
        # The consumer logs the AMQP properties, the payload and its type.
        self.l.check(
            ('c.zamqp.tests', 'INFO',
             "<BasicProperties(['delivery_mode=2', "
             "'content_type=application/x-python-serialize'])>"),
            ('c.zamqp.tests', 'INFO', "{'key': 'value'}"),
            ('c.zamqp.tests', 'INFO', "<type 'dict'>")
        )
Ejemplo n.º 12
0
 def test_specific_logger(self):
     """Capturing a named logger also captures its descendants."""
     capture = LogCapture('one')
     # Log everywhere; only 'one' and 'one.child' are captured.
     for logger, message in ((root, '1'), (one, '2'),
                             (two, '3'), (child, '4')):
         logger.info(message)
     capture.uninstall()
     assert str(capture) == (
         "one INFO\n  2\n"
         "one.child INFO\n  4"
     )
class TestComponentResolving(unittest.TestCase):
    """Tests for ComponentParser category resolution and its warnings."""

    def setUp(self):
        self.l = LogCapture()

    def tearDown(self):
        self.l.uninstall()

    def test_that_resolve_does_not_warn_if_component_list_empty(self):
        parser = ComponentParser()

        parser.parse(ET.Element('component'))
        parser.resolve([])

        assert_that(str(self.l), is_("No logging captured"))

    def test_that_resolve_does_not_warn_if_all_categories_found(self):
        category = MockCategory('something')
        tree = _component_tree_with_category('something', 'sample component')
        parser = ComponentParser()

        parser.parse(tree)
        parser.resolve([category])

        assert_that(str(self.l), is_("No logging captured"))

    def test_that_resolve_links_to_category(self):
        category = MockCategory('something')
        tree = _component_tree_with_category('something', 'sample component')
        parser = ComponentParser()

        parser.parse(tree)
        parser.resolve([category])
        component = parser.components['sample component']

        # Linking is bidirectional: the component knows its category and
        # the category was told about the component.
        assert_that(component.category('something'),
                    is_(same_instance(category)))
        assert_that(category.set_component_was_called, is_(True))
        assert_that(category.component, is_(same_instance(component)))

    def test_that_resolve_warns_if_category_referenced_which_does_not_exist(self):
        tree = _component_tree_with_category('something', 'sample component')
        parser = ComponentParser()

        parser.parse(tree)
        parser.resolve([])

        self.l.check(('root', 'WARNING',
                      "Component '{0}' references undefined category "
                      "'{1}'".format('sample component', 'something')))
Ejemplo n.º 14
0
class TestConstrain(TestCase):
    """Tests for the module-level _constrain patch."""

    def setUp(self):
        self._versions = {}
        self.i = FakeInstaller(self._versions)
        self.logging = LogCapture('zc.buildout.easy_install')

    def tearDown(self):
        self.logging.uninstall()

    def _check(self, requirement, expected):
        """Run _constrain on `requirement`; expect `expected` back."""
        parsed = tuple(parse_requirements(requirement))[0]
        result = _constrain(self.i, parsed)
        self.failUnless(isinstance(result, Requirement))
        compare(expected, str(result))

    def test_normal(self):
        self._versions['a-package'] = '1.0'
        self._check('a_package', 'a-package==1.0')
        self.logging.check()

    def test_extras(self):
        # Extras are preserved through the constraining.
        self._versions['a-package'] = '1.0'
        self._check('a_package[some,thing]',
                    'a-package[some,thing]==1.0')
        self.logging.check()

    def test_capitalisation(self):
        self._versions['a-package'] = '1.0'
        self._check('A-Package', 'A-Package==1.0')
        self.logging.check()

    def test_no_version(self):
        self._check('a_package', 'a-package')
        self.logging.check()

    def test_incompatible_version(self):
        self._versions['a-package'] = '1.0'
        with ShouldRaise(IncompatibleVersionError('Bad version', '1.0')):
            self._check('a-package==2.0', 'xxx')
        # The conflict is also logged as an error.
        self.logging.check(
            ('zc.buildout.easy_install',
             'ERROR',
             "The version, 1.0, is not consistent with"
             " the requirement, 'a-package==2.0'.")
        )
Ejemplo n.º 15
0
    def test_uninstall(self):
        """Installing two captures and uninstalling them must restore the
        loggers' levels and handlers exactly as they were beforehand."""
        # Lets start off with a couple of loggers:

        root = getLogger()
        child = getLogger('child')

        # Lets also record the handlers for these loggers before
        # we start the test:

        before_root = root.handlers[:]
        before_child = child.handlers[:]

        # Lets also record the levels for the loggers:

        old_root_level=root.level
        old_child_level=child.level

        # Now the test:

        try:
            root.setLevel(49)
            child.setLevel(69)
            l1 = LogCapture()
            l2 = LogCapture('child')
            root = getLogger()
            root.info('1')
            child = getLogger('child')
            # Installing a LogCapture drops the captured logger's level
            # to 1 so every record is seen.
            assert root.level == 1
            assert child.level == 1

            child.info('2')
            # l1 captures the root logger, so it sees both records.
            assert str(l1) == (
                "root INFO\n  1\n"
                "child INFO\n  2"
            )
            # l2 captures only the 'child' logger.
            assert str(l2) == (
                "child INFO\n  2"
            )
            # Uninstalling must restore the levels set just before the
            # captures were installed (49 and 69).
            l2.uninstall()
            l1.uninstall()
            assert root.level == 49
            assert child.level == 69
        finally:
           root.setLevel(old_root_level)
           child.setLevel(old_child_level)

        # Now we check the handlers are as they were before
        # the test:
        assert root.handlers == before_root
        assert child.handlers == before_child
Ejemplo n.º 16
0
class TestCheck(TestCase):
    """Tests for checker.check output logging."""

    def setUp(self):
        self.r = Replacer()
        self.l = LogCapture()

    def tearDown(self):
        self.l.uninstall()
        self.r.restore()

    def checker_returns(self, output):
        """Patch checker.resolve so the resolved checker yields `output`."""
        resolve = Mock()
        self.r.replace('checker.resolve', resolve)
        resolve.return_value = lambda config_folder, param: output
        return resolve

    def test_bad_checker(self):
        from checker import check
        check = should_raise(check, ImportError('No module named unknown'))
        check('/config', 'unknown', None)

    def test_normal(self):
        resolve = self.checker_returns('some output')
        check('/config', 'achecker', None)
        compare(resolve.call_args_list,
                [(('checker.checkers.achecker.check',), {})])

    def test_log_newline(self):
        # A trailing newline is stripped before logging.
        self.checker_returns('some output\n')
        check('/config', 'achecker', 'aparam')
        self.l.check(('root', 'INFO', 'some output'))

    def test_log_no_newline(self):
        self.checker_returns('some output')
        check('/config', 'achecker', 'aparam')
        self.l.check(('root', 'INFO', 'some output'))

    def test_no_log_empty(self):
        # Empty output produces no log entry at all.
        self.checker_returns('')
        check('/config', 'achecker', 'aparam')
        self.l.check()
 def test_atexit(self):
     """The class atexit hook is registered; calling it warns while a
     capture is still installed and is a no-op after uninstall."""
     capture = LogCapture()
     # The handler must be present in atexit's registry.
     registered = [entry[0] for entry in atexit._exithandlers]
     self.assertTrue(LogCapture.atexit in registered)
     with catch_warnings(record=True) as w:
         capture.atexit()
         self.assertTrue(len(w), 1)
         expected = (
             "LogCapture instances not uninstalled by shutdown, "
             "loggers captured:\n"
             "(None,)"
         )
         compare(str(w[0].message), expected)
     capture.uninstall()
     # Check running it again has no ill effects.
     capture.atexit()
Ejemplo n.º 18
0
def test_query_sanitazion(query_sanitazion):
    """POSTing a form parameter declared in the spec must reach the handler
    without sanitization warnings.

    We inspect the captured log text instead of using LogCapture.check()
    because check() would pin the exact sequence of log records, which is
    too restrictive for future development; we only care that the one
    message was logged.
    """
    app_client = query_sanitazion.app.test_client()
    capture = LogCapture()
    try:
        url = '/v1.0/greeting'
        response = app_client.post(url, data={'name': 'Jane Doe'})
        messages = [line.strip() for line in str(capture).split("\n")]
        assert "FormData parameter 'name' in function arguments" in messages
        assert "Query Parameter 'name' in function arguments" not in messages
        assert "Function argument 'name' not defined in specification" not in messages
        assert response.status_code == 200
    finally:
        # Always remove the capture handler — the original only uninstalled
        # on success, leaking the handler into later tests when an
        # assertion failed.
        capture.uninstall()
Ejemplo n.º 19
0
class TestHandleAnswer(unittest.TestCase):
    """Tests for ControlNodeSerial._handle_answer dispatching."""

    def setUp(self):
        self.cn = cn_interface.ControlNodeSerial('tty')
        self.log = LogCapture('gateway_code', level=logging.DEBUG)

    def tearDown(self):
        self.log.uninstall()

    def test_config_ack(self):
        # A set_time ack logs the measured delay in microseconds.
        self.cn._handle_answer('config_ack set_time 0.123456')
        self.log.check(
            ('gateway_code', 'DEBUG', 'config_ack set_time'),
            ('gateway_code', 'INFO', 'Control Node set time delay: 123456 us')
        )

        # Any other ack is only logged at debug level.
        self.log.clear()
        self.cn._handle_answer('config_ack anything')
        self.log.check(
            ('gateway_code', 'DEBUG', 'config_ack anything'),
        )

    def test_error(self):
        self.cn._handle_answer('error 42')
        self.log.check(
            ('gateway_code', 'ERROR', 'Control node error: %r' % '42'))

    def test_cn_serial_error(self):
        self.cn._handle_answer('cn_serial_error: any error msg')
        self.log.check(
            ('gateway_code', 'ERROR', 'cn_serial_error: any error msg'))

    def test_measures_debug(self):
        msg = ('measures_debug: consumption_measure 1377268768.841070:'
               '1.78250 0.000000 3.230000 0.080003')

        debug_cb = mock.Mock()

        # With a callback set, measures lines are forwarded verbatim.
        self.cn.measures_debug = debug_cb
        self.cn._handle_answer(msg)
        debug_cb.assert_called_with(msg)

        # Without a callback, nothing is invoked.
        debug_cb.reset_mock()
        self.cn.measures_debug = None
        self.cn._handle_answer(msg)
        self.assertFalse(debug_cb.called)
Ejemplo n.º 20
0
class TestJsonLogFormatter(unittest2.TestCase):
    """Tests for JsonLogFormatter's JSON record serialization."""

    def setUp(self):
        self.handler = LogCapture()
        self.formatter = JsonLogFormatter()

    def tearDown(self):
        self.handler.uninstall()

    def test_basic_operation(self):
        """A plain debug message serializes with all standard fields."""
        logging.debug("simple test")
        # assertEqual, not the deprecated assertEquals alias.
        self.assertEqual(len(self.handler.records), 1)
        details = json.loads(self.formatter.format(self.handler.records[0]))
        self.assertEqual(details["message"], "simple test")
        self.assertEqual(details["name"], "root")
        self.assertEqual(details["pid"], os.getpid())
        self.assertEqual(details["op"], "root")
        self.assertEqual(details["v"], 1)
        self.assertTrue("time" in details)

    def test_custom_paramters(self):
        """Values passed via extra= become top-level JSON fields."""
        logger = logging.getLogger("mozsvc.test.test_logging")
        # warning(), not the deprecated warn alias.
        logger.warning("custom test %s", "one", extra={
            "more": "stuff",
            "op": "mytest",
        })
        self.assertEqual(len(self.handler.records), 1)
        details = json.loads(self.formatter.format(self.handler.records[0]))
        self.assertEqual(details["message"], "custom test one")
        self.assertEqual(details["name"], "mozsvc.test.test_logging")
        self.assertEqual(details["op"], "mytest")
        self.assertEqual(details["more"], "stuff")

    def test_logging_error_tracebacks(self):
        """logging.exception records carry error and traceback details."""
        try:
            raise ValueError("\n")
        except Exception:
            logging.exception("there was an error")
        self.assertEqual(len(self.handler.records), 1)
        details = json.loads(self.formatter.format(self.handler.records[0]))
        self.assertEqual(details["message"], "there was an error")
        # NOTE: the expected reprs below are Python 2 specific.
        self.assertEqual(details["error"], "ValueError('\\n',)")
        tblines = details["traceback"].strip().split("\n")
        self.assertEqual(tblines[-1], details["error"])
        self.assertEqual(tblines[-2], "<type 'exceptions.ValueError'>")
Ejemplo n.º 21
0
class TestCheck(TestCase):
    """Tests for checker.check output logging (mock-based resolve)."""

    def setUp(self):
        self.r = Replacer()
        self.l = LogCapture()

    def tearDown(self):
        self.l.uninstall()
        self.r.restore()

    def checker_returns(self, output):
        """Patch checker.check.resolve so the checker yields `output`."""
        resolve_mock = Mock()
        self.r.replace("checker.check.resolve", resolve_mock)
        resolve_mock.return_value = lambda config_folder, param: output
        return resolve_mock

    def test_bad_checker(self):
        with ShouldRaise(ImportError("No module named unknown")):
            check("/config", "unknown", None)

    def test_normal(self):
        resolve_mock = self.checker_returns("some output")
        check("/config", "achecker", None)
        compare(resolve_mock.call_args_list,
                [(("checker.checkers.achecker.check",), {})])

    def test_log_newline(self):
        # A trailing newline is stripped before logging.
        self.checker_returns("some output\n")
        check("/config", "achecker", "aparam")
        self.l.check(("root", "INFO", "some output"))

    def test_log_no_newline(self):
        self.checker_returns("some output")
        check("/config", "achecker", "aparam")
        self.l.check(("root", "INFO", "some output"))

    def test_no_log_empty(self):
        # No output, no log entry.
        self.checker_returns("")
        check("/config", "achecker", "aparam")
        self.l.check()
Ejemplo n.º 22
0
class TestHTMLFilter(TestCase):
    """HTMLFilter must escape markup in log messages, byte and unicode."""

    def setUp(self):
        self.log = LogCapture()
        self.logger = getLogger()
        self.log.addFilter(HTMLFilter())

    def tearDown(self):
        self.log.uninstall()

    def _check_logged(self, message, expected):
        """Log `message` at INFO and expect `expected` to be captured."""
        self.logger.info(message)
        self.log.check(('root', 'INFO', expected))

    def test_plain_string(self):
        self._check_logged('foo', 'foo')

    def test_html_string(self):
        self._check_logged('<foo &bar>', '&lt;foo &amp;bar&gt;')

    def test_with_params_string(self):
        # %-parameters are interpolated before filtering.
        self.logger.info('%s', 'foo')
        self.log.check(('root', 'INFO', 'foo'))

    def test_plain_unicode(self):
        self._check_logged(u"accentu\u00E9", u'accentu\xe9')

    def test_html_unicode(self):
        self._check_logged(u"<u\u00E9 &bar>", u'&lt;u\xe9 &amp;bar&gt;')

    def test_with_params_unicode(self):
        self.logger.info(u"\u00E9%s", u"accentu\u00E9")
        self.log.check(('root', 'INFO', u'\xe9accentu\xe9'))

    def test_some_object(self):
        # Non-string messages are stringified, then filtered.
        class AnObject(object):

            def __repr__(self):
                return 'obj'
            __str__ = __repr__
        self._check_logged(AnObject(), 'obj')
Ejemplo n.º 23
0
    def test_uninstall_more_than_once(self):
        """Uninstalling a LogCapture repeatedly is harmless, and so is
        uninstall_all() afterwards."""
        old_level = root.level
        try:
            root.setLevel(49)
            capture = LogCapture()
            assert root.level == 1
            capture.uninstall()
            assert root.level == 49
            # A second uninstall must not disturb a level set in between.
            root.setLevel(69)
            capture.uninstall()
            assert root.level == 69
        finally:
            root.setLevel(old_level)

        # Even after manual uninstalls, uninstall_all is a safe backstop.
        capture.uninstall_all()
Ejemplo n.º 24
0
class DatabaseHandlerTests(TestCase):
    """DatabaseHandler must commit on success, roll back and log on error."""

    def setUp(self):
        self.dir = TempDirectory()
        self.db_path = self.dir.getpath('test.db')
        # Independent connection used to verify what was really committed.
        self.conn = sqlite3.connect(self.db_path)
        self.conn.execute('create table notes '
                          '(filename varchar, text varchar)')
        self.conn.commit()
        self.log = LogCapture()

    def tearDown(self):
        self.log.uninstall()
        self.dir.cleanup()

    def test_normal(self):
        with DatabaseHandler(self.db_path) as handler:
            handler.conn.execute('insert into notes values (?, ?)',
                                 ('test.txt', 'a note'))
            handler.conn.commit()
        # The row is visible from the independent connection...
        rows = self.conn.cursor().execute('select * from notes').fetchall()
        self.assertEqual(rows, [('test.txt', 'a note')])
        # ...and nothing was logged.
        self.log.check()

    def test_exception(self):
        with ShouldRaise(Exception('foo')):
            with DatabaseHandler(self.db_path) as handler:
                handler.conn.execute('insert into notes values (?, ?)',
                                     ('test.txt', 'a note'))
                raise Exception('foo')
        # The insert was rolled back...
        rows = handler.conn.cursor().execute('select * from notes').fetchall()
        self.assertEqual(rows, [])
        # ...and the failure was logged.
        self.log.check(('root', 'ERROR', 'Something went wrong'))
Ejemplo n.º 25
0
class BaseTestCase(TestCase):
    """Shared fixture: a primary user, five extra users, three experiments."""

    maxDiff = None

    def setUp(self):
        super(BaseTestCase, self).setUp()

        self.handler = LogCapture()

        self.username = '******'
        self.password = '******'
        # NOTE(review): this literal has been redacted and contains no %s
        # placeholder — the '%' formatting will raise TypeError; restore
        # the original format string.
        self.email = '*****@*****.**' % self.username

        self.user = User.objects.create_user(
            username=self.username,
            email=self.email,
            password=self.password)

        # Five additional users, keyed by username.
        self.users = {}
        for idx in range(0, 5):
            extra_user = User.objects.create_user(
                username='******' % idx,
                email='*****@*****.**' % idx,
                password='******' % idx)
            self.users[extra_user.username] = extra_user

        # Three experiments, keyed by slug.
        self.experiments = {}
        for idx in range(1, 4):
            experiment, _created = Experiment.objects.get_or_create(
                slug="test-%s" % idx, defaults=dict(
                    order=idx,
                    title="Longer Test Title %s" % idx,
                    short_title="Test %s" % idx,
                    description="This is a test",
                    introduction="<h1>Hello, Test!</h1>",
                    addon_id="*****@*****.**" % idx
                ))
            self.experiments[experiment.slug] = experiment

    def tearDown(self):
        self.handler.uninstall()
Ejemplo n.º 26
0
class RequestSummaryLoggingTests(TestCase):
    """Request-summary logging: normal paths logged, blacklisted ones not."""

    def setUp(self):
        super(RequestSummaryLoggingTests, self).setUp()
        self.handler = LogCapture()

    def tearDown(self):
        self.handler.uninstall()

    def _get_ok(self, url):
        """GET `url`, assert a 200 response and return it."""
        response = self.client.get(url)
        self.assertEqual(200, response.status_code)
        return response

    def test_unblackisted_are_logged(self):
        self.handler.records = []
        url = '/__version__'
        self._get_ok(url)
        # The summary record must carry the request path.
        self.assertEqual(url, self.handler.records[0].path)

    def test_blacklisted_are_not_logged(self):
        self.handler.records = []
        self._get_ok('/__heartbeat__')
        self.assertEqual(0, len(self.handler.records))
Ejemplo n.º 27
0
class TestJsonLogFormatter(unittest.TestCase):
    """Schema-validated tests for JsonLogFormatter output."""

    def setUp(self):
        self.handler = LogCapture()
        self.logger_name = "TestingTestPilot"
        self.formatter = JsonLogFormatter(logger_name=self.logger_name)

    def tearDown(self):
        self.handler.uninstall()

    def _fetchLastLog(self):
        """Format the single captured record, validate it against the JSON
        logging schema and return the parsed dict."""
        self.assertEqual(len(self.handler.records), 1)
        details = json.loads(self.formatter.format(self.handler.records[0]))
        jsonschema.validate(details, JSON_LOGGING_SCHEMA)
        return details

    def test_basic_operation(self):
        """Ensure log formatter contains all the expected fields and values"""
        message_text = "simple test"
        logging.debug(message_text)
        details = self._fetchLastLog()

        for key in ("Timestamp", "Hostname"):
            self.assertTrue(key in details)

        expected_meta = {
            "Severity": 7,
            "Type": "root",
            "Pid": os.getpid(),
            "Logger": self.logger_name,
            "EnvVersion": self.formatter.LOGGING_FORMAT_VERSION,
        }
        for key, value in expected_meta.items():
            self.assertEqual(value, details[key])

        self.assertEqual(details['Fields']['msg'], message_text)

    def test_custom_paramters(self):
        """Ensure log formatter can handle custom parameters"""
        logger = logging.getLogger("mozsvc.test.test_logging")
        logger.warning("custom test %s", "one", extra={"more": "stuff"})
        details = self._fetchLastLog()

        self.assertEqual(details["Type"], "mozsvc.test.test_logging")
        self.assertEqual(details["Severity"], 4)

        fields = details['Fields']
        self.assertEqual(fields["msg"], "custom test one")
        self.assertEqual(fields["more"], "stuff")

    def test_logging_error_tracebacks(self):
        """Ensure log formatter includes exception traceback information"""
        try:
            raise ValueError("\n")
        except Exception:
            logging.exception("there was an error")
        details = self._fetchLastLog()

        self.assertEqual(details["Severity"], 3)

        fields = details['Fields']
        self.assertEqual(fields['msg'], 'there was an error')
        self.assertEqual(fields['error'], "ValueError('\\n',)")

        self.assertTrue(fields['traceback'].startswith('Uncaught exception:'))
        self.assertTrue("ValueError" in fields['traceback'])
Ejemplo n.º 28
0
class TestProcessPaymentsLocks(CacheResetMixin, TransactionTestCase):
    """Test that process_payments respects database row locks.

    Runs real threads with separate database connections, hence
    TransactionTestCase (threads must see each other's committed data).
    """

    def setUp(self):
        # NOTE(review): does not call super().setUp() — confirm the mixin
        # does not require it.
        # Temporary directory holding the command's lock file.
        self.tempdir = TempDirectory()
        self.account_in = BankAccount(account_number='123456/7890',
                                      currency='CZK')
        self.account_ex = BankAccount(account_number='987654/3210',
                                      currency='CZK')
        self.account_in.save()
        self.account_ex.save()
        # One unprocessed payment on each account.
        get_payment(identifier='PAYMENT_1',
                    account=self.account_in,
                    state=PaymentState.READY_TO_PROCESS).save()
        get_payment(identifier='PAYMENT_2',
                    account=self.account_ex,
                    state=PaymentState.READY_TO_PROCESS).save()
        self.log_handler = LogCapture(
            'django_pain.management.commands.process_payments',
            propagate=False)
        # An exception raised in a thread does not fail the test — collect
        # them in a queue and assert it is empty afterwards.
        self.errors = Queue()  # type: Queue

    def tearDown(self):
        # Detach the log capture and remove the temporary lock directory.
        self.log_handler.uninstall()
        self.tempdir.cleanup()

    @override_settings(
        PAIN_PROCESSORS={
            'dummy':
            'django_pain.tests.commands.test_process_payments.DummyTruePaymentProcessor'
        })
    def test_processing_does_not_overwrite_locked_rows(self):
        """A row locked by another transaction must not be overwritten.

        The query thread locks PAYMENT_2 with SELECT ... FOR UPDATE and
        keeps the transaction open while the command runs.
        """
        processing_finished = threading.Event()
        query_finished = threading.Event()

        def target_processing():
            # Wait until the other thread holds the row lock, then run the
            # command; it must skip the locked PAYMENT_2.
            query_finished.wait()
            try:
                call_command('process_payments')
            except Exception as e:  # pragma: no cover
                self.errors.put(e)
                raise e
            finally:
                processing_finished.set()
                # Connections are thread-local; close them so the test DB
                # can be torn down.
                close_old_connections()

        def target_query():
            try:
                with transaction.atomic():
                    # Lock PAYMENT_2 and modify it inside an open
                    # transaction held for the whole command run.
                    payment = BankPayment.objects.select_for_update().filter(
                        identifier='PAYMENT_2').get()
                    payment.processor = 'manual'
                    payment.save()
                    query_finished.set()
                    processing_finished.wait()
            except Exception as e:  # pragma: no cover
                self.errors.put(e)
                raise e
            finally:
                # Set again in the error path so the other thread never
                # blocks forever on query_finished.wait().
                query_finished.set()
                close_old_connections()

        threads = [
            threading.Thread(target=target_processing),
            threading.Thread(target=target_query)
        ]
        for t in threads:
            t.start()
        for t in threads:
            t.join()

        self.assertTrue(self.errors.empty())
        # PAYMENT_1 was processed; locked PAYMENT_2 kept the query
        # thread's values and stayed unprocessed.
        self.assertQuerysetEqual(BankPayment.objects.values_list(
            'identifier', 'account', 'state', 'processor'),
                                 [('PAYMENT_1', self.account_in.pk,
                                   PaymentState.PROCESSED, 'dummy'),
                                  ('PAYMENT_2', self.account_ex.pk,
                                   PaymentState.READY_TO_PROCESS, 'manual')],
                                 transform=tuple,
                                 ordered=False)

    @override_settings(
        PAIN_PROCESSORS={
            'dummy':
            'django_pain.tests.commands.test_process_payments.DummyTruePaymentProcessor'
        })
    def test_processed_rows_not_overwritten(self):
        """Rows changed while the command runs must keep their new values.

        The processor is mocked to pause mid-processing; meanwhile another
        thread marks all unlocked payments as manually processed.
        """
        processing_started = threading.Event()
        query_finished = threading.Event()

        def mock_process_payments(payments):
            # Signal that processing is underway, then block until the
            # concurrent update has committed.
            processing_started.set()
            query_finished.wait()
            return [ProcessPaymentResult(result=True) for p in payments]

        def target_processing():
            try:
                # cache may prevent mocking
                get_processor_instance.cache_clear()
                get_processor_class.cache_clear()
                with patch(
                        'django_pain.tests.commands.test_process_payments.DummyTruePaymentProcessor'
                ) as MockClass:
                    instance = MockClass.return_value
                    instance.process_payments = mock_process_payments
                    # Excluding account_ex keeps PAYMENT_2 out of the
                    # command's working set.
                    call_command('process_payments', '--exclude-accounts',
                                 self.account_ex.account_number)
            except Exception as e:  # pragma: no cover
                self.errors.put(e)
                raise e
            finally:
                # Set in the error path too, so target_query cannot
                # block forever.
                processing_started.set()
                # mock might be cached
                get_processor_instance.cache_clear()
                get_processor_class.cache_clear()
                close_old_connections()

        def target_query():
            processing_started.wait()
            try:
                with transaction.atomic():
                    # skip_locked: touch only rows the command is NOT
                    # currently holding.
                    payments = BankPayment.objects.select_for_update(
                        skip_locked=True).all()
                    for p in payments:
                        p.state = PaymentState.PROCESSED
                        p.processor = 'manual'
                        p.save()
            except Exception as e:  # pragma: no cover
                self.errors.put(e)
                raise e
            finally:
                query_finished.set()
                close_old_connections()

        threads = [
            threading.Thread(target=target_processing),
            threading.Thread(target=target_query)
        ]
        for t in threads:
            t.start()
        for t in threads:
            t.join()

        self.assertTrue(self.errors.empty())
        # PAYMENT_1 was processed by the command; PAYMENT_2 keeps the
        # concurrent manual update.
        self.assertQuerysetEqual(BankPayment.objects.values_list(
            'identifier', 'account', 'state', 'processor'),
                                 [('PAYMENT_1', self.account_in.pk,
                                   PaymentState.PROCESSED, 'dummy'),
                                  ('PAYMENT_2', self.account_ex.pk,
                                   PaymentState.PROCESSED, 'manual')],
                                 transform=tuple,
                                 ordered=False)
Ejemplo n.º 29
0
class TestProcessPayments(CacheResetMixin, TestCase):
    """Test process_payments command."""
    def setUp(self):
        """Create an account, one unprocessed payment and a log capture."""
        super().setUp()
        # Temporary directory holding the command's lock file.
        self.tempdir = TempDirectory()
        self.account = BankAccount(account_number='123456/7890',
                                   currency='CZK')
        self.account.save()
        get_payment(identifier='PAYMENT_1',
                    account=self.account,
                    state=PaymentState.READY_TO_PROCESS).save()
        self.log_handler = LogCapture(
            'django_pain.management.commands.process_payments',
            propagate=False)

    def tearDown(self):
        # Detach the log capture, then remove the temporary lock directory.
        self.log_handler.uninstall()
        self.tempdir.cleanup()

    def _test_non_existing_account(self, param_name):
        """
        Shared check that unknown accounts abort the command.

        param_name should contain either '--include-accounts' or '--exclude-accounts'
        """
        logger = 'django_pain.management.commands.process_payments'
        BankAccount.objects.create(account_number='987654/3210',
                                   currency='CZK')
        lock_file = os.path.join(self.tempdir.path, 'test.lock')
        with override_settings(PAIN_PROCESS_PAYMENTS_LOCK_FILE=lock_file):
            out = StringIO()
            err = StringIO()
            # Two of the three accounts do not exist -> CommandError.
            with self.assertRaises(CommandError):
                call_command('process_payments', param_name,
                             'xxxxxx/xxxx,yyyyyy/yyyy,987654/3210',
                             stdout=out, stderr=err)

            self.assertEqual(out.getvalue(), '')
            self.assertEqual(err.getvalue(), '')
            self.log_handler.check(
                (logger, 'INFO', 'Command process_payments started.'),
                (logger, 'INFO', 'Lock acquired.'),
                (logger, 'ERROR',
                 'Following accounts do not exist: xxxxxx/xxxx, yyyyyy/yyyy. Terminating.'
                 ),
            )

    @override_settings(
        PAIN_PROCESSORS={
            'dummy':
            'django_pain.tests.commands.test_process_payments.DummyTruePaymentProcessor'
        })
    def test_payments_processed(self):
        """Test processed payments."""
        logger = 'django_pain.management.commands.process_payments'
        lock_file = os.path.join(self.tempdir.path, 'test.lock')
        with override_settings(PAIN_PROCESS_PAYMENTS_LOCK_FILE=lock_file):
            call_command('process_payments')

            # The always-true processor claims the payment.
            expected = [('PAYMENT_1', self.account.pk,
                         PaymentState.PROCESSED, 'dummy')]
            self.assertQuerysetEqual(
                BankPayment.objects.values_list('identifier', 'account',
                                                'state', 'processor'),
                expected, transform=tuple, ordered=False)
            self.assertEqual(BankPayment.objects.first().objective,
                             'True objective')
            self.log_handler.check(
                (logger, 'INFO', 'Command process_payments started.'),
                (logger, 'INFO', 'Lock acquired.'),
                (logger, 'INFO', 'Processing 1 unprocessed payments.'),
                (logger, 'INFO', 'Processing payments with processor dummy.'),
                (logger, 'INFO', 'Marking 0 unprocessed payments as DEFERRED.'),
                (logger, 'INFO', 'Command process_payments finished.'),
            )

    @override_settings(
        PAIN_PROCESSORS={
            'dummy':
            'django_pain.tests.commands.test_process_payments.DummyFalsePaymentProcessor'
        })
    def test_payments_deferred(self):
        """Test deferred payments."""
        logger = 'django_pain.management.commands.process_payments'
        lock_file = os.path.join(self.tempdir.path, 'test.lock')
        with override_settings(PAIN_PROCESS_PAYMENTS_LOCK_FILE=lock_file):
            call_command('process_payments')

            # The always-false processor rejects the payment, so it is
            # marked DEFERRED with no processor assigned.
            expected = [('PAYMENT_1', self.account.pk,
                         PaymentState.DEFERRED, '')]
            self.assertQuerysetEqual(
                BankPayment.objects.values_list('identifier', 'account',
                                                'state', 'processor'),
                expected, transform=tuple, ordered=False)
            self.assertEqual(BankPayment.objects.first().objective, '')
            self.log_handler.check(
                (logger, 'INFO', 'Command process_payments started.'),
                (logger, 'INFO', 'Lock acquired.'),
                (logger, 'INFO', 'Processing 1 unprocessed payments.'),
                (logger, 'INFO', 'Processing payments with processor dummy.'),
                (logger, 'INFO', 'Marking 1 unprocessed payments as DEFERRED.'),
                (logger, 'INFO', 'Command process_payments finished.'),
            )

    @override_settings(
        PAIN_PROCESSORS={
            'dummy':
            'django_pain.tests.commands.test_process_payments.DummyTrueErrorPaymentProcessor'
        })
    def test_payments_processed_with_error(self):
        """Test processed payments with processing error."""
        logger = 'django_pain.management.commands.process_payments'
        lock_file = os.path.join(self.tempdir.path, 'test.lock')
        with override_settings(PAIN_PROCESS_PAYMENTS_LOCK_FILE=lock_file):
            call_command('process_payments')

            # Payment is processed, but the DUPLICITY error is recorded.
            expected = [('PAYMENT_1', self.account.pk, PaymentState.PROCESSED,
                         'dummy', PaymentProcessingError.DUPLICITY)]
            self.assertQuerysetEqual(
                BankPayment.objects.values_list('identifier', 'account',
                                                'state', 'processor',
                                                'processing_error'),
                expected, transform=tuple, ordered=False)
            self.assertEqual(BankPayment.objects.first().objective,
                             'Dummy objective')
            self.log_handler.check(
                (logger, 'INFO', 'Command process_payments started.'),
                (logger, 'INFO', 'Lock acquired.'),
                (logger, 'INFO', 'Processing 1 unprocessed payments.'),
                (logger, 'INFO', 'Processing payments with processor dummy.'),
                (logger, 'INFO', 'Marking 0 unprocessed payments as DEFERRED.'),
                (logger, 'INFO', 'Command process_payments finished.'),
            )

    @override_settings(
        PAIN_PROCESSORS={
            'dummy':
            'django_pain.tests.commands.test_process_payments.DummyFalseErrorPaymentProcessor'
        })
    def test_payments_deferred_with_error(self):
        """Test deferred payments with processing error."""
        logger = 'django_pain.management.commands.process_payments'
        lock_file = os.path.join(self.tempdir.path, 'test.lock')
        with override_settings(PAIN_PROCESS_PAYMENTS_LOCK_FILE=lock_file):
            call_command('process_payments')

            # Payment is deferred and the DUPLICITY error is recorded.
            expected = [('PAYMENT_1', self.account.pk, PaymentState.DEFERRED,
                         'dummy', PaymentProcessingError.DUPLICITY)]
            self.assertQuerysetEqual(
                BankPayment.objects.values_list('identifier', 'account',
                                                'state', 'processor',
                                                'processing_error'),
                expected, transform=tuple, ordered=False)
            self.assertEqual(BankPayment.objects.first().objective,
                             'Dummy objective')
            uuid = BankPayment.objects.first().uuid
            self.log_handler.check(
                (logger, 'INFO', 'Command process_payments started.'),
                (logger, 'INFO', 'Lock acquired.'),
                (logger, 'INFO', 'Processing 1 unprocessed payments.'),
                (logger, 'INFO', 'Processing payments with processor dummy.'),
                (logger, 'INFO',
                 'Saving payment %s as DEFERRED with error PaymentProcessingError.DUPLICITY.'
                 % uuid),
                (logger, 'INFO', 'Marking 0 unprocessed payments as DEFERRED.'),
                (logger, 'INFO', 'Command process_payments finished.'),
            )

    @override_settings(PAIN_PROCESSORS=OrderedDict([
        ('dummy_false',
         'django_pain.tests.commands.test_process_payments.DummyFalsePaymentProcessor'
         ),
        ('dummy_true',
         'django_pain.tests.commands.test_process_payments.DummyTruePaymentProcessor'
         ),
    ]))
    def test_payments_from_to(self):
        """Test processed payments."""
        lock_file = os.path.join(self.tempdir.path, 'test.lock')
        with override_settings(PAIN_PROCESS_PAYMENTS_LOCK_FILE=lock_file):
            call_command('process_payments', '--from', '2017-01-01 00:00',
                         '--to', '2017-01-02 00:00')

            # The payment is outside the window, so it stays untouched.
            expected = [('PAYMENT_1', self.account.pk,
                         PaymentState.READY_TO_PROCESS, '')]
            self.assertQuerysetEqual(
                BankPayment.objects.values_list('identifier', 'account',
                                                'state', 'processor'),
                expected, transform=tuple, ordered=False)
            self.assertEqual(BankPayment.objects.first().objective, '')

    def test_invalid_date_raises_exception(self):
        """Invalid --from/--to values must raise CommandError."""
        bad_ranges = (
            ('2017-01-32 00:00', '2017-02-01 00:00'),
            ('not a date', '2017-02-01 00:00'),
            ('2017-01-01 00:00', '2017-01-32 00:00'),
            ('2017-01-01 00:00', 'not a date'),
        )
        lock_file = os.path.join(self.tempdir.path, 'test.lock')
        with override_settings(PAIN_PROCESS_PAYMENTS_LOCK_FILE=lock_file):
            for time_from, time_to in bad_ranges:
                with self.assertRaises(CommandError):
                    call_command('process_payments', '--from', time_from,
                                 '--to', time_to)

    def test_lock(self):
        """Test process payments lock.

        When another process already holds the lock file, the command must
        terminate without touching any payments.
        """
        logger = 'django_pain.management.commands.process_payments'
        with override_settings(PAIN_PROCESS_PAYMENTS_LOCK_FILE=os.path.join(
                self.tempdir.path, 'test.lock')):
            # Hold the flock for the duration of the command; the context
            # manager closes the file (releasing the lock) even when an
            # assertion fails — the original leaked the open file handle.
            with open(SETTINGS.process_payments_lock_file, 'a') as lock:
                fcntl.flock(lock, fcntl.LOCK_EX | fcntl.LOCK_NB)
                out = StringIO()
                err = StringIO()
                call_command('process_payments',
                             '--no-color',
                             stdout=out,
                             stderr=err)
                self.assertEqual(out.getvalue(), '')
                self.assertEqual(
                    err.getvalue(),
                    'Command process_payments is already running. Terminating.\n')
                # The payment must stay unprocessed.
                self.assertQuerysetEqual(BankPayment.objects.values_list(
                    'identifier', 'account', 'state', 'processor'),
                                         [('PAYMENT_1', self.account.pk,
                                           PaymentState.READY_TO_PROCESS, '')],
                                         transform=tuple,
                                         ordered=False)
                self.log_handler.check(
                    (logger, 'INFO', 'Command process_payments started.'),
                    (logger, 'WARNING',
                     'Command already running. Terminating.'))

    def test_invalid_lock(self):
        """Test process payments with invalid lock file.

        A mode-0 directory in place of the lock file makes open() fail,
        which must surface as a CommandError and an ERROR log entry.
        """
        logger = 'django_pain.management.commands.process_payments'
        with override_settings(PAIN_PROCESS_PAYMENTS_LOCK_FILE=os.path.join(
                self.tempdir.path, 'test.lock')):
            os.mkdir(SETTINGS.process_payments_lock_file, mode=0o0)
            try:
                out = StringIO()
                err = StringIO()
                # assertRaisesRegex: assertRaisesRegexp is a deprecated alias.
                with self.assertRaisesRegex(
                        CommandError,
                        r'^Error occured while opening lockfile .*/test.lock:.*Is a '
                        r'directory: .*\. Terminating\.$'):
                    call_command('process_payments',
                                 '--no-color',
                                 stdout=out,
                                 stderr=err)
                self.assertEqual(out.getvalue(), '')
                self.assertEqual(err.getvalue(), '')
                self.assertEqual(len(self.log_handler.actual()), 2)
                self.assertEqual(
                    self.log_handler.actual()[0],
                    (logger, 'INFO', 'Command process_payments started.'),
                )
                self.assertEqual(self.log_handler.actual()[1][:2],
                                 (logger, 'ERROR'))
                self.assertRegex(
                    self.log_handler.actual()[1][2],
                    r'^Error occured while opening lockfile .*/test.lock:.*Is a directory.*Terminating\.$'
                )
            finally:
                # Restore permissions unconditionally so tearDown's tempdir
                # cleanup succeeds even when an assertion above fails.
                os.chmod(SETTINGS.process_payments_lock_file, 0o755)

    @override_settings(
        PAIN_PROCESSORS={
            'dummy':
            'django_pain.tests.commands.test_process_payments.DummyTruePaymentProcessor'
        })
    def test_exclusion_in_payment_processing(self):
        """Test excluding accounts from payment processing"""
        second_account = BankAccount(account_number='987654/3210',
                                     currency='CZK')
        second_account.save()
        get_payment(identifier='PAYMENT_2',
                    account=self.account,
                    state=PaymentState.READY_TO_PROCESS).save()
        get_payment(identifier='PAYMENT_3',
                    account=second_account,
                    state=PaymentState.READY_TO_PROCESS).save()
        logger = 'django_pain.management.commands.process_payments'
        lock_file = os.path.join(self.tempdir.path, 'test.lock')
        with override_settings(PAIN_PROCESS_PAYMENTS_LOCK_FILE=lock_file):
            out = StringIO()
            err = StringIO()
            call_command('process_payments', '--exclude-accounts',
                         '987654/3210', stdout=out, stderr=err)

            self.assertEqual(out.getvalue(), '')
            self.assertEqual(err.getvalue(), '')
            # Only the two payments on the non-excluded account are
            # picked up ("Processing 2 unprocessed payments.").
            self.log_handler.check(
                (logger, 'INFO', 'Command process_payments started.'),
                (logger, 'INFO', 'Lock acquired.'),
                (logger, 'INFO', 'Processing 2 unprocessed payments.'),
                (logger, 'INFO', 'Processing payments with processor dummy.'),
                (logger, 'INFO', 'Marking 0 unprocessed payments as DEFERRED.'),
                (logger, 'INFO', 'Command process_payments finished.'),
            )

    @override_settings(
        PAIN_PROCESSORS={
            'dummy':
            'django_pain.tests.commands.test_process_payments.DummyTruePaymentProcessor'
        })
    def test_exclusion_of_non_existing_account_in_payment_processing(self):
        """Test excluding non-existing accounts from payment processing"""
        # Delegates to the shared helper with the --exclude-accounts flag.
        self._test_non_existing_account('--exclude-accounts')

    @override_settings(
        PAIN_PROCESSORS={
            'dummy':
            'django_pain.tests.commands.test_process_payments.DummyTruePaymentProcessor'
        })
    def test_inclusion_in_payment_processing(self):
        """Test including accounts from payment processing"""
        second_account = BankAccount(account_number='987654/3210',
                                     currency='CZK')
        second_account.save()
        get_payment(identifier='PAYMENT_2',
                    account=self.account,
                    state=PaymentState.READY_TO_PROCESS).save()
        get_payment(identifier='PAYMENT_3',
                    account=second_account,
                    state=PaymentState.READY_TO_PROCESS).save()
        logger = 'django_pain.management.commands.process_payments'
        lock_file = os.path.join(self.tempdir.path, 'test.lock')
        with override_settings(PAIN_PROCESS_PAYMENTS_LOCK_FILE=lock_file):
            out = StringIO()
            err = StringIO()
            call_command('process_payments', '--include-accounts',
                         '123456/7890', stdout=out, stderr=err)

            self.assertEqual(out.getvalue(), '')
            self.assertEqual(err.getvalue(), '')
            # Only the two payments on the included account are picked
            # up ("Processing 2 unprocessed payments.").
            self.log_handler.check(
                (logger, 'INFO', 'Command process_payments started.'),
                (logger, 'INFO', 'Lock acquired.'),
                (logger, 'INFO', 'Processing 2 unprocessed payments.'),
                (logger, 'INFO', 'Processing payments with processor dummy.'),
                (logger, 'INFO', 'Marking 0 unprocessed payments as DEFERRED.'),
                (logger, 'INFO', 'Command process_payments finished.'),
            )

    @override_settings(
        PAIN_PROCESSORS={
            'dummy':
            'django_pain.tests.commands.test_process_payments.DummyTruePaymentProcessor'
        })
    def test_inclusion_of_non_existing_account_in_payment_processing(self):
        """Test including non-existing accounts from payment processing"""
        # Delegates to the shared helper with the --include-accounts flag.
        self._test_non_existing_account('--include-accounts')

    @override_settings(
        PAIN_PROCESSORS={
            'dummy':
            'django_pain.tests.commands.test_process_payments.DummyTruePaymentProcessor'
        })
    def test_card_payments_processed(self):
        """Card payments are routed to their preset processor and processed."""
        # Two extra card payments with a preset processor and no
        # counter account.
        for identifier in ('PAYMENT_2', 'PAYMENT_3'):
            get_payment(identifier=identifier,
                        account=self.account,
                        state=PaymentState.READY_TO_PROCESS,
                        payment_type=PaymentType.CARD_PAYMENT,
                        counter_account_number='',
                        processor='dummy').save()
        logger = 'django_pain.management.commands.process_payments'
        lock_file = os.path.join(self.tempdir.path, 'test.lock')
        with override_settings(PAIN_PROCESS_PAYMENTS_LOCK_FILE=lock_file):
            call_command('process_payments')
            expected = [('PAYMENT_1', PaymentState.PROCESSED, 'dummy'),
                        ('PAYMENT_2', PaymentState.PROCESSED, 'dummy'),
                        ('PAYMENT_3', PaymentState.PROCESSED, 'dummy')]
            self.assertQuerysetEqual(
                BankPayment.objects.values_list('identifier', 'state',
                                                'processor'),
                expected, transform=tuple, ordered=False)
            self.assertEqual(BankPayment.objects.first().objective,
                             'True objective')
            self.log_handler.check(
                (logger, 'INFO', 'Command process_payments started.'),
                (logger, 'INFO', 'Lock acquired.'),
                (logger, 'INFO', 'Processing 3 unprocessed payments.'),
                (logger, 'INFO', 'Processing card payments.'),
                (logger, 'INFO',
                 'Processing card payments with processor dummy.'),
                (logger, 'INFO', 'Processing payments with processor dummy.'),
                (logger, 'INFO', 'Marking 0 unprocessed payments as DEFERRED.'),
                (logger, 'INFO', 'Command process_payments finished.'),
            )

    @override_settings(PAIN_PROCESSORS=OrderedDict([
        ('dummy',
         'django_pain.tests.commands.test_process_payments.DummyTruePaymentProcessor'
         ),
        ('dummy_false',
         'django_pain.tests.commands.test_process_payments.DummyFalsePaymentProcessor'
         ),
    ]))
    def test_card_payments_unprocessed(self):
        """A card payment whose preset processor refuses it is DEFERRED."""
        # PAYMENT_2 is routed to the always-false processor,
        # PAYMENT_3 to the always-true one.
        get_payment(identifier='PAYMENT_2',
                    account=self.account,
                    state=PaymentState.READY_TO_PROCESS,
                    payment_type=PaymentType.CARD_PAYMENT,
                    counter_account_number='',
                    processor='dummy_false',
                    transaction_date=date(2018, 5, 10)).save()
        get_payment(identifier='PAYMENT_3',
                    account=self.account,
                    state=PaymentState.READY_TO_PROCESS,
                    payment_type=PaymentType.CARD_PAYMENT,
                    counter_account_number='',
                    processor='dummy',
                    transaction_date=date(2018, 5, 11)).save()
        with override_settings(PAIN_PROCESS_PAYMENTS_LOCK_FILE=os.path.join(
                self.tempdir.path, 'test.lock')):
            call_command('process_payments')
            # Only PAYMENT_2 (refused by dummy_false) ends up DEFERRED.
            self.assertQuerysetEqual(
                BankPayment.objects.values_list('identifier', 'state',
                                                'processor'),
                [('PAYMENT_1', PaymentState.PROCESSED, 'dummy'),
                 ('PAYMENT_2', PaymentState.DEFERRED, 'dummy_false'),
                 ('PAYMENT_3', PaymentState.PROCESSED, 'dummy')],
                transform=tuple,
                ordered=False)
            self.assertEqual(BankPayment.objects.first().objective,
                             'True objective')
            self.log_handler.check(
                ('django_pain.management.commands.process_payments', 'INFO',
                 'Command process_payments started.'),
                ('django_pain.management.commands.process_payments', 'INFO',
                 'Lock acquired.'),
                ('django_pain.management.commands.process_payments', 'INFO',
                 'Processing 3 unprocessed payments.'),
                ('django_pain.management.commands.process_payments', 'INFO',
                 'Processing card payments.'),
                ('django_pain.management.commands.process_payments', 'INFO',
                 'Processing card payments with processor dummy.'),
                ('django_pain.management.commands.process_payments', 'INFO',
                 'Processing card payments with processor dummy_false.'),
                ('django_pain.management.commands.process_payments', 'INFO',
                 'Saving payment %s as DEFERRED with error None.' %
                 BankPayment.objects.get(identifier='PAYMENT_2').uuid),
                ('django_pain.management.commands.process_payments', 'INFO',
                 'Processing payments with processor dummy.'),
                ('django_pain.management.commands.process_payments', 'INFO',
                 'Marking 0 unprocessed payments as DEFERRED.'),
                ('django_pain.management.commands.process_payments', 'INFO',
                 'Command process_payments finished.'),
            )

    @override_settings(PAIN_PROCESSORS=OrderedDict([
        ('dummy',
         'django_pain.tests.commands.test_process_payments.DummyTruePaymentProcessor'
         ),
        ('dummy_error',
         'django_pain.tests.commands.test_process_payments.DummyFalseErrorPaymentProcessor'
         ),
    ]))
    def test_card_payments_defferred(self) -> None:
        """A card payment whose processor raises an error must end up DEFERRED.

        Two card payments are created: PAYMENT_2 is routed to the
        ``dummy_error`` processor (expected to fail, deferring the payment)
        and PAYMENT_3 to ``dummy`` (expected to succeed).  PAYMENT_1 is
        presumably created by shared fixture code outside this method —
        verify against the test class's setUp.

        NOTE(review): "defferred" in the method name is a typo for
        "deferred"; kept as-is since the unittest runner discovers tests
        by name.
        """
        # PAYMENT_2 -> failing processor; expected final state: DEFERRED.
        get_payment(identifier='PAYMENT_2',
                    account=self.account,
                    state=PaymentState.READY_TO_PROCESS,
                    payment_type=PaymentType.CARD_PAYMENT,
                    counter_account_number='',
                    processor='dummy_error',
                    transaction_date=date(2018, 5, 10)).save()
        # PAYMENT_3 -> succeeding processor; expected final state: PROCESSED.
        get_payment(identifier='PAYMENT_3',
                    account=self.account,
                    state=PaymentState.READY_TO_PROCESS,
                    payment_type=PaymentType.CARD_PAYMENT,
                    counter_account_number='',
                    processor='dummy',
                    transaction_date=date(2018, 5, 11)).save()
        # The command takes an exclusive lock file; point it into the
        # per-test temporary directory.
        with override_settings(PAIN_PROCESS_PAYMENTS_LOCK_FILE=os.path.join(
                self.tempdir.path, 'test.lock')):
            call_command('process_payments')
            # Final (identifier, state, processor) triples for all payments.
            self.assertQuerysetEqual(
                BankPayment.objects.values_list('identifier', 'state',
                                                'processor'),
                [('PAYMENT_1', PaymentState.PROCESSED, 'dummy'),
                 ('PAYMENT_2', PaymentState.DEFERRED, 'dummy_error'),
                 ('PAYMENT_3', PaymentState.PROCESSED, 'dummy')],
                transform=tuple,
                ordered=False)
            self.assertEqual(BankPayment.objects.first().objective,
                             'True objective')
            # Exact INFO log sequence emitted by the command, including the
            # DEFERRED record for the failing PAYMENT_2.
            self.log_handler.check(
                ('django_pain.management.commands.process_payments', 'INFO',
                 'Command process_payments started.'),
                ('django_pain.management.commands.process_payments', 'INFO',
                 'Lock acquired.'),
                ('django_pain.management.commands.process_payments', 'INFO',
                 'Processing 3 unprocessed payments.'),
                ('django_pain.management.commands.process_payments', 'INFO',
                 'Processing card payments.'),
                ('django_pain.management.commands.process_payments', 'INFO',
                 'Processing card payments with processor dummy.'),
                ('django_pain.management.commands.process_payments', 'INFO',
                 'Processing card payments with processor dummy_error.'),
                ('django_pain.management.commands.process_payments', 'INFO',
                 'Saving payment %s as DEFERRED with error PaymentProcessingError.DUPLICITY.'
                 % BankPayment.objects.get(identifier='PAYMENT_2').uuid),
                ('django_pain.management.commands.process_payments', 'INFO',
                 'Processing payments with processor dummy.'),
                ('django_pain.management.commands.process_payments', 'INFO',
                 'Marking 0 unprocessed payments as DEFERRED.'),
                ('django_pain.management.commands.process_payments', 'INFO',
                 'Command process_payments finished.'),
            )
Ejemplo n.º 30
0
    def test_update_node_information(self, mock_shell):
        """``slurm.update_node_information`` parses ``scontrol`` output and
        updates the state, memory and GPU records of the stored nodes.

        ``mock_shell`` replaces the remote shell execution and returns the
        canned ``scontrol`` output from the fixture.
        """
        # Capture root-logger output; register the cleanup immediately so
        # the handler is removed even if an assertion below fails (the
        # original only uninstalled on the success path, leaking the
        # handler into later tests on failure).
        log_capture = LogCapture()
        self.addCleanup(log_capture.uninstall)

        command = "scontrol"
        params = ["-o", "--all", "show", "node"]

        # We store some data in the db for the test: one testbed with two
        # nodes whose information may be updated.
        testbed = Testbed("name1", True, Testbed.slurm_category,
                          Testbed.protocol_ssh, "user@server", ['slurm'])

        node_1 = Node()
        node_1.name = "nd80"
        node_1.information_retrieved = True
        node_2 = Node()
        node_2.name = "nd23"
        node_2.information_retrieved = True
        testbed.nodes = [node_1, node_2]
        node_1.disabled = True

        db.session.add(testbed)
        db.session.commit()

        # We mock the command call.
        mock_shell.return_value = self.command_scontrol_output

        slurm.update_node_information()

        # We verify the results.  (assertEqual, not the deprecated
        # assertEquals alias removed in Python 3.12.)
        node_80 = db.session.query(Node).filter_by(name='nd80').first()
        node_23 = db.session.query(Node).filter_by(name='nd23').first()
        self.assertEqual('ALLOCATED', node_80.state)
        self.assertEqual(1, len(node_80.memories))
        self.assertEqual(Memory.MEGABYTE, node_80.memories[0].units)
        self.assertEqual(6850663, node_80.memories[0].size)
        self.assertEqual(0, len(node_80.gpus))

        self.assertEqual('MAINT', node_23.state)
        self.assertEqual(1, len(node_23.memories))
        self.assertEqual(Memory.MEGABYTE, node_23.memories[0].units)
        self.assertEqual(24018, node_23.memories[0].size)
        self.assertEqual(2, len(node_23.gpus))
        self.assertEqual('Nvidia', node_23.gpus[0].vendor_id)
        self.assertEqual('Nvidia TESLA C2075', node_23.gpus[0].model_name)
        self.assertEqual('Nvidia', node_23.gpus[1].vendor_id)
        self.assertEqual('Nvidia TESLA C2075', node_23.gpus[1].model_name)

        # Exactly one remote shell invocation with the expected arguments.
        mock_shell.assert_called_with(command=command,
                                      server="user@server",
                                      params=params)
        self.assertEqual(1, mock_shell.call_count)

        # Checking that we are logging the correct messages.
        log_capture.check(
            ('root', 'INFO',
             'Updating information for node: nd80 if necessary'),
            ('root', 'INFO', 'Updating memory information for node: nd80'),
            ('root', 'INFO',
             'Updating information for node: nd23 if necessary'),
            ('root', 'INFO', 'Updating memory information for node: nd23'),
            ('root', 'INFO', 'Updating gpu information for node: nd23'))
Ejemplo n.º 31
0
class TestRouteSpec(TestBase):
    """Tests for route-spec file change detection and spec parsing."""

    def setUp(self):
        # Capture log output; the filter suppresses unrelated records.
        self.lc = LogCapture()
        self.lc.addFilter(test_common.MyLogCaptureFilter())
        self.temp_dir = tempfile.mkdtemp()
        self.addCleanup(self.cleanup)

    def cleanup(self):
        """Remove the log-capture handler and the temporary directory."""
        self.lc.uninstall()
        shutil.rmtree(self.temp_dir)

    def test_file_event_watcher(self):
        #
        # Test for the detection of file events.
        #

        # Create a small test file
        abs_fname = self.temp_dir + "/r.spec"

        class MyQueue(object):
            # Default to None so the assertions below are well defined
            # even if the handler never calls put(); previously a missing
            # attribute would have raised AttributeError instead of
            # failing the assertion.
            msg = None

            def put(self, msg):
                self.msg = msg

        with open(abs_fname, "w+") as f:
            myq = MyQueue()
            handler = configfile.RouteSpecChangeEventHandler(
                                              route_spec_fname   = "r.spec",
                                              route_spec_abspath = abs_fname,
                                              q_route_spec       = myq,
                                              plugin             = None)
            # Install the file observer on the directory.  Register
            # cleanups so the observer thread is stopped and joined when
            # the test ends (it was previously started and never stopped,
            # leaking a thread).  addCleanup runs LIFO: stop, then join.
            observer_thread = Observer()
            observer_thread.schedule(handler, self.temp_dir)
            observer_thread.start()
            self.addCleanup(observer_thread.join)
            self.addCleanup(observer_thread.stop)

            # A write event to the file should be detected
            f.write("blah")
            f.flush()
            time.sleep(1)  # not instantaneous, so need to wait a little

            # File is malformed, so should not have received a message
            self.assertIsNone(myq.msg)

            # A new file created in the temp directory should not create an
            # event
            with open(self.temp_dir + "/foo", "w+") as f2:
                f2.write("blah")
                f2.flush()
            time.sleep(1)
            self.assertIsNone(myq.msg)

            # Check that we received the right log messages about the file
            self.lc.check(
                ('root', 'INFO',
                 'Detected file change event for %s' % abs_fname),
                ('root', 'ERROR',
                 "Config ignored: Cannot open file: [Errno 2] "
                 "No such file or directory: 'r.spec'"))

    def test_route_spec_parser(self):
        #
        # Test the spec parsing function with a number of different inputs,
        # valid as well as malformed.
        #
        # "res" semantics: "IDENT" -> output equals input unchanged; a dict
        # -> the expected (normalised) output; None -> ValueError expected.
        test_specs = [
            {
                "inp" : {
                            "10.1.0.0/16" : ["1.1.1.1", "2.2.2.2"],
                            "10.2.0.0/16" : ["3.3.3.3"]
                        },
                "res" : "IDENT"
            },
            {
                "inp" : {
                            "10.1.0.0/16" : ["1.1.1.1", "2.2.2.2", "2.2.2.2"],
                            "10.2.0.0/16" : ["3.3.3.3"]
                        },
                "res" : {
                            "10.1.0.0/16" : ["1.1.1.1", "2.2.2.2"],
                            "10.2.0.0/16" : ["3.3.3.3"]
                        },
            },
            {
                # malformed list of IPs
                "inp" : {
                            "10.1.0.0/16" : "Foo",
                        },
                "res" : None
            },
            {
                # malformed IP in list
                "inp" : {
                            "10.1.0.0/16" : ["1.1.1.", "2.2.2.2"],
                        },
                "res" : None
            },
            {
                # malformed top level type
                "inp" : "Foo",
                "res" : None
            }
        ]

        for test_data in test_specs:
            if test_data['res'] is None:
                self.assertRaises(ValueError,
                                  watcher.common.parse_route_spec_config,
                                  test_data['inp'])
            else:
                if test_data['res'] == 'IDENT':
                    expected_out = test_data['inp']
                else:
                    expected_out = test_data['res']

                res = watcher.common.parse_route_spec_config(test_data['inp'])
                self.assertEqual(expected_out, res)
Ejemplo n.º 32
0
class TestProcessAccountEvents(unittest.TestCase):
    """Tests for processing account events (delete/reset/passwordChange)
    against the tokenserver node-assignment backend.

    Uses assertEqual throughout instead of the deprecated assertEquals
    alias (removed in Python 3.12).
    """

    def get_ini(self):
        """Return the path of the test configuration file."""
        return os.path.join(os.path.dirname(__file__), 'test_sql.ini')

    def setUp(self):
        self.config = testing.setUp()
        settings = {}
        load_into_settings(self.get_ini(), settings)
        self.config.add_settings(settings)
        self.config.include("tokenserver")
        load_and_register("tokenserver", self.config)
        self.backend = self.config.registry.getUtility(INodeAssignment)
        self.backend.add_service(SERVICE, PATTERN)
        self.backend.add_node(SERVICE, "https://phx12", 100)
        self.logs = LogCapture()

    def tearDown(self):
        self.logs.uninstall()
        testing.tearDown()
        # For sqlite, the backend is file-based: remove the file.  For any
        # other driver, wipe the shared tables instead.
        if self.backend._engine.driver == 'pysqlite':
            filename = self.backend.sqluri.split('sqlite://')[-1]
            if os.path.exists(filename):
                os.remove(filename)
        else:
            self.backend._safe_execute('delete from services')
            self.backend._safe_execute('delete from nodes')
            self.backend._safe_execute('delete from users')

    def assertMessageWasLogged(self, msg):
        """Check that a metric was logged during the request."""
        for r in self.logs.records:
            if msg in r.getMessage():
                break
        else:
            assert False, "message %r was not logged" % (msg, )

    def clearLogs(self):
        """Discard all captured log records."""
        del self.logs.records[:]

    def test_delete_user(self):
        """A delete event marks all of the user's records as replaced."""
        self.backend.allocate_user(SERVICE, EMAIL)
        user = self.backend.get_user(SERVICE, EMAIL)
        self.backend.update_user(SERVICE, user, client_state="abcdef")
        records = list(self.backend.get_user_records(SERVICE, EMAIL))
        self.assertEqual(len(records), 2)
        self.assertIsNotNone(records[0]["replaced_at"])

        process_account_event(self.config,
                              message_body(
                                  event="delete",
                                  uid=UID,
                                  iss=ISS,
                              ))

        records = list(self.backend.get_user_records(SERVICE, EMAIL))
        self.assertEqual(len(records), 2)
        for row in records:
            self.assertIsNotNone(row["replaced_at"])

    def test_delete_user_by_legacy_uid_format(self):
        """Delete events may identify the user by the legacy email-style uid."""
        self.backend.allocate_user(SERVICE, EMAIL)
        user = self.backend.get_user(SERVICE, EMAIL)
        self.backend.update_user(SERVICE, user, client_state="abcdef")
        records = list(self.backend.get_user_records(SERVICE, EMAIL))
        self.assertEqual(len(records), 2)
        self.assertIsNotNone(records[0]["replaced_at"])

        process_account_event(self.config,
                              message_body(
                                  event="delete",
                                  uid=EMAIL,
                              ))

        records = list(self.backend.get_user_records(SERVICE, EMAIL))
        self.assertEqual(len(records), 2)
        for row in records:
            self.assertIsNotNone(row["replaced_at"])

    def test_delete_user_who_is_not_in_the_db(self):
        """Deleting an unknown user is a harmless no-op."""
        records = list(self.backend.get_user_records(SERVICE, EMAIL))
        self.assertEqual(len(records), 0)

        process_account_event(self.config,
                              message_body(event="delete", uid=UID, iss=ISS))

        records = list(self.backend.get_user_records(SERVICE, EMAIL))
        self.assertEqual(len(records), 0)

    def test_reset_user(self):
        """A reset event bumps the stored generation number.

        NOTE(review): the event carries generation=43 but 42 is expected;
        presumably the backend stores generation - 1 so the client's next
        request with the new generation is not rejected — confirm against
        process_account_event's implementation.
        """
        self.backend.allocate_user(SERVICE, EMAIL, generation=12)

        process_account_event(
            self.config,
            message_body(
                event="reset",
                uid=UID,
                iss=ISS,
                generation=43,
            ))

        user = self.backend.get_user(SERVICE, EMAIL)
        self.assertEqual(user["generation"], 42)

    def test_reset_user_by_legacy_uid_format(self):
        """Reset events may identify the user by the legacy email-style uid."""
        self.backend.allocate_user(SERVICE, EMAIL, generation=12)

        process_account_event(
            self.config, message_body(
                event="reset",
                uid=EMAIL,
                generation=43,
            ))

        user = self.backend.get_user(SERVICE, EMAIL)
        self.assertEqual(user["generation"], 42)

    def test_reset_user_who_is_not_in_the_db(self):
        """Resetting an unknown user is a harmless no-op."""
        records = list(self.backend.get_user_records(SERVICE, EMAIL))
        self.assertEqual(len(records), 0)

        process_account_event(
            self.config,
            message_body(
                event="reset",
                uid=UID,
                iss=ISS,
                generation=43,
            ))

        records = list(self.backend.get_user_records(SERVICE, EMAIL))
        self.assertEqual(len(records), 0)

    def test_password_change(self):
        """A passwordChange event bumps the stored generation number."""
        self.backend.allocate_user(SERVICE, EMAIL, generation=12)

        process_account_event(
            self.config,
            message_body(
                event="passwordChange",
                uid=UID,
                iss=ISS,
                generation=43,
            ))

        user = self.backend.get_user(SERVICE, EMAIL)
        self.assertEqual(user["generation"], 42)

    def test_password_change_user_not_in_db(self):
        """A passwordChange for an unknown user is a harmless no-op."""
        records = list(self.backend.get_user_records(SERVICE, EMAIL))
        self.assertEqual(len(records), 0)

        process_account_event(
            self.config,
            message_body(
                event="passwordChange",
                uid=UID,
                iss=ISS,
                generation=43,
            ))

        records = list(self.backend.get_user_records(SERVICE, EMAIL))
        self.assertEqual(len(records), 0)

    def test_malformed_events(self):
        """Invalid or unknown events are dropped and logged, never raised."""

        # Unknown event type.
        process_account_event(
            self.config,
            message_body(
                event="party",
                uid=UID,
                iss=ISS,
                generation=43,
            ))
        self.assertMessageWasLogged("Dropping unknown event type")
        self.clearLogs()

        # Missing event type.
        process_account_event(self.config,
                              message_body(
                                  uid=UID,
                                  iss=ISS,
                                  generation=43,
                              ))
        self.assertMessageWasLogged("Invalid account message")
        self.clearLogs()

        # Missing uid.
        process_account_event(self.config,
                              message_body(
                                  event="delete",
                                  iss=ISS,
                              ))
        self.assertMessageWasLogged("Invalid account message")
        self.clearLogs()

        # Missing generation for reset events.
        process_account_event(self.config,
                              message_body(
                                  event="reset",
                                  uid=UID,
                                  iss=ISS,
                              ))
        self.assertMessageWasLogged("Invalid account message")
        self.clearLogs()

        # Missing generation for passwordChange events.
        process_account_event(
            self.config,
            message_body(
                event="passwordChange",
                uid=UID,
                iss=ISS,
            ))
        self.assertMessageWasLogged("Invalid account message")
        self.clearLogs()

        # Missing issuer with nonemail uid
        process_account_event(self.config,
                              message_body(
                                  event="delete",
                                  uid=UID,
                              ))
        self.assertMessageWasLogged("Invalid account message")
        self.clearLogs()

        # Non-JSON garbage.
        process_account_event(self.config, "wat")
        self.assertMessageWasLogged("Invalid account message")
        self.clearLogs()

        # Non-JSON garbage in Message field.
        process_account_event(self.config, '{ "Message": "wat" }')
        self.assertMessageWasLogged("Invalid account message")
        self.clearLogs()

        # Badly-typed JSON value in Message field.
        process_account_event(self.config, '{ "Message": "[1, 2, 3"] }')
        self.assertMessageWasLogged("Invalid account message")
        self.clearLogs()
Ejemplo n.º 33
0
class TestPanoptesPluginRunner(unittest.TestCase):
    """Tests for PanoptesPluginRunner.

    Log assertions go through ``check_present`` with the ``extract``
    normaliser below, which strips timings, lock identifiers and other
    run-to-run variance from captured log messages so they can be compared
    against fixed expected strings.
    """

    @staticmethod
    def extract(record):
        """Normalise a log record into a (name, levelname, message) tuple.

        Variable parts of known message shapes (elapsed seconds, garbage
        object counts, lock paths/identifiers, yapsy module-name suffixes)
        are removed so tests can match on stable text.
        """
        message = record.getMessage()

        # Drop a ":<word>" infix (e.g. an exception class) from messages of
        # the form "<name>:<word><body>".
        match_obj = re.match(r'(?P<name>.*):\w+(?P<body>.*)', message)
        if match_obj:
            message = match_obj.group('name') + match_obj.group('body')

        # "... [R|r]an in N.NN seconds ..." -> strip the number.
        # NOTE(review): the character class [R|r] also matches a literal
        # '|'; presumably "[Rr]" was intended — harmless here.
        match_obj = re.match(
            r'(?P<start>.*[R|r]an in\s)\d+\.?\d*.*(?P<end>seconds.*)', message)
        if match_obj:
            return record.name, record.levelname, match_obj.group(
                'start') + match_obj.group('end')

        # "... took N.NN seconds ... M garbage objects ..." -> strip numbers.
        match_obj = re.match(
            r'(?P<start>.*took\s*)\d+\.?\d*.*(?P<seconds>seconds\D*)\d+\s(?P<end>garbage objects.*)',
            message)

        if match_obj:
            return record.name, record.levelname, match_obj.group('start') + match_obj.group('seconds') + \
                   match_obj.group('end')

        # Lock-acquisition messages: strip the quoted lock path, the
        # identifier value and the timeout number.
        match_obj = re.match(
            r'(?P<start>Attempting to get lock for plugin .*with lock path) \".*\".*(?P<id> and identifier).*'
            r'(?P<in> in) \d\.?\d*(?P<seconds> seconds)', message)
        if match_obj:
            return record.name, record.levelname, match_obj.group('start') + match_obj.group('id') + \
                   match_obj.group('in') + match_obj.group('seconds')

        # yapsy module-deletion messages: keep only the stable prefix.
        match_obj = re.match(
            r'(?P<delete>Deleting module: yapsy_loaded_plugin_Test_Polling_Plugin_Second_Instance|'
            r'Deleting module: yapsy_loaded_plugin_Test_Polling_Plugin).*',
            message)

        if match_obj:
            return record.name, record.levelname, match_obj.group('delete')

        # Anything else passes through unchanged.
        return record.name, record.levelname, message

    @patch('redis.StrictRedis', panoptes_mock_redis_strict_client)
    @patch('kazoo.client.KazooClient', panoptes_mock_kazoo_client)
    def setUp(self):
        """Build a PanoptesContext with mocked redis/kazoo clients and
        install a LogCapture that normalises records via ``extract``."""
        self.my_dir, self.panoptes_test_conf_file = get_test_conf_file()
        self._panoptes_context = PanoptesContext(
            self.panoptes_test_conf_file,
            key_value_store_class_list=[
                PanoptesTestKeyValueStore, PanoptesResourcesKeyValueStore,
                PanoptesPollingPluginKeyValueStore, PanoptesSecretsStore,
                PanoptesPollingPluginAgentKeyValueStore,
                PanoptesDiscoveryPluginAgentKeyValueStore,
                PanoptesDiscoveryPluginKeyValueStore
            ],
            create_message_producer=False,
            async_message_producer=False,
            create_zookeeper_client=True)

        self._runner_class = PanoptesPluginRunner
        self._log_capture = LogCapture(attributes=self.extract)

    def tearDown(self):
        """Remove the LogCapture handler installed by setUp()."""
        self._log_capture.uninstall()

    def test_logging_methods(self):
        """Runner logging helpers (info/warn/error/exception) run and log."""
        runner = self._runner_class("Test Polling Plugin", "polling",
                                    PanoptesPollingPlugin, PanoptesPluginInfo,
                                    None, self._panoptes_context,
                                    PanoptesTestKeyValueStore,
                                    PanoptesTestKeyValueStore,
                                    PanoptesTestKeyValueStore, "plugin_logger",
                                    PanoptesMetricsGroupSet, _callback)

        #  Ensure logging methods run:
        runner.info(PanoptesTestPluginNoLock(), "Test Info log message")
        runner.warn(PanoptesTestPluginNoLock(), "Test Warning log message")
        runner.error(PanoptesTestPluginNoLock(), "Test Error log message",
                     Exception)
        runner.exception(PanoptesTestPluginNoLock(),
                         "Test Exception log message")

        self._log_capture.check_present(
            ('panoptes.tests.test_runner', 'INFO',
             '[None] [{}] Test Info log message'),
            ('panoptes.tests.test_runner', 'WARNING',
             '[None] [{}] Test Warning log message'),
            ('panoptes.tests.test_runner', 'ERROR',
             '[None] [{}] Test Exception log message:'),
            order_matters=False)

    def test_basic_operations(self):
        """A successful plugin execution emits the full expected log flow:
        discovery, locking, execution, callback, GC and module cleanup."""
        runner = self._runner_class("Test Polling Plugin", "polling",
                                    PanoptesPollingPlugin, PanoptesPluginInfo,
                                    None, self._panoptes_context,
                                    PanoptesTestKeyValueStore,
                                    PanoptesTestKeyValueStore,
                                    PanoptesTestKeyValueStore, "plugin_logger",
                                    PanoptesMetricsGroupSet, _callback)

        runner.execute_plugin()

        self._log_capture.check_present(
            ('panoptes.tests.test_runner', 'INFO',
             'Attempting to execute plugin "Test Polling Plugin"'),
            ('panoptes.tests.test_runner', 'DEBUG', 'Found 3 plugins'),
            ('panoptes.tests.test_runner', 'DEBUG', 'Loaded plugin '
             '"Test Polling Plugin", version "0.1" of type "polling"'
             ', category "polling"'),
            ('panoptes.tests.test_runner', 'DEBUG',
             'Loaded plugin "Test Polling Plugin 2", '
             'version "0.1" of type "polling", category "polling"'),
            ('panoptes.tests.test_runner', 'DEBUG',
             'Loaded plugin "Test Polling Plugin Second Instance", '
             'version "0.1" of type "polling", category "polling"'),
            ('panoptes.tests.test_runner', 'INFO',
             '''[Test Polling Plugin] [None] '''
             '''Attempting to get lock for plugin "Test Polling Plugin"'''),
            ('panoptes.tests.test_runner', 'DEBUG',
             'Attempting to get lock for plugin "Test Polling Plugin", with lock path and '
             'identifier in seconds'),
            ('panoptes.tests.test_runner', 'INFO',
             '[Test Polling Plugin] [None] Acquired lock'),
            ('panoptes.tests.test_runner', 'INFO',
             '[Test Polling Plugin] [None]'
             ' Ran in seconds'),
            ('panoptes.tests.test_runner', 'INFO',
             '[Test Polling Plugin] [None] Released lock'),
            ('panoptes.tests.test_runner', 'INFO',
             '[Test Polling Plugin] [None] Plugin returned'
             ' a result set with 1 members'),
            ('panoptes.tests.test_runner', 'INFO',
             '[Test Polling Plugin] [None]'
             ' Callback function ran in seconds'),
            ('panoptes.tests.test_runner', 'INFO',
             '[Test Polling Plugin] [None] GC took seconds. There are garbage objects.'
             ), ('panoptes.tests.test_runner', 'DEBUG',
                 'Deleting module: yapsy_loaded_plugin_Test_Polling_Plugin'),
            ('panoptes.tests.test_runner', 'DEBUG',
             'Deleting module: yapsy_loaded_plugin_Test_Polling_Plugin'),
            ('panoptes.tests.test_runner', 'DEBUG',
             'Deleting module: yapsy_loaded_plugin_Test_Polling_Plugin_Second_Instance'
             ),
            order_matters=False)

    def test_nonexistent_plugin(self):
        """Requesting a plugin that does not exist logs a WARNING and exits."""
        runner = self._runner_class("Non-existent Plugin", "polling",
                                    PanoptesPollingPlugin, PanoptesPluginInfo,
                                    None, self._panoptes_context,
                                    PanoptesTestKeyValueStore,
                                    PanoptesTestKeyValueStore,
                                    PanoptesTestKeyValueStore, "plugin_logger",
                                    PanoptesMetricsGroupSet, _callback)
        runner.execute_plugin()
        self._log_capture.check_present((
            'panoptes.tests.test_runner', 'INFO',
            'Attempting to execute plugin "Non-existent Plugin"'
        ), ('panoptes.tests.test_runner', 'DEBUG', 'Found 3 plugins'), (
            'panoptes.tests.test_runner', 'DEBUG',
            'Loaded plugin "Test Polling Plugin", version "0.1" of type "polling", '
            'category "polling"'
        ), ('panoptes.tests.test_runner', 'DEBUG',
            'Loaded plugin "Test Polling Plugin Second Instance", version "0.1" of type '
            '"polling", category "polling"'), (
                'panoptes.tests.test_runner', 'WARNING',
                'No plugin named "Non-existent Plugin" found in "'
                '''['tests/plugins/polling']"'''),
                                        order_matters=False)

    def test_execute_now_false(self):
        """A plugin whose execute_now is False is loaded but not executed."""
        mock_get_plugin_by_name = MagicMock(
            return_value=MockPluginExecuteNow())
        with patch(
                'yahoo_panoptes.framework.plugins.runner.PanoptesPluginManager.getPluginByName',
                mock_get_plugin_by_name):
            runner = self._runner_class(
                "Test Polling Plugin", "polling", PanoptesPollingPlugin,
                PanoptesPluginInfo, None, self._panoptes_context,
                PanoptesTestKeyValueStore, PanoptesTestKeyValueStore,
                PanoptesTestKeyValueStore, "plugin_logger",
                PanoptesMetricsGroupSet, _callback)
            runner.execute_plugin()

            self._log_capture.check_present(
                ('panoptes.tests.test_runner', 'INFO',
                 'Attempting to execute plugin "Test Polling Plugin"'),
                ('panoptes.tests.test_runner', 'DEBUG', 'Found 3 plugins'),
                ('panoptes.tests.test_runner', 'DEBUG', 'Loaded plugin '
                 '"Test Polling Plugin", version "0.1" of type "polling"'
                 ', category "polling"'),
                ('panoptes.tests.test_runner', 'DEBUG',
                 'Loaded plugin "Test Polling Plugin Second Instance", '
                 'version "0.1" of type "polling", category "polling"'),
                order_matters=False)

    def test_callback_failure(self):
        """A callback that raises is logged as an ERROR by the runner."""
        runner = self._runner_class(
            "Test Polling Plugin", "polling", PanoptesPollingPlugin,
            PanoptesPluginInfo, None, self._panoptes_context,
            PanoptesTestKeyValueStore, PanoptesTestKeyValueStore,
            PanoptesTestKeyValueStore, "plugin_logger",
            PanoptesMetricsGroupSet, _callback_with_exception)
        runner.execute_plugin()

        self._log_capture.check_present(
            ('panoptes.tests.test_runner', 'ERROR', '[Test Polling Plugin] '
             '[None] Results callback function failed: :'))

    def test_lock_no_lock_object(self):
        """A plugin info object without a usable lock logs an acquire error."""
        mock_plugin = MagicMock(return_value=PanoptesTestPluginNoLock)
        mock_get_context = MagicMock(return_value=self._panoptes_context)
        with patch(
                'yahoo_panoptes.framework.plugins.runner.PanoptesPluginManager.getPluginByName',
                mock_plugin):
            with patch(
                    'yahoo_panoptes.framework.plugins.runner.PanoptesPluginRunner._get_context',
                    mock_get_context):
                runner = self._runner_class(
                    "Test Polling Plugin", "polling", PanoptesPollingPlugin,
                    PanoptesPluginInfo, None, self._panoptes_context,
                    PanoptesTestKeyValueStore, PanoptesTestKeyValueStore,
                    PanoptesTestKeyValueStore, "plugin_logger",
                    PanoptesMetricsGroupSet, _callback)
                runner.execute_plugin()

                self._log_capture.check_present(
                    ('panoptes.tests.test_runner', 'ERROR',
                     '[None] [{}] Error in acquiring lock:'))

    def test_lock_is_none(self):
        """A plugin whose lock resolves to None still attempts acquisition."""
        mock_get_plugin_by_name = MagicMock(return_value=MockPluginLockNone())
        mock_get_context = MagicMock(return_value=self._panoptes_context)
        with patch(
                'yahoo_panoptes.framework.plugins.runner.PanoptesPluginManager.getPluginByName',
                mock_get_plugin_by_name):
            with patch(
                    'yahoo_panoptes.framework.plugins.runner.PanoptesPluginRunner._get_context',
                    mock_get_context):
                runner = self._runner_class(
                    "Test Polling Plugin", "polling", PanoptesPollingPlugin,
                    PanoptesPluginInfo, None, self._panoptes_context,
                    PanoptesTestKeyValueStore, PanoptesTestKeyValueStore,
                    PanoptesTestKeyValueStore, "plugin_logger",
                    PanoptesMetricsGroupSet, _callback)
                runner.execute_plugin()

                self._log_capture.check_present(
                    ('panoptes.tests.test_runner', 'INFO',
                     '[None] [{}] Attempting to get lock for plugin'
                     ' "Test Polling Plugin"'))

    def test_lock_is_not_locked(self):
        """A lock object reporting not-locked still logs the attempt."""
        mock_get_plugin_by_name = MagicMock(
            return_value=MockPluginLockIsNotLocked())
        mock_get_context = MagicMock(return_value=self._panoptes_context)
        with patch(
                'yahoo_panoptes.framework.plugins.runner.PanoptesPluginManager.getPluginByName',
                mock_get_plugin_by_name):
            with patch(
                    'yahoo_panoptes.framework.plugins.runner.PanoptesPluginRunner._get_context',
                    mock_get_context):
                runner = self._runner_class(
                    "Test Polling Plugin", "polling", PanoptesPollingPlugin,
                    PanoptesPluginInfo, None, self._panoptes_context,
                    PanoptesTestKeyValueStore, PanoptesTestKeyValueStore,
                    PanoptesTestKeyValueStore, "plugin_logger",
                    PanoptesMetricsGroupSet, _callback)
                runner.execute_plugin()

                self._log_capture.check_present(
                    ('panoptes.tests.test_runner', 'INFO',
                     '[None] [{}] Attempting to get lock for plugin'
                     ' "Test Polling Plugin"'))

    def test_plugin_failure(self):
        """A plugin that raises during run and lock release logs every
        failure stage but does not propagate the exception."""
        mock_plugin = MagicMock(
            return_value=PanoptesTestPluginRaisePluginReleaseException)
        mock_get_context = MagicMock(return_value=self._panoptes_context)
        with patch(
                'yahoo_panoptes.framework.plugins.runner.PanoptesPluginManager.getPluginByName',
                mock_plugin):
            with patch(
                    'yahoo_panoptes.framework.plugins.runner.PanoptesPluginRunner._get_context',
                    mock_get_context):
                runner = self._runner_class(
                    "Test Polling Plugin", "polling", PanoptesPollingPlugin,
                    PanoptesPluginInfo, None, self._panoptes_context,
                    PanoptesTestKeyValueStore, PanoptesTestKeyValueStore,
                    PanoptesTestKeyValueStore, "plugin_logger",
                    PanoptesMetricsGroupSet, _callback)
                runner.execute_plugin()

                self._log_capture.check_present(
                    ('panoptes.tests.test_runner', 'ERROR',
                     '[None] [{}] Failed to execute plugin:'),
                    ('panoptes.tests.test_runner', 'INFO',
                     '[None] [{}] Ran in seconds'),
                    ('panoptes.tests.test_runner', 'ERROR',
                     '[None] [{}] Failed to release lock for plugin:'),
                    ('panoptes.tests.test_runner', 'WARNING',
                     '[None] [{}] Plugin did not return any results'),
                    order_matters=False)

    def test_plugin_wrong_result_type(self):
        """A plugin returning the wrong result type is logged as a WARNING."""
        runner = self._runner_class("Test Polling Plugin 2", "polling",
                                    PanoptesPollingPlugin, PanoptesPluginInfo,
                                    None, self._panoptes_context,
                                    PanoptesTestKeyValueStore,
                                    PanoptesTestKeyValueStore,
                                    PanoptesTestKeyValueStore, "plugin_logger",
                                    PanoptesMetricsGroupSet, _callback)
        runner.execute_plugin()

        self._log_capture.check_present((
            'panoptes.tests.test_runner', 'WARNING',
            '[Test Polling Plugin 2] [None] Plugin returned an unexpected result type: '
            '"PanoptesMetricsGroup"'))
Ejemplo n.º 34
0
 def test_simple_strict(self):
     """With ensure_checks_above=ERROR, uninstalling a capture whose ERROR
     records were never checked makes ensure_checked() assert."""
     capture = LogCapture(ensure_checks_above=ERROR)
     root.error('during')
     capture.uninstall()
     with ShouldAssert("Not asserted ERROR log(s): [('root', 'ERROR', 'during')]"):
         capture.ensure_checked()
Ejemplo n.º 35
0
class TestPanoptesPollingPluginRunner(unittest.TestCase):
    """End-to-end test of the polling plugin agent with mocked Redis,
    Zookeeper and message producer, verifying both the emitted log records
    and the metrics pushed through the mock producer.

    Fix: the original class defined ``tearDown`` twice; the second identical
    definition silently shadowed the first, so the duplicate was removed.
    """

    @patch('redis.StrictRedis', panoptes_mock_redis_strict_client)
    @patch('kazoo.client.KazooClient', panoptes_mock_kazoo_client)
    def setUp(self):
        """Create the test resource, context, runner class and log capture."""
        self.my_dir, self.panoptes_test_conf_file = get_test_conf_file()
        self._panoptes_resource = PanoptesResource(
            resource_site="test",
            resource_class="test",
            resource_subclass="test",
            resource_type="test",
            resource_id="test",
            resource_endpoint="test",
            resource_creation_timestamp=_TIMESTAMP,
            resource_plugin="test")

        self._panoptes_context = PanoptesContext(
            self.panoptes_test_conf_file,
            key_value_store_class_list=[
                PanoptesTestKeyValueStore, PanoptesResourcesKeyValueStore,
                PanoptesPollingPluginKeyValueStore, PanoptesSecretsStore,
                PanoptesPollingPluginAgentKeyValueStore,
                PanoptesDiscoveryPluginAgentKeyValueStore,
                PanoptesDiscoveryPluginKeyValueStore
            ],
            create_message_producer=False,
            async_message_producer=False,
            create_zookeeper_client=True)
        self._runner_class = PanoptesPluginWithEnrichmentRunner

        self._log_capture = LogCapture(
            attributes=TestPanoptesPluginRunner.extract)

    def tearDown(self):
        """Detach the log capture installed in setUp()."""
        self._log_capture.uninstall()

    @patch('yahoo_panoptes.framework.metrics.time')
    @patch(
        'yahoo_panoptes.framework.context.PanoptesContext._get_message_producer'
    )
    @patch('yahoo_panoptes.framework.context.PanoptesContext.message_producer',
           new_callable=PropertyMock)
    @patch(
        'yahoo_panoptes.polling.polling_plugin_agent.PanoptesPollingTaskContext'
    )
    @patch(
        'yahoo_panoptes.framework.resources.PanoptesResourceStore.get_resource'
    )
    def test_polling_plugin_agent(self, resource, panoptes_context,
                                  message_producer, message_producer_property,
                                  time):
        """Run polling_plugin_task end-to-end and verify logs and metrics."""
        producer = MockPanoptesMessageProducer()
        time.return_value = 1
        message_producer.return_value = producer
        message_producer_property.return_value = producer
        resource.return_value = self._panoptes_resource
        panoptes_context.return_value = self._panoptes_context

        polling_plugin_task('Test Polling Plugin', 'polling')

        log_prefix = '[Test Polling Plugin] [plugin|test|site|test|class|test|' \
                     'subclass|test|type|test|id|test|endpoint|test]'

        self._log_capture.check_present(
            ('panoptes.tests.test_runner', 'INFO',
             'Attempting to execute plugin "Test Polling Plugin"'),
            ('panoptes.tests.test_runner', 'DEBUG',
             'Loaded plugin "Test Polling Plugin", '
             'version "0.1" of type "polling", category "polling"'),
            ('panoptes.tests.test_runner', 'DEBUG',
             'Loaded plugin "Test Polling Plugin 2", '
             'version "0.1" of type "polling", category "polling"'),
            ('panoptes.tests.test_runner', 'ERROR',
             'No enrichment data found on KV store for plugin Test'
             ' Polling Plugin resource test namespace test using key test'),
            ('panoptes.tests.test_runner', 'DEBUG',
             'Successfully created PanoptesEnrichmentCache enrichment_data '
             '{} for plugin Test Polling Plugin'),
            ('panoptes.tests.test_runner', 'DEBUG',
             'Attempting to get lock for plugin "Test Polling Plugin", '
             'with lock path and identifier in seconds'),
            ('panoptes.tests.test_runner', 'INFO',
             '{} Acquired lock'.format(log_prefix)),
            ('panoptes.tests.test_runner', 'INFO',
             '{} Plugin returned a result set with 1 members'.format(
                 log_prefix)),
            ('panoptes.tests.test_runner', 'INFO',
             '{} Callback function ran in seconds'.format(log_prefix)),
            ('panoptes.tests.test_runner', 'INFO',
             '{} Ran in seconds'.format(log_prefix)),
            ('panoptes.tests.test_runner', 'INFO',
             '{} Released lock'.format(log_prefix)),
            ('panoptes.tests.test_runner', 'INFO',
             '{} GC took seconds. There are garbage objects.'.format(
                 log_prefix)),
            ('panoptes.tests.test_runner', 'DEBUG',
             'Deleting module: yapsy_loaded_plugin_Test_Polling_Plugin'),
            ('panoptes.tests.test_runner', 'DEBUG',
             'Deleting module: yapsy_loaded_plugin_Test_Polling_Plugin'),
            ('panoptes.tests.test_runner', 'DEBUG', 'Deleting module: '
             'yapsy_loaded_plugin_Test_Polling_Plugin_Second_Instance'),
            order_matters=False)

        # Expected payload for the metrics group pushed to Kafka (with the
        # resource creation timestamp normalized to 1.0 below).
        kafka_push_log = {
            "metrics_group_type":
            "Test",
            "metrics_group_interval":
            60,
            "metrics_group_creation_timestamp":
            1,
            "metrics_group_schema_version":
            "0.2",
            "resource": {
                "resource_site": "test",
                "resource_class": "test",
                "resource_subclass": "test",
                "resource_type": "test",
                "resource_id": "test",
                "resource_endpoint": "test",
                "resource_metadata": {
                    "_resource_ttl": "604800"
                },
                "resource_creation_timestamp": 1.0,
                "resource_plugin": "test"
            },
            "metrics": [{
                "metric_creation_timestamp": 1,
                "metric_name": "test",
                "metric_value": 0.0,
                "metric_type": "gauge"
            }],
            "dimensions": []
        }

        # Timestamps need to be removed to check Panoptes Metrics
        metric_groups_seen = 0
        for line in self._log_capture.actual():

            _, _, log = line

            if 'resource_creation_timestamp' in log:
                log = re.sub(r"resource_creation_timestamp\": \d+\.\d+,",
                             "resource_creation_timestamp\": 1.0, log" if False else "resource_creation_timestamp\": 1.0,", log)
                resource_match = re.search(r'{.*}', log)

                if resource_match is not None:
                    self.assertEqual(
                        ordered(json.loads(resource_match.group(0))),
                        ordered(kafka_push_log))

            if log.startswith('Sent metric group'):
                metric_groups_seen += 1

            if log.startswith('Going to send metric group'):
                metric_groups_seen += 1

        self.assertEqual(metric_groups_seen, 2)
Ejemplo n.º 36
0
    def test_build_singularity_container(self, mock_shell, mock_scp):
        """Verify compiler.build_singularity_container drives the expected
        shell/scp calls, logs the right messages and returns a uuid4-based
        image filename.

        Covers three scenarios: remote build with sudo (default), remote
        build without sudo, and a purely local build.
        """

        def assert_uuid4_filename(filename):
            # Filenames look like "/tmp/<uuid4>.img"; strip the 5-char
            # prefix and 4-char suffix before validating the uuid part.
            uuid_part = filename[5:-4]
            try:
                UUID(uuid_part, version=4)
            except ValueError:
                self.fail("Filename is not uuid4 compliant: " + uuid_part)

        log = LogCapture()

        # Remote build using sudo (become defaults to True).
        filename = compiler.build_singularity_container(
            '*****@*****.**', '/test/test.def', 'image.img', '/tmp')
        mock_scp.assert_called_with(filename, '*****@*****.**', 'image.img',
                                    False)
        assert_uuid4_filename(filename)

        # Remote build without sudo.
        filename = compiler.build_singularity_container('*****@*****.**',
                                                        '/test/test.def',
                                                        'image.img',
                                                        '/tmp',
                                                        become=False)
        mock_scp.assert_called_with(filename, '*****@*****.**', 'image.img',
                                    False)
        assert_uuid4_filename(filename)

        # Local compilation: empty destination host, no scp expected.
        filename = compiler.build_singularity_container('',
                                                        '/test/test.def',
                                                        'image.img',
                                                        '/tmp',
                                                        become=False)
        assert_uuid4_filename(filename)

        ## WE VERIFY ALL THE CALLS:

        call_1 = call('sudo', '*****@*****.**',
                      ['singularity', 'bootstrap', 'image.img', 'test.def'])
        call_2 = call('singularity', '*****@*****.**',
                      ['bootstrap', 'image.img', 'test.def'])
        call_3 = call('singularity', '',
                      ['bootstrap', 'image.img', '/test/test.def'])
        call_4 = call('mv', '', [ANY, ANY])
        mock_shell.assert_has_calls([call_1, call_2, call_3, call_4])

        # Checking that we are logging the correct messages
        log.check((
            'root', 'INFO',
            "Executing [[email protected]], 'sudo singulary bootstrap image.img test.def'"
        ), ('root', 'INFO', 'Downloading image from [email protected]'), (
            'root', 'INFO',
            "Executing [[email protected]], 'singulary bootstrap image.img test.def'"
        ), ('root', 'INFO', 'Downloading image from [email protected]'), (
            'root', 'INFO',
            "Executing [], 'singulary bootstrap image.img /test/test.def'"),
                ('root', 'INFO', 'Moving image to final destination'))
        log.uninstall()
Ejemplo n.º 37
0
class Tests(TestCase):
    """Tests for most_owed() run inside a sandboxed temp directory with
    captured logging and a frozen datetime."""

    def setUp(self):
        # Fresh sandbox, log capture and frozen clock for every test.
        self.dir = TempDirectory()
        self.log = LogCapture()
        self.r = Replacer()
        self.r.replace('datetime.datetime', test_datetime())

    def tearDown(self):
        # Undo the fixtures in reverse order of installation.
        self.r.restore()
        self.log.uninstall()
        self.dir.cleanup()

    def _csv(self, body, encoding=None):
        """Write *body* to test.csv in the sandbox and return its path."""
        if encoding is None:
            return self.dir.write('test.csv', body)
        return self.dir.write('test.csv', body, encoding)

    def test_parse(self):
        path = self._csv(b'''\
Name,Money Owed
Adam Alpha,100
''')
        self.assertEqual(most_owed(path), 'Adam Alpha')
        self.log.check(('root', 'INFO', 'Processing took 0:00:10'))

    def test_max(self):
        path = self._csv(b'''\
Name,Money Owed
Adam Alpha,100
Brian Beta, 300
''')
        self.assertEqual(most_owed(path), 'Brian Beta')

    def test_unicode(self):
        path = self._csv('''\
Name,Money Owed
C\xe9dric Cee,200
''', 'utf8')
        self.assertEqual(most_owed(path), 'C\xe9dric Cee')

    def test_whitespace(self):
        path = self._csv(b'''\
Name,Money Owed
 Adam Alpha,\t100
''')
        self.assertEqual(most_owed(path), 'Adam Alpha')

    def test_invalid_numbers(self):
        path = self._csv(b'''\
Name,Money Owed
Adam Alpha,X
Brian Beta, 300
''')
        self.assertEqual(most_owed(path), 'Brian Beta')
        self.log.check(('root', 'WARNING', "ignoring 'X' as not valid"),
                       ('root', 'INFO', 'Processing took 0:00:10'))

    def test_malformed(self):
        path = self._csv(b'''\
Name,
Adam Alpha
''')
        with ShouldRaise(KeyError('Money Owed')):
            most_owed(path)
Ejemplo n.º 38
0
class PlanClaseTestCase(PlanificacionesTestCase):
    """Shared fixtures for the "Plan de Clase" planning tests.

    Builds a user, a complete POST payload (including the nested formsets
    for elementos curriculares and procesos didacticos) and two PlanClase
    instances: one owned by self.user and one by an unrelated user.

    Fix: the original payload listed the 'instrumento_evaluacion' key twice;
    the duplicate literal silently overwrote the first and was removed.
    """

    def setUp(self):
        """Creates data for testing Planes de Clase"""
        super().setUp()

        self.logger = LogCapture()

        self.user = User.objects.create_user(
            email='*****@*****.**',
            password='******',
            first_name='David',
            last_name='Padilla',
            institution='Colegio Benalcazar'
        )

        self.data = {
            'name': 'Plan de Clase1',
            'numero_plan': 2,
            'docentes': 'David',
            'fecha': '2019-01-20',
            'asignatura': self.asignatura.id,
            'cursos': [self.curso_1.id, self.curso_2.id],
            'paralelos': 'A y C',
            'numero_estudiantes': '23',
            'tema': 'Tema del plan',
            'periodos': 'Períodos del plan',
            'metodologia': 'Metodología del plan de clase',
            'tecnica': 'Tecnica usada',
            'objetivos': [self.objetivo_1.id, self.objetivo_2.id],
            'bibliografia': 'Lorem ipsum dolor sit amet.',
            'contenido_cientifico': 'Lorem ipsum dolor sit amet.',
            'material_didactico': 'Lorem ipsum dolor sit amet.',
            'instrumento_evaluacion': 'Lorem ipsum dolor sit amet.',
            # Formset Elementos curriculares 1
            'elementos_curriculares-TOTAL_FORMS': '2',
            'elementos_curriculares-INITIAL_FORMS': '0',
            'elementos_curriculares-MIN_NUM_FORMS': '0',
            'elementos_curriculares-MAX_NUM_FORMS': '1000',
            'elementos_curriculares-0-destreza': self.destreza_1.id,
            'elementos_curriculares-0-conocimientos_asociados': 'lorem ipsum',
            'elementos_curriculares-0-actividades_evaluacion': 'lorem ipsum',
            'elementos_curriculares-1-destreza': self.destreza_2.id,
            'elementos_curriculares-1-conocimientos_asociados': 'lorem ipsum',
            'elementos_curriculares-1-actividades_evaluacion': 'lorem ipsum',

            # Formset Procesos didacticos
            'proceso-elementos_curriculares-0-procesos_didacticos-'\
            'TOTAL_FORMS': '2',
            'proceso-elementos_curriculares-0-procesos_didacticos-'\
            'INITIAL_FORMS': '0',
            'proceso-elementos_curriculares-0-procesos_didacticos-'\
            'MIN_NUM_FORMS': '0',
            'proceso-elementos_curriculares-0-procesos_didacticos-'\
            'MAX_NUM_FORMS': '1000',
            'proceso-elementos_curriculares-0-procesos_didacticos-0-'\
            'name': 'lorem',
            'proceso-elementos_curriculares-0-procesos_didacticos-0-'\
            'description': 'lorem ipsum',
            'proceso-elementos_curriculares-0-procesos_didacticos-0-'\
            'tiempo': 'lorem ipsum',
            'proceso-elementos_curriculares-0-procesos_didacticos-0-'\
            'recursos': 'lorem ipsum',

            'proceso-elementos_curriculares-0-procesos_didacticos-1-'\
            'name': 'lorem',
            'proceso-elementos_curriculares-0-procesos_didacticos-1-'\
            'description': 'lorem ipsum',
            'proceso-elementos_curriculares-0-procesos_didacticos-1-'\
            'tiempo': 'lorem ipsum',
            'proceso-elementos_curriculares-0-procesos_didacticos-1-'\
            'recursos': 'lorem ipsum',

            'proceso-elementos_curriculares-1-procesos_didacticos-'\
            'TOTAL_FORMS': '1',
            'proceso-elementos_curriculares-1-procesos_didacticos-'\
            'INITIAL_FORMS': '0',
            'proceso-elementos_curriculares-1-procesos_didacticos-'\
            'MIN_NUM_FORMS': '0',
            'proceso-elementos_curriculares-1-procesos_didacticos-'\
            'MAX_NUM_FORMS': '1000',
            'proceso-elementos_curriculares-1-procesos_didacticos-0-'\
            'name': 'lorem',
            'proceso-elementos_curriculares-1-procesos_didacticos-0-'\
            'description': 'lorem ipsum',
            'proceso-elementos_curriculares-1-procesos_didacticos-0-'\
            'tiempo': 'lorem ipsum',
            'proceso-elementos_curriculares-1-procesos_didacticos-0-'\
            'recursos': 'lorem ipsum',
        }

        self.plan_clase = mixer.blend(PlanClase, elaborado_por=self.user)

        another_user = mixer.blend(User)
        self.another_plan = mixer.blend(PlanClase, elaborado_por=another_user)

    def tearDown(self):
        """Detach the log capture installed in setUp()."""
        self.logger.uninstall()
Ejemplo n.º 39
0
 def test_simple_strict_asserted(self):
     """ensure_checked() passes once the ERROR record has been check()ed."""
     capture = LogCapture(ensure_checks_above=ERROR)
     root.error('during')
     capture.uninstall()
     capture.check(("root", "ERROR", "during"))
     capture.ensure_checked()
Ejemplo n.º 40
0
class TestService(unittest.TestCase):
    """Functional tests for the tokenserver WSGI app backed by the memory
    node-assignment backend, with the BrowserID verifier mocked out.

    Fix: replaced the deprecated ``assertEquals`` alias (removed in
    Python 3.12) with ``assertEqual`` throughout.
    """

    def get_ini(self):
        """Return the path of the .ini file configuring the test service."""
        return os.path.join(os.path.dirname(__file__),
                            'test_memorynode.ini')

    def setUp(self):
        """Build the WSGI app, install the mock verifier and log capture."""
        self.config = testing.setUp()
        settings = {}
        load_into_settings(self.get_ini(), settings)
        self.config.add_settings(settings)
        self.config.include("tokenserver")
        load_and_register("tokenserver", self.config)
        self.backend = self.config.registry.getUtility(INodeAssignment)
        wsgiapp = self.config.make_wsgi_app()
        self.app = TestApp(wsgiapp)
        # Mock out the verifier to return successfully by default.
        self.mock_verifier_context = self.mock_verifier()
        self.mock_verifier_context.__enter__()
        self.logs = LogCapture()

    def tearDown(self):
        """Uninstall log capture and restore the real verifier."""
        self.logs.uninstall()
        self.mock_verifier_context.__exit__(None, None, None)

    def assertMetricWasLogged(self, key):
        """Check that a metric was logged during the request."""
        for r in self.logs.records:
            if key in r.__dict__:
                break
        else:
            assert False, "metric %r was not logged" % (key,)

    def clearLogs(self):
        """Drop all captured log records."""
        del self.logs.records[:]

    @contextlib.contextmanager
    def mock_verifier(self, response=None, exc=None):
        """Temporarily replace the verifier's verify() method.

        If *exc* is given it is raised; otherwise *response* (or a default
        "okay" response derived from the assertion) is returned.
        """
        def mock_verify_method(assertion):
            if exc is not None:
                raise exc
            if response is not None:
                return response
            return {
                "status": "okay",
                "email": get_assertion_info(assertion)["principal"]["email"],
            }
        verifier = get_verifier(self.config.registry)
        orig_verify_method = verifier.__dict__.get("verify", None)
        verifier.__dict__["verify"] = mock_verify_method
        try:
            yield None
        finally:
            if orig_verify_method is None:
                del verifier.__dict__["verify"]
            else:
                verifier.__dict__["verify"] = orig_verify_method

    def _getassertion(self, **kw):
        """Build a BrowserID assertion with sensible test defaults."""
        kw.setdefault('email', '*****@*****.**')
        kw.setdefault('audience', 'http://tokenserver.services.mozilla.com')
        return make_assertion(**kw).encode('ascii')

    def test_unknown_app(self):
        """Requests for an unknown app name get a 404 with error details."""
        headers = {'Authorization': 'BrowserID %s' % self._getassertion()}
        resp = self.app.get('/1.0/xXx/token', headers=headers, status=404)
        self.assertTrue('errors' in resp.json)

    def test_no_auth(self):
        """Requests without credentials are rejected with a 401."""
        self.app.get('/1.0/sync/1.5', status=401)

    def test_valid_app(self):
        """A valid assertion yields an endpoint and the default duration."""
        headers = {'Authorization': 'BrowserID %s' % self._getassertion()}
        res = self.app.get('/1.0/sync/1.1', headers=headers)
        self.assertIn('https://example.com/1.1', res.json['api_endpoint'])
        self.assertIn('duration', res.json)
        self.assertEqual(res.json['duration'], 3600)
        self.assertMetricWasLogged('token.assertion.verify_success')
        self.clearLogs()

    def test_unknown_pattern(self):
        """A configured app version without a node pattern returns 503."""
        # sync 1.5 is defined in the .ini file, but  no pattern exists for it.
        headers = {'Authorization': 'BrowserID %s' % self._getassertion()}
        self.app.get('/1.0/sync/1.5', headers=headers, status=503)

    def test_discovery(self):
        """The root URL advertises auth endpoint and supported services."""
        res = self.app.get('/')
        self.assertEqual(res.json, {
            'auth': 'http://localhost',
            'services': {
                'sync': ['1.1', '1.5'],
            }
        })

    def test_version_returns_404_by_default(self):
        with mock.patch('os.path.exists', return_value=False):
            self.app.get('/__version__', status=404)

    def test_version_returns_file_in_current_folder_if_present(self):
        content = {'version': '0.8.1'}
        fake_file = mock.mock_open(read_data=json.dumps(content))
        with mock.patch('os.path.exists'):
            with mock.patch('tokenserver.views.open', fake_file):
                response = self.app.get('/__version__')
                self.assertEqual(response.json, content)

    def test_lbheartbeat(self):
        res = self.app.get('/__lbheartbeat__')
        self.assertEqual(res.json, {})

    def test_unauthorized_error_status(self):
        """Each verifier failure mode maps to its own error status."""
        assertion = self._getassertion()
        # Totally busted auth -> generic error.
        headers = {'Authorization': 'Unsupported-Auth-Scheme IHACKYOU'}
        res = self.app.get('/1.0/sync/1.1', headers=headers, status=401)
        self.assertEqual(res.json['status'], 'error')
        # Bad signature -> "invalid-credentials"
        headers = {'Authorization': 'BrowserID %s' % assertion}
        with self.mock_verifier(exc=browserid.errors.InvalidSignatureError):
            res = self.app.get('/1.0/sync/1.1', headers=headers, status=401)
        self.assertEqual(res.json['status'], 'invalid-credentials')
        # Bad audience -> "invalid-credentials"
        with self.mock_verifier(exc=browserid.errors.AudienceMismatchError):
            res = self.app.get('/1.0/sync/1.1', headers=headers, status=401)
        self.assertEqual(res.json['status'], 'invalid-credentials')
        self.assertMetricWasLogged('token.assertion.verify_failure')
        self.assertMetricWasLogged('token.assertion.audience_mismatch_error')
        self.clearLogs()
        # Expired timestamp -> "invalid-timestamp"
        with self.mock_verifier(exc=browserid.errors.ExpiredSignatureError):
            res = self.app.get('/1.0/sync/1.1', headers=headers, status=401)
        self.assertEqual(res.json['status'], 'invalid-timestamp')
        self.assertTrue('X-Timestamp' in res.headers)
        self.assertMetricWasLogged('token.assertion.verify_failure')
        self.assertMetricWasLogged('token.assertion.expired_signature_error')
        self.clearLogs()
        # Connection error -> 503
        with self.mock_verifier(exc=browserid.errors.ConnectionError):
            res = self.app.get('/1.0/sync/1.1', headers=headers, status=503)
        self.assertMetricWasLogged('token.assertion.verify_failure')
        self.assertMetricWasLogged('token.assertion.connection_error')
        # It should also log a full traceback of the error.
        for r in self.logs.records:
            if r.msg == "Unexpected verification error":
                assert r.exc_info is not None
                break
        else:
            assert False, "failed to log a traceback for ConnectionError"
        self.clearLogs()
        # Some other wacky error -> not captured
        with self.mock_verifier(exc=ValueError):
            with self.assertRaises(ValueError):
                res = self.app.get('/1.0/sync/1.1', headers=headers)

    def test_unverified_token(self):
        """Assertions with fxa-tokenVerified=False must be rejected."""
        headers = {'Authorization': 'BrowserID %s' % self._getassertion()}
        # Assertion should not be rejected if fxa-tokenVerified is unset
        mock_response = {
            "status": "okay",
            "email": "*****@*****.**",
            "idpClaims": {}
        }
        with self.mock_verifier(response=mock_response):
            self.app.get("/1.0/sync/1.1", headers=headers, status=200)
        # Assertion should not be rejected if fxa-tokenVerified is True
        mock_response['idpClaims']['fxa-tokenVerified'] = True
        with self.mock_verifier(response=mock_response):
            self.app.get("/1.0/sync/1.1", headers=headers, status=200)
        # Assertion should be rejected if fxa-tokenVerified is False
        mock_response['idpClaims']['fxa-tokenVerified'] = False
        with self.mock_verifier(response=mock_response):
            res = self.app.get("/1.0/sync/1.1", headers=headers, status=401)
        self.assertEqual(res.json['status'], 'invalid-credentials')

    def test_generation_number_change(self):
        """Only monotonically increasing fxa-generation values are accepted."""
        headers = {"Authorization": "BrowserID %s" % self._getassertion()}
        # Start with no generation number.
        mock_response = {"status": "okay", "email": "*****@*****.**"}
        with self.mock_verifier(response=mock_response):
            res1 = self.app.get("/1.0/sync/1.1", headers=headers)
        # Now send an explicit generation number.
        # The node assignment should not change.
        mock_response["idpClaims"] = {"fxa-generation": 12}
        with self.mock_verifier(response=mock_response):
            res2 = self.app.get("/1.0/sync/1.1", headers=headers)
        self.assertEqual(res1.json["uid"], res2.json["uid"])
        self.assertEqual(res1.json["api_endpoint"], res2.json["api_endpoint"])
        # Previous generation numbers get an invalid-generation response.
        del mock_response["idpClaims"]
        with self.mock_verifier(response=mock_response):
            res = self.app.get("/1.0/sync/1.1", headers=headers, status=401)
        self.assertEqual(res.json["status"], "invalid-generation")
        mock_response["idpClaims"] = {"some-nonsense": "lolwut"}
        with self.mock_verifier(response=mock_response):
            res = self.app.get("/1.0/sync/1.1", headers=headers, status=401)
        self.assertEqual(res.json["status"], "invalid-generation")
        mock_response["idpClaims"] = {"fxa-generation": 10}
        with self.mock_verifier(response=mock_response):
            res = self.app.get("/1.0/sync/1.1", headers=headers, status=401)
        self.assertEqual(res.json["status"], "invalid-generation")
        # Equal generation numbers are accepted.
        mock_response["idpClaims"] = {"fxa-generation": 12}
        with self.mock_verifier(response=mock_response):
            res2 = self.app.get("/1.0/sync/1.1", headers=headers)
        self.assertEqual(res1.json["uid"], res2.json["uid"])
        self.assertEqual(res1.json["api_endpoint"], res2.json["api_endpoint"])
        # Later generation numbers are accepted.
        # Again, the node assignment should not change.
        mock_response["idpClaims"] = {"fxa-generation": 13}
        with self.mock_verifier(response=mock_response):
            res2 = self.app.get("/1.0/sync/1.1", headers=headers)
        self.assertEqual(res1.json["uid"], res2.json["uid"])
        self.assertEqual(res1.json["api_endpoint"], res2.json["api_endpoint"])
        # And that should lock out the previous generation number
        mock_response["idpClaims"] = {"fxa-generation": 12}
        with self.mock_verifier(response=mock_response):
            res = self.app.get("/1.0/sync/1.1", headers=headers, status=401)
        self.assertEqual(res.json["status"], "invalid-generation")
        # Various nonsense generation numbers should give errors.
        mock_response["idpClaims"] = {"fxa-generation": "whatswrongwithyour"}
        with self.mock_verifier(response=mock_response):
            res = self.app.get("/1.0/sync/1.1", headers=headers, status=401)
        self.assertEqual(res.json["status"], "invalid-generation")
        mock_response["idpClaims"] = {"fxa-generation": None}
        with self.mock_verifier(response=mock_response):
            res = self.app.get("/1.0/sync/1.1", headers=headers, status=401)
        self.assertEqual(res.json["status"], "invalid-generation")
        mock_response["idpClaims"] = {"fxa-generation": "42"}
        with self.mock_verifier(response=mock_response):
            res = self.app.get("/1.0/sync/1.1", headers=headers, status=401)
        self.assertEqual(res.json["status"], "invalid-generation")
        mock_response["idpClaims"] = {"fxa-generation": ["I", "HACK", "YOU"]}
        with self.mock_verifier(response=mock_response):
            res = self.app.get("/1.0/sync/1.1", headers=headers, status=401)
        self.assertEqual(res.json["status"], "invalid-generation")

    def test_client_state_change(self):
        """Changing X-Client-State requires a generation bump and a new uid."""
        mock_response = {
            "status": "okay",
            "email": "*****@*****.**",
            "idpClaims": {"fxa-generation": 1234},
        }
        # Start with no client-state header.
        headers = {'Authorization': 'BrowserID %s' % self._getassertion()}
        with self.mock_verifier(response=mock_response):
            res = self.app.get('/1.0/sync/1.1', headers=headers)
        uid0 = res.json['uid']
        # No change == same uid.
        with self.mock_verifier(response=mock_response):
            res = self.app.get('/1.0/sync/1.1', headers=headers)
        self.assertEqual(res.json['uid'], uid0)
        # Changing client-state header requires changing generation number.
        headers['X-Client-State'] = 'aaaa'
        with self.mock_verifier(response=mock_response):
            res = self.app.get('/1.0/sync/1.1', headers=headers, status=401)
        self.assertEqual(res.json['status'], 'invalid-client-state')
        desc = res.json['errors'][0]['description']
        self.assertTrue(desc.endswith('new value with no generation change'))
        # Change the client-state header, get a new uid.
        headers['X-Client-State'] = 'aaaa'
        mock_response["idpClaims"]["fxa-generation"] += 1
        with self.mock_verifier(response=mock_response):
            res = self.app.get('/1.0/sync/1.1', headers=headers)
        uid1 = res.json['uid']
        self.assertNotEqual(uid1, uid0)
        # No change == same uid.
        with self.mock_verifier(response=mock_response):
            res = self.app.get('/1.0/sync/1.1', headers=headers)
        self.assertEqual(res.json['uid'], uid1)
        # Send a client-state header, get a new uid.
        headers['X-Client-State'] = 'bbbb'
        mock_response["idpClaims"]["fxa-generation"] += 1
        with self.mock_verifier(response=mock_response):
            res = self.app.get('/1.0/sync/1.1', headers=headers)
        uid2 = res.json['uid']
        self.assertNotEqual(uid2, uid0)
        self.assertNotEqual(uid2, uid1)
        # No change == same uid.
        with self.mock_verifier(response=mock_response):
            res = self.app.get('/1.0/sync/1.1', headers=headers)
        self.assertEqual(res.json['uid'], uid2)
        # Use a previous client-state, get an auth error.
        headers['X-Client-State'] = 'aaaa'
        with self.mock_verifier(response=mock_response):
            res = self.app.get('/1.0/sync/1.1', headers=headers, status=401)
        self.assertEqual(res.json['status'], 'invalid-client-state')
        desc = res.json['errors'][0]['description']
        self.assertTrue(desc.endswith('stale value'))
        del headers['X-Client-State']
        with self.mock_verifier(response=mock_response):
            res = self.app.get('/1.0/sync/1.1', headers=headers, status=401)
        self.assertEqual(res.json['status'], 'invalid-client-state')
        headers['X-Client-State'] = 'aaaa'
        mock_response["idpClaims"]["fxa-generation"] += 1
        with self.mock_verifier(response=mock_response):
            res = self.app.get('/1.0/sync/1.1', headers=headers, status=401)
        self.assertEqual(res.json['status'], 'invalid-client-state')

    def test_client_state_cannot_revert_to_empty(self):
        """Once set, the client-state may not be cleared back to empty."""
        # Start with a client-state header.
        headers = {
            'Authorization': 'BrowserID %s' % self._getassertion(),
            'X-Client-State': 'aaa',
        }
        res = self.app.get('/1.0/sync/1.1', headers=headers)
        uid0 = res.json['uid']
        # Sending no client-state will fail.
        del headers['X-Client-State']
        res = self.app.get('/1.0/sync/1.1', headers=headers, status=401)
        self.assertEqual(res.json['status'], 'invalid-client-state')
        desc = res.json['errors'][0]['description']
        self.assertTrue(desc.endswith('empty string'))
        headers['X-Client-State'] = ''
        res = self.app.get('/1.0/sync/1.1', headers=headers, status=401)
        self.assertEqual(res.json['status'], 'invalid-client-state')
        desc = res.json['errors'][0]['description']
        self.assertTrue(desc.endswith('empty string'))
        # And the uid will be unchanged.
        headers['X-Client-State'] = 'aaa'
        res = self.app.get('/1.0/sync/1.1', headers=headers)
        self.assertEqual(res.json['uid'], uid0)

    def test_client_specified_duration(self):
        """Clients may shorten the token duration but never extend it."""
        headers = {'Authorization': 'BrowserID %s' % self._getassertion()}
        # It's ok to request a shorter-duration token.
        res = self.app.get('/1.0/sync/1.1?duration=12', headers=headers)
        self.assertEqual(res.json['duration'], 12)
        # But you can't exceed the server's default value.
        res = self.app.get('/1.0/sync/1.1?duration=4000', headers=headers)
        self.assertEqual(res.json['duration'], 3600)
        # And nonsense values are ignored.
        res = self.app.get('/1.0/sync/1.1?duration=lolwut', headers=headers)
        self.assertEqual(res.json['duration'], 3600)
        res = self.app.get('/1.0/sync/1.1?duration=-1', headers=headers)
        self.assertEqual(res.json['duration'], 3600)

    def test_allow_new_users(self):
        """The allow_new_users setting blocks only previously-unseen users."""
        # New users are allowed by default.
        settings = self.config.registry.settings
        self.assertEqual(settings.get('tokenserver.allow_new_users'), None)
        assertion = self._getassertion(email="*****@*****.**")
        headers = {'Authorization': 'BrowserID %s' % assertion}
        self.app.get('/1.0/sync/1.1', headers=headers, status=200)
        # They're allowed if we explicitly allow them.
        settings['tokenserver.allow_new_users'] = True
        assertion = self._getassertion(email="*****@*****.**")
        headers = {'Authorization': 'BrowserID %s' % assertion}
        self.app.get('/1.0/sync/1.1', headers=headers, status=200)
        # They're not allowed if we explicitly disable them.
        settings['tokenserver.allow_new_users'] = False
        assertion = self._getassertion(email="*****@*****.**")
        headers = {'Authorization': 'BrowserID %s' % assertion}
        res = self.app.get('/1.0/sync/1.1', headers=headers, status=401)
        self.assertEqual(res.json['status'], 'new-users-disabled')
        # But existing users are still allowed.
        assertion = self._getassertion(email="*****@*****.**")
        headers = {'Authorization': 'BrowserID %s' % assertion}
        self.app.get('/1.0/sync/1.1', headers=headers, status=200)
        assertion = self._getassertion(email="*****@*****.**")
        headers = {'Authorization': 'BrowserID %s' % assertion}
        self.app.get('/1.0/sync/1.1', headers=headers, status=200)

    def test_metrics_uid_logging(self):
        """Successful requests log the metrics uid and first-seen time."""
        assert "fxa.metrics_uid_secret_key" in self.config.registry.settings
        assertion = self._getassertion(email="*****@*****.**")
        headers = {'Authorization': 'BrowserID %s' % assertion}
        self.app.get('/1.0/sync/1.1', headers=headers, status=200)
        self.assertMetricWasLogged('uid')
        self.assertMetricWasLogged('uid.first_seen_at')

    def test_metrics_uid_is_returned_in_response(self):
        """The hashed FxA uid is included in the token response body."""
        assert "fxa.metrics_uid_secret_key" in self.config.registry.settings
        assertion = self._getassertion(email="*****@*****.**")
        headers = {'Authorization': 'BrowserID %s' % assertion}
        res = self.app.get('/1.0/sync/1.1', headers=headers, status=200)
        self.assertTrue('hashed_fxa_uid' in res.json)
Ejemplo n.º 41
0
 def test_simple_strict_asserted_3(self):
     """mark_all_checked() satisfies ensure_checked() without an explicit check()."""
     capture = LogCapture(ensure_checks_above=ERROR)
     root.error('during')
     capture.uninstall()
     capture.mark_all_checked()
     capture.ensure_checked()
Ejemplo n.º 42
0
class TestWatcherConfigfile(TestBase):
    """Tests for the 'configfile' watcher plugin.

    Exercises the route-spec file watcher end to end: plugin startup, file
    change detection, config validation and the event monitor loop, asserting
    on the exact sequence of log messages produced.
    """

    def additional_setup(self):
        """Create a temp route-spec file and load watcher/health plugins."""
        self.temp_dir = tempfile.mkdtemp()
        self.abs_fname = self.temp_dir + "/r.spec"
        self.conf = {
            "file"                : self.abs_fname,
            "region_name"         : "dummy-region",
            "vpc_id"              : "dummy-vpc",
            "mode"                : "configfile",
            "health"              : "icmpecho",
            "icmp_check_interval" : 2
        }
        self.watcher_plugin_class = \
                main.load_plugin("configfile", DEFAULT_WATCHER_PLUGIN_MOD)
        self.health_plugin_class = \
                main.load_plugin("icmpecho", DEFAULT_HEALTH_PLUGIN_MOD)

        # The watcher thread needs to have a config file available right at the
        # start, even if there's nothing in it
        self.write_config({})

    def setUp(self):
        """Install log capture and neutralize vpc.handle_spec for all tests."""
        self.lc = LogCapture()
        self.lc.setLevel(logging.DEBUG)
        self.lc.addFilter(test_common.MyLogCaptureFilter())

        self.additional_setup()

        self.addCleanup(self.cleanup)

        self.old_handle_spec = vpc.handle_spec

        # Monkey patch the handle_spec function, which is called by the
        # watcher. The handle_spec function is defined in the VPC module.
        # However, it was directly imported by the watcher module, so it's now
        # a copy in the watcher module namespace. Thus, the patch has to be
        # done actually in the watcher module. For safety, we'll do it in both
        # the vpc and watcher module.
        def new_handle_spec(*args, **kwargs):
            pass
        watcher.handle_spec = vpc.handle_spec = new_handle_spec

    def additional_cleanup(self):
        """Remove the temp directory holding the route-spec file."""
        shutil.rmtree(self.temp_dir)

    def cleanup(self):
        """Undo log capture and the handle_spec monkey patch."""
        self.lc.uninstall()
        watcher.handle_spec = vpc.handle_spec = self.old_handle_spec
        self.additional_cleanup()

    def write_config(self, data):
        """Serialize 'data' as JSON into the watched route-spec file."""
        with open(self.abs_fname, "w+") as f:
            f.write(json.dumps(data))

    def start_thread_log_tuple(self):
        """Return the expected log tuples emitted at watcher thread start."""
        return [
            ('root', 'INFO',
             "Configfile watcher plugin: Starting to watch route spec file "
             "'%s' for changes..." % self.abs_fname)
        ]

    def change_event_log_tuple(self):
        """Return the expected log tuple for a file-change event."""
        return ('root', 'INFO',
                "Detected file change event for %s" %
                self.abs_fname)

    def test_watcher_thread_no_config(self):
        """Starting without a config file logs an error but keeps running."""
        os.remove(self.abs_fname)
        watcher_plugin, health_plugin = \
                watcher.start_plugins(
                    self.conf,
                    self.watcher_plugin_class, self.health_plugin_class,
                    2)
        time.sleep(0.5)

        # Config file doesn't exist yet, so we should get an error.
        # Health monitor is started with a second delay, so no messages from
        # there, yet.
        l = self.start_thread_log_tuple()
        l.extend([
            ('root', 'ERROR',
             "Config ignored: Cannot open file: "
             "[Errno 2] No such file or directory: '%s'" % self.abs_fname),
            ('root', 'INFO',
             'ICMPecho health monitor plugin: Starting to watch instances.')
        ])
        self.lc.check(*l)

        watcher.stop_plugins(watcher_plugin, health_plugin)

    def test_watcher_thread_wrong_config(self):
        """A malformed (non-dict) route spec is detected and ignored."""
        watcher_plugin, health_plugin = \
                watcher.start_plugins(
                    self.conf,
                    self.watcher_plugin_class, self.health_plugin_class,
                    2)
        time.sleep(1.2)

        self.lc.clear()
        inp = "MALFORMED"
        self.write_config(inp)

        time.sleep(1)
        # Config file malformed
        l = [
            self.change_event_log_tuple(),
            ('root', 'ERROR',
             'Config ignored: Expected dictionary at top level')
        ]
        # NOTE(review): lc_compare() is presumably an order-insensitive
        # variant of lc.check() provided by TestBase -- confirm.
        self.lc_compare(l)

        watcher.stop_plugins(watcher_plugin, health_plugin)

    def test_watcher_thread(self):
        """Full watcher cycle: spec updates, health checks, monitor loop."""
        # Monkey patch the healthcheck method of the ICMP health monitor class,
        # since we don't really want to send out ICMP echo requests when we run
        # the tests. Will indicate failure for all IP addresses starting with
        # "3."
        def new_do_health_checks(s, addrs):
            return [a for a in addrs if a.startswith("3.")], []

        # We do this in the class, before the plugin is instantiated
        self.health_plugin_class.do_health_checks = new_do_health_checks

        watcher_plugin, health_plugin = \
                watcher.start_plugins(
                    self.conf,
                    self.watcher_plugin_class, self.health_plugin_class,
                    2)

        time.sleep(2)

        l = self.start_thread_log_tuple()
        l.extend([
             ('root', 'INFO',
              'ICMPecho health monitor plugin: Starting to watch instances.'),
             ('root', 'DEBUG', 'Checking live IPs: (none alive)')])
        self.lc.check(*l)
        self.lc.clear()

        inp = {
                  u"10.1.0.0/16" : [u"1.1.1.1", u"2.2.2.2"],
                  u"10.2.0.0/16" : [u"3.3.3.3"]
              }
        self.write_config(inp)

        time.sleep(2)

        watcher._event_monitor_loop(
            "dummy-region", "dummy-vpc",
            watcher_plugin, health_plugin,
            iterations=1, sleep_time=0.5)

        time.sleep(2)

        # 3.3.3.3 is reported as failed by the patched health check above.
        self.lc.check(
            self.change_event_log_tuple(),
            ('root', 'DEBUG', 'Checking live IPs: (none alive)'),
            ('root', 'DEBUG',
             'New route spec detected. Updating health-monitor '
             'with: 1.1.1.1,2.2.2.2,3.3.3.3'),
            ('root', 'DEBUG', 'event_monitor_loop ended: Global stop'),
            ('root', 'DEBUG', u'Checking live IPs: 1.1.1.1,2.2.2.2,3.3.3.3'),
            ('root', 'INFO', u'Currently failed IPs: 3.3.3.3'))
        self.lc.clear()

        inp = {
                  u"10.1.0.0/16" : [u"4.4.4.4", u"2.2.2.2"],
                  u"10.2.0.0/16" : [u"3.3.3.3"]
              }
        self.write_config(inp)

        time.sleep(1)
        """
        Remove this check: The log messages may come through in a different
        order, which isn't a problem.

        self.lc.check(
            ('root', 'INFO',
             'Detected file change event for %s' % self.abs_fname),
            ('root', 'DEBUG', 'Checking live IPs: 1.1.1.1,2.2.2.2'))
        """
        self.lc.clear()

        watcher._event_monitor_loop(
            "dummy-region", "dummy-vpc",
            watcher_plugin, health_plugin,
            iterations=1, sleep_time=0.5)

        time.sleep(2)
        self.lc.check(
            ('root', 'DEBUG',
             'New route spec detected. Updating health-monitor '
             'with: 2.2.2.2,3.3.3.3,4.4.4.4'),
            ('root', 'DEBUG', 'event_monitor_loop ended: Global stop'),
            ('root', 'DEBUG', u'Checking live IPs: 2.2.2.2,4.4.4.4'))

        self.lc.clear()

        # With route_check_time_interval=1 the loop also performs the
        # regular (timed) route check.
        watcher._event_monitor_loop(
            "dummy-region", "dummy-vpc",
            watcher_plugin, health_plugin,
            iterations=2, sleep_time=1, route_check_time_interval=1)

        time.sleep(2)
        self.lc.check(
            ('root', 'DEBUG', u'Checking live IPs: 2.2.2.2,4.4.4.4'),
            ('root', 'DEBUG', 'Time for regular route check'),
            ('root', 'DEBUG', 'event_monitor_loop ended: Global stop'),
            ('root', 'DEBUG', u'Checking live IPs: 2.2.2.2,4.4.4.4'))

        watcher.stop_plugins(watcher_plugin, health_plugin)
# --- Example no. 43 (extraction marker from scraped source; score: 0) ---
class TestVpcBotoInteractions(unittest.TestCase):
    """
    We use the moto mock framework for boto in order to test our interactions
    with boto.

    """
    def setUp(self):
        """Install log capture and seed RNG for deterministic host choice."""
        self.lc = LogCapture()
        self.lc.addFilter(test_common.MyLogCaptureFilter())
        self.addCleanup(self.cleanup)
        # Hosts are chosen randomly from a prefix group. Therefore, we need to
        # seed the random number generator with a specific value in order to
        # have reproducible tests.
        random.seed(123)

    def cleanup(self):
        """Remove the log-capture handler again."""
        self.lc.uninstall()

    @mock_ec2_deprecated
    def make_mock_vpc(self):
        """
        Use plain (but mocked) boto functions to create a small VPC with two
        subnets and two instances as a basis for our tests.

        (not quite sure why this doesn't run in setUp().

        """
        con = boto.vpc.connect_to_region("ap-southeast-2")

        # Note that moto doesn't seem to honor the subnet and VPC address
        # ranges, it seems all instances always get something random from a
        # 10/8 range.
        self.new_vpc = con.create_vpc('10.0.0.0/16')
        self.new_subnet_a = con.create_subnet(self.new_vpc.id, '10.1.0.0/16')
        self.new_subnet_b = con.create_subnet(self.new_vpc.id, '10.2.0.0/16')

        res1 = con.run_instances('ami-1234abcd',
                                 subnet_id=self.new_subnet_a.id)
        res2 = con.run_instances('ami-1234abcd',
                                 subnet_id=self.new_subnet_b.id)
        self.i1 = res1.instances[0]
        self.i2 = res2.instances[0]
        self.i1ip = self.i1.private_ip_address
        self.i2ip = self.i2.private_ip_address

    @mock_ec2_deprecated
    def test_connect(self):
        """Exercise connect_to_region, get_vpc_overview and IP lookup."""
        self.make_mock_vpc()

        # With a test VPC created, we now test our own functions

        # In the mocked test the meta data won't contain the info we need (vpc
        # and region name), because the emulated EC2 instance isn't in any
        # region or vpc.
        meta = vpc.get_ec2_meta_data()
        self.assertTrue(meta == {})

        self.assertRaises(VpcRouteSetError, vpc.connect_to_region, "blah")

        con = vpc.connect_to_region("ap-southeast-2")

        # Error when specifying non-existent VPC
        self.assertRaises(VpcRouteSetError, vpc.get_vpc_overview, con,
                          "non-existent-vpc", "ap-southeast-2")

        # Get the default: First VPC if no VPC is specified
        d = vpc.get_vpc_overview(con, None, "ap-southeast-2")
        # NOTE(review): the hard-coded VPC id below relies on moto producing
        # deterministic ids for a freshly seeded mock environment -- confirm.
        self.assertEqual(d['vpc'].id, "vpc-be745e76")

        # Get specified VPC
        d = vpc.get_vpc_overview(con, self.new_vpc.id, "ap-southeast-2")
        self.assertEqual(d['vpc'].id, "vpc-be745e76")

        self.assertEqual(
            sorted([
                'subnets', 'route_tables', 'instance_by_id',
                'ip_subnet_lookup', 'instances', 'rt_subnet_lookup', 'zones',
                'vpc'
            ]), sorted(d.keys()))

        self.assertEqual(self.new_vpc.id, d['vpc'].id)
        self.assertTrue(self.new_subnet_a.id in [s.id for s in d['subnets']])
        self.assertTrue(self.new_subnet_b.id in [s.id for s in d['subnets']])
        self.assertTrue(len(d['zones']) == 3)
        self.assertTrue(len(d['route_tables']) == 1)
        self.assertTrue(len(d['instance_by_id'].keys()) == 2)
        self.assertTrue(d['instance_by_id'][self.i1.id].id == self.i1.id)
        self.assertTrue(d['instance_by_id'][self.i2.id].id == self.i2.id)

        self.assertRaises(VpcRouteSetError, vpc.find_instance_and_eni_by_ip, d,
                          "9.9.9.9")  # Non existent IP
        self.assertTrue(
            vpc.find_instance_and_eni_by_ip(d, self.i1ip)[0].id == self.i1.id)
        self.assertTrue(
            vpc.find_instance_and_eni_by_ip(d, self.i2ip)[0].id == self.i2.id)

    def _prepare_mock_env(self):
        """Build the mock VPC, associate route tables, resolve instances.

        Returns (con, vpc-overview-dict, i1, eni1, i2, eni2, rt_id).
        """
        self.make_mock_vpc()

        con = vpc.connect_to_region("ap-southeast-2")

        d = vpc.get_vpc_overview(con, self.new_vpc.id, "ap-southeast-2")

        rt_id = d['route_tables'][0].id

        con.associate_route_table(route_table_id=rt_id,
                                  subnet_id=self.new_subnet_a.id)
        con.associate_route_table(route_table_id=rt_id,
                                  subnet_id=self.new_subnet_b.id)

        d = vpc.get_vpc_overview(con, self.new_vpc.id, "ap-southeast-2")

        i1, eni1 = vpc.find_instance_and_eni_by_ip(d, self.i1ip)
        i2, eni2 = vpc.find_instance_and_eni_by_ip(d, self.i2ip)

        return con, d, i1, eni1, i2, eni2, rt_id

    @mock_ec2_deprecated
    def test_process_route_spec_config(self):
        """Route spec processing: add, switch-over, failure, protection."""
        con, d, i1, eni1, i2, eni2, rt_id = self._prepare_mock_env()

        route_spec = {u"10.1.0.0/16": [self.i1ip, self.i2ip]}

        d['cluster_node_subnets'] = \
                        vpc.make_cluster_node_subnet_list(d, route_spec)
        # Process a simple route spec, a route should have been added
        self.lc.clear()
        vpc.process_route_spec_config(con, d, route_spec, [], [])
        # One of the hosts is randomly chosen. We seeded the random number
        # generator at in this module, so we know that it will choose the
        # second host in this case.
        self.lc.check(
            ('root', 'DEBUG', 'Route spec processing. No failed IPs.'),
            ('root', 'INFO', "--- adding route in RT '%s' "
             "10.1.0.0/16 -> %s (%s, %s)" %
             (rt_id, self.i1ip, i1.id, eni1.id)))

        # One of the two IPs questionable, switch over
        d = vpc.get_vpc_overview(con, self.new_vpc.id, "ap-southeast-2")
        d['cluster_node_subnets'] = \
                        vpc.make_cluster_node_subnet_list(d, route_spec)
        self.lc.clear()
        vpc.process_route_spec_config(con, d, route_spec, [], [self.i1ip])
        self.lc.check(
            ('root', 'DEBUG', 'Route spec processing. No failed IPs.'),
            ('root', 'INFO',
             "--- eni in route in RT 'rtb-84dc7f2c' can't be found: "
             "10.1.0.0/16 -> (none) (instance '%s')" % i1.id),
            ('root', 'INFO',
             "--- updating existing route in RT '%s' 10.1.0.0/16 -> "
             "%s (%s, %s) (old IP: None, reason: old IP failed/questionable "
             "or not eligible anymore)" % (rt_id, self.i2ip, i2.id, eni2.id)))

        # Now switch back
        d = vpc.get_vpc_overview(con, self.new_vpc.id, "ap-southeast-2")
        d['cluster_node_subnets'] = \
                        vpc.make_cluster_node_subnet_list(d, route_spec)
        self.lc.clear()
        vpc.process_route_spec_config(con, d, route_spec, [], [self.i2ip])
        self.lc.check(
            ('root', 'DEBUG', 'Route spec processing. No failed IPs.'),
            ('root', 'INFO',
             "--- eni in route in RT 'rtb-84dc7f2c' can't be found: "
             "10.1.0.0/16 -> (none) (instance '%s')" % i2.id),
            ('root', 'INFO',
             "--- updating existing route in RT '%s' 10.1.0.0/16 -> "
             "%s (%s, %s) (old IP: None, reason: old IP failed/questionable "
             "or not eligible anymore)" % (rt_id, self.i1ip, i1.id, eni1.id)))

        # One of the two IPs failed, switch over
        d = vpc.get_vpc_overview(con, self.new_vpc.id, "ap-southeast-2")
        d['cluster_node_subnets'] = \
                        vpc.make_cluster_node_subnet_list(d, route_spec)
        self.lc.clear()
        vpc.process_route_spec_config(con, d, route_spec, [self.i1ip], [])
        self.lc.check(
            ('root', 'DEBUG',
             'Route spec processing. Failed IPs: %s' % self.i1ip),
            ('root', 'INFO',
             "--- eni in route in RT 'rtb-84dc7f2c' can't be found: "
             "10.1.0.0/16 -> (none) (instance '%s')" % i1.id),
            ('root', 'INFO',
             "--- updating existing route in RT '%s' 10.1.0.0/16 -> "
             "%s (%s, %s) (old IP: None, reason: old IP failed/questionable "
             "or not eligible anymore)" % (rt_id, self.i2ip, i2.id, eni2.id)))

        # Now all IPs for a route have failed
        d = vpc.get_vpc_overview(con, self.new_vpc.id, "ap-southeast-2")
        self.lc.clear()
        vpc.process_route_spec_config(con, d, route_spec,
                                      [self.i1ip, self.i2ip], [])
        self.lc.check(
            ('root', 'DEBUG', 'Route spec processing. Failed IPs: %s,%s' %
             (self.i1ip, self.i2ip)),
            ('root', 'INFO',
             "--- eni in route in RT 'rtb-84dc7f2c' can't be found: "
             "10.1.0.0/16 -> (none) (instance '%s')" % i2.id),
            ('root', 'WARNING',
             '--- cannot find available target for route update 10.1.0.0/16! '
             'Nothing I can do...'))

        # Add new route, remove old one
        route_spec = {u"10.2.0.0/16": [self.i1ip]}

        d = vpc.get_vpc_overview(con, self.new_vpc.id, "ap-southeast-2")
        d['cluster_node_subnets'] = \
                        vpc.make_cluster_node_subnet_list(d, route_spec)
        self.lc.clear()
        vpc.process_route_spec_config(con, d, route_spec, [], [])
        self.lc.check(
            ('root', 'DEBUG', 'Route spec processing. No failed IPs.'),
            ('root', 'INFO',
             "--- eni in route in RT 'rtb-84dc7f2c' can't be found: "
             "10.1.0.0/16 -> (none) (instance '%s')" % i2.id),
            ('root', 'INFO', "--- route not in spec, deleting in RT '%s': "
             "10.1.0.0/16 -> ... ((unknown), (unknown))" % rt_id),
            ('root', 'INFO', "--- adding route in RT '%s' "
             "10.2.0.0/16 -> %s (%s, %s)" %
             (rt_id, self.i1ip, i1.id, eni1.id)))

        # Protect old route (ignore_routes), add new route, watch the old route
        # NOT disappear.
        CURRENT_STATE.ignore_routes.append("10.2.0.0/16")  # protected route
        route_spec = {u"10.3.0.0/16": [self.i1ip]}

        d = vpc.get_vpc_overview(con, self.new_vpc.id, "ap-southeast-2")
        d['cluster_node_subnets'] = \
                        vpc.make_cluster_node_subnet_list(d, route_spec)
        self.lc.clear()
        vpc.process_route_spec_config(con, d, route_spec, [], [])
        # See in the logs that 10.2.0.0/16 wasn't deleted, even though it's not
        # in the route spec anymore.
        self.lc.check(
            ('root', 'DEBUG', 'Route spec processing. No failed IPs.'),
            ('root', 'INFO', "--- adding route in RT '%s' "
             "10.3.0.0/16 -> %s (%s, %s)" %
             (rt_id, self.i1ip, i1.id, eni1.id)))

    @mock_ec2_deprecated
    def test_add_new_route(self):
        """_add_new_route succeeds for a known IP and logs an error else."""
        con, d, i1, eni1, i2, eni2, rt_id = self._prepare_mock_env()
        route_spec = {"10.9.0.0/16": [self.i1ip]}
        d['cluster_node_subnets'] = \
                        vpc.make_cluster_node_subnet_list(d, route_spec)

        self.lc.clear()
        vpc._add_new_route("10.9.0.0/16", self.i1ip, d, con, rt_id)
        self.lc.check(('root', 'INFO', "--- adding route in RT '%s' "
                       "10.9.0.0/16 -> %s (%s, %s)" %
                       (rt_id, self.i1ip, i1.id, eni1.id)))

        self.lc.clear()
        # Unknown IP: no instance/eni can be found for it.
        vpc._add_new_route("10.9.0.0/16", "99.99.99.99", d, con, rt_id)
        self.lc.check(
            ('root', 'ERROR', "*** failed to add route in RT '%s' "
             "10.9.0.0/16 -> 99.99.99.99 (Could not find instance/eni "
             "for '99.99.99.99' in VPC '%s'.)" % (rt_id, self.new_vpc.id)))

    @mock_ec2_deprecated
    def test_update_route(self):
        """_update_route: normal update, unknown IP, non-existent route."""
        con, d, i1, eni1, i2, eni2, rt_id = self._prepare_mock_env()

        route_spec = {"10.9.0.0/16": [self.i1ip]}
        d['cluster_node_subnets'] = \
                        vpc.make_cluster_node_subnet_list(d, route_spec)

        vpc._add_new_route("10.9.0.0/16", self.i1ip, d, con, rt_id)

        self.lc.clear()
        route_spec = {"10.9.0.0/16": [self.i2ip]}
        d['cluster_node_subnets'] = \
                        vpc.make_cluster_node_subnet_list(d, route_spec)
        vpc._update_route("10.9.0.0/16", self.i2ip, self.i1ip, d, con, rt_id,
                          "foobar")
        self.lc.check(
            ('root', 'INFO', "--- updating existing route in RT '%s' "
             "10.9.0.0/16 -> %s (%s, %s) "
             "(old IP: %s, reason: foobar)" %
             (rt_id, self.i2ip, i2.id, eni2.id, self.i1ip)))

        self.lc.clear()
        vpc._update_route("10.9.0.0/16", "9.9.9.9", self.i2ip, d, con, rt_id,
                          "foobar")
        self.lc.check(
            ('root', 'ERROR', "*** failed to update route in RT '%s' "
             "10.9.0.0/16 -> %s (Could not find instance/eni "
             "for '9.9.9.9' in VPC '%s'.)" %
             (rt_id, self.i2ip, self.new_vpc.id)))

        # Trying to update a non-existent route
        self.lc.clear()
        vpc._update_route("10.9.9.9/16", self.i1ip, self.i2ip, d, con, rt_id,
                          "foobar")
        self.lc.check(
            ('root', 'INFO',
             "--- updating existing route in RT '%s' 10.9.9.9/16 -> %s "
             "(%s, %s) (old IP: %s, reason: foobar)" %
             (rt_id, self.i1ip, i1.id, eni1.id, self.i2ip)),
            ('root', 'ERROR',
             "*** failed to update route in RT '%s' 10.9.9.9/16 -> %s "
             "(replace_route failed: u'%s~10.9.9.9/16')" %
             (rt_id, self.i2ip, rt_id)))

    @mock_ec2_deprecated
    def test_get_real_instance_if_mismatched(self):
        """A mismatched instance/eni pair resolves to the real instance."""
        con, d, i1, eni1, i2, eni2, rt_id = self._prepare_mock_env()

        # No IP given, or a matching pair: no mismatch detected.
        self.assertFalse(vpc._get_real_instance_if_mismatch(d, None, i1, eni1))
        ret = vpc._get_real_instance_if_mismatch(d, self.i1ip, i1, eni1)
        self.assertFalse(ret)

        # Any wrong or missing instance/eni combination must resolve to i1.
        for inst, eni in [(i2, eni2), (i1, eni2), (i2, eni1), (i1, None),
                          (None, eni1), (i2, None), (None, eni2),
                          (None, None)]:
            ret = vpc._get_real_instance_if_mismatch(d, self.i1ip, inst, eni)
            self.assertEqual(ret.id, i1.id)

    @mock_ec2_deprecated
    def test_get_host_for_route(self):
        """_get_host_for_route for valid, obsoleted and broken routes."""
        con, d, i1, eni1, i2, eni2, rt_id = self._prepare_mock_env()

        vpc._add_new_route("10.9.0.0/16", self.i1ip, d, con, rt_id)

        rt = d['route_tables'][0]
        self.assertEqual(rt.id, rt_id)

        route = rt.routes[0]
        # Moto doesn't maintain intance or interface ID in the routes
        # correctly, so need to set this one manually
        route.instance_id = i1.id
        route.interface_id = eni1.id

        # Find correct host for route (the passed in cidr is only used for
        # logging)
        self.assertEqual((i1.id, self.i1ip, eni1.id),
                         vpc._get_host_for_route(d, route, rt, "cidr-log"))

        # Look for broken route without an instance id
        route.instance_id = None
        self.lc.clear()
        self.assertEqual(('(unknown)', None, '(unknown)'),
                         vpc._get_host_for_route(d, route, rt, "cidr-log"))
        self.lc.check(
            ('root', 'INFO', "--- obsoleted route in RT '%s' cidr-log -> "
             "... (doesn't point to instance anymore)" % rt_id))

        # Look for broken route with instance id for non-existent instance
        route.instance_id = "blah"
        self.lc.clear()
        self.assertEqual(('(unknown)', None, '(unknown)'),
                         vpc._get_host_for_route(d, route, rt, "cidr-log"))
        self.lc.check(('root', 'INFO',
                       "--- instance in route in RT '%s' can't be found: "
                       "cidr-log -> ... (instance 'blah')" % rt_id))

    @mock_ec2_deprecated
    def test_update_existing_routes(self):
        """_update_existing_routes: protection, deletion, update, no-op."""
        con, d, i1, eni1, i2, eni2, rt_id = self._prepare_mock_env()

        route_spec = {u"10.0.0.0/16": [self.i1ip]}

        d['cluster_node_subnets'] = \
                        vpc.make_cluster_node_subnet_list(d, route_spec)
        vpc._add_new_route("10.0.0.0/16", self.i1ip, d, con, rt_id)

        routes_in_rts = {}

        # Test that a protected route doesn't get updated
        self.lc.clear()
        CURRENT_STATE.ignore_routes = ["10.0.0.0/8"]
        vpc._update_existing_routes(route_spec, [], [], d, con, routes_in_rts)
        self.assertTrue(rt_id in CURRENT_STATE.vpc_state['route_tables'])
        self.assertTrue(
            "10.0.0.0/16" in CURRENT_STATE.vpc_state['route_tables'][rt_id])
        self.assertTrue("Ignored: Protected CIDR" in CURRENT_STATE.
                        vpc_state['route_tables'][rt_id]["10.0.0.0/16"])
        self.lc.check()

        # Now we un-protect the route and try again. Moto doesn't manage the
        # instance or interface ID in routes, so this will fail, because the
        # route doesn't look like it's pointing to an instance
        CURRENT_STATE.ignore_routes = []
        vpc._update_existing_routes(route_spec, [], [], d, con, routes_in_rts)
        self.assertTrue("Ignored: Not a route to an instance" in CURRENT_STATE.
                        vpc_state['route_tables'][rt_id]["10.0.0.0/16"])
        self.lc.check()

        # Now we manually set the instance and eni id in the route, so that the
        # test can proceed.
        rt = d['route_tables'][0]
        self.assertEqual(rt.id, rt_id)

        route = rt.routes[0]
        # Moto doesn't maintain instance or interface ID in the routes
        # correctly, so need to set this one manually. This time the route spec
        # won't contain eligible hosts.
        route.instance_id = i1.id
        route.interface_id = eni1.id
        self.lc.clear()
        route_spec = {u"10.0.0.0/16": []}
        vpc._update_existing_routes(route_spec, [], [], d, con, routes_in_rts)
        self.lc.check(
            ('root', 'INFO',
             "--- route not in spec, deleting in RT '%s': 10.0.0.0/16 -> "
             "... (%s, %s)" % (rt_id, i1.id, eni1.id)))

        # Get a refresh, since deleting via Boto interface doesn't update the
        # cached vpc-info
        d = vpc.get_vpc_overview(con, self.new_vpc.id, "ap-southeast-2")
        d['cluster_node_subnets'] = \
                        vpc.make_cluster_node_subnet_list(d, route_spec)
        # There shouldn't be any routes left now
        rt = d['route_tables'][0]
        self.assertFalse(rt.routes)

        # Now try again, but with proper route spec. First we need to create
        # the route again and manually...
        route_spec = {u"10.0.0.0/16": [self.i2ip]}
        d['cluster_node_subnets'] = \
                        vpc.make_cluster_node_subnet_list(d, route_spec)
        vpc._add_new_route("10.0.0.0/16", self.i1ip, d, con, rt_id)
        # ... and update our cached vpc info
        d = vpc.get_vpc_overview(con, self.new_vpc.id, "ap-southeast-2")
        d['cluster_node_subnets'] = \
                        vpc.make_cluster_node_subnet_list(d, route_spec)
        rt = d['route_tables'][0]
        route = rt.routes[0]
        route.instance_id = i1.id
        route.interface_id = eni1.id

        # Only IP for spec is in failed IPs, can't do anything
        self.lc.clear()
        vpc._update_existing_routes(route_spec, [self.i2ip], [], d, con,
                                    routes_in_rts)
        self.lc.check(('root', 'WARNING',
                       '--- cannot find available target for route update '
                       '10.0.0.0/16! Nothing I can do...'))

        # Now with available IPs
        self.lc.clear()
        vpc._update_existing_routes(route_spec, [], [], d, con, routes_in_rts)
        self.lc.check(
            ('root', 'INFO',
             "--- updating existing route in RT '%s' 10.0.0.0/16 -> "
             "%s (%s, %s) (old IP: %s, reason: old IP failed/questionable "
             "or not eligible anymore)" %
             (rt_id, self.i2ip, i2.id, eni2.id, self.i1ip)))

        # Now with same route spec again
        d = vpc.get_vpc_overview(con, self.new_vpc.id, "ap-southeast-2")
        rt = d['route_tables'][0]
        route = rt.routes[0]
        route.instance_id = i2.id
        route.interface_id = eni2.id
        self.lc.clear()
        routes_in_rts = {}
        vpc._update_existing_routes(route_spec, [], [], d, con, routes_in_rts)
        self.lc.check(('root', 'INFO',
                       "--- route exists already in RT '%s': 10.0.0.0/16 -> "
                       "%s (%s, %s)" % (rt_id, self.i2ip, i2.id, eni2.id)))

    @mock_ec2_deprecated
    def test_add_missing_routes(self):
        """_add_missing_routes: creation, no-op, forced add, all-failed."""
        con, d, i1, eni1, i2, eni2, rt_id = self._prepare_mock_env()

        route_spec = {u"10.0.0.0/16": [self.i1ip]}
        routes_in_rts = {}
        self.lc.clear()
        vpc._update_existing_routes(route_spec, [], [], d, con, routes_in_rts)
        self.lc.check()

        self.lc.clear()
        d['cluster_node_subnets'] = \
                        vpc.make_cluster_node_subnet_list(d, route_spec)
        vpc._add_missing_routes(route_spec, [], [], {}, d, con, routes_in_rts)
        self.lc.check(
            ('root', 'INFO', "--- adding route in RT '%s' 10.0.0.0/16 -> "
             "%s (%s, %s)" % (rt_id, self.i1ip, i1.id, eni1.id)))

        # The route exists already (passed in routes_in_rts), so no new route
        # should be created here.
        self.lc.clear()
        vpc._add_missing_routes(route_spec, [], [], {"10.0.0.0/16": self.i1ip},
                                d, con, {rt_id: ["10.0.0.0/16"]})
        self.lc.check()

        # Force a route creation by passing nothing for routes_in_rts and
        # passing in a 'previous' choice for the router
        self.lc.clear()
        vpc._add_missing_routes(route_spec, [], [], {"10.0.0.0/16": self.i1ip},
                                d, con, {rt_id: []})
        self.lc.check(
            ('root', 'INFO', "--- adding route in RT '%s' 10.0.0.0/16 -> "
             "%s (%s, %s)" % (rt_id, self.i1ip, i1.id, eni1.id)))

        # Now try the same with the only possible IP in failed IPs.
        self.lc.clear()
        vpc._add_missing_routes(route_spec, [self.i1ip], [], {}, d, con,
                                {rt_id: []})
        self.lc.check(('root', 'WARNING',
                       '--- cannot find available target for route addition '
                       '10.0.0.0/16! Nothing I can do...'))

    @mock_ec2_deprecated
    def test_multi_address(self):
        # Testing that we can find interfaces, which have the specified IP on a
        # second, private IP address
        con, d, i1, eni1, i2, eni2, rt_id = self._prepare_mock_env()

        priv = eni1.private_ip_addresses[0]

        # Attach an extra, non-primary private IP to the first ENI.
        priv = boto.ec2.networkinterface.PrivateIPAddress(
            private_ip_address="10.9.9.9", primary=False)
        eni1.private_ip_addresses.append(priv)
        vpc._make_ip_subnet_lookup(d)

        self.lc.clear()
        route_spec = {"10.0.0.0/16": ["10.9.9.9"]}
        d['cluster_node_subnets'] = \
                        vpc.make_cluster_node_subnet_list(d, route_spec)
        vpc._add_missing_routes(route_spec, [], [], {}, d, con, {rt_id: []})
        self.lc.check(('root', 'INFO',
                       "--- adding route in RT '%s' 10.0.0.0/16 -> 10.9.9.9 "
                       "(%s, %s)" % (rt_id, i1.id, eni1.id)))

    @mock_ec2_deprecated
    def test_handle_spec(self):
        """handle_spec end to end: route add, then 'exists already' match."""
        self.make_mock_vpc()

        # Need to take a peek inside the VPC so we can properly evaluate the
        # output later on
        con = vpc.connect_to_region("ap-southeast-2")
        d = vpc.get_vpc_overview(con, self.new_vpc.id, "ap-southeast-2")
        route_spec = {u"10.2.0.0/16": [self.i1ip]}
        d['cluster_node_subnets'] = \
                        vpc.make_cluster_node_subnet_list(d, route_spec)
        i, eni = vpc.find_instance_and_eni_by_ip(d, self.i1ip)

        rt_id = d['route_tables'][0].id

        con.associate_route_table(route_table_id=rt_id,
                                  subnet_id=self.new_subnet_a.id)
        con.associate_route_table(route_table_id=rt_id,
                                  subnet_id=self.new_subnet_b.id)

        # Test handle_spec
        vid = self.new_vpc.id
        self.lc.clear()
        vpc.handle_spec("ap-southeast-2", vid, route_spec, [], [])
        self.lc.check(
            ('root', 'DEBUG', 'Handle route spec'),
            ('root', 'DEBUG', "Connecting to AWS region 'ap-southeast-2'"),
            ('root', 'DEBUG', "Retrieving information for VPC '%s'" % vid),
            ('root', 'DEBUG', 'Route spec processing. No failed IPs.'),
            ('root', 'INFO',
             "--- adding route in RT '%s' 10.2.0.0/16 -> %s (%s, %s)" %
             (rt_id, self.i1ip, self.i1.id, eni.id)))

        # mock the get_instance_private_ip_from_route() function in vpc. Reason
        # being: The boto mocking library (moto) doesn't handle ENIs in routes
        # correctly. Therefore, a match against the information we get from the
        # routes will never work. So, we provide a wrapper, which fills the
        # instance's ENI information into the route. This means that this
        # function now will always match. It's good for testing the 'match'
        # part of the code.
        old_func = vpc.get_instance_private_ip_from_route

        def my_get_instance_private_ip_from_route(instance, route):
            route.interface_id = instance.interfaces[0].id
            return old_func(instance, route)

        vpc.get_instance_private_ip_from_route = \
                                my_get_instance_private_ip_from_route
        self.lc.clear()
        vpc.handle_spec("ap-southeast-2", vid, route_spec, [], [])

        # Restore the original function before asserting on the logs.
        vpc.get_instance_private_ip_from_route = old_func

        self.lc.check(
            ('root', 'DEBUG', 'Handle route spec'),
            ('root', 'DEBUG', "Connecting to AWS region 'ap-southeast-2'"),
            ('root', 'DEBUG', "Retrieving information for VPC '%s'" % vid),
            ('root', 'DEBUG', 'Route spec processing. No failed IPs.'),
            ('root', 'INFO',
             "--- route exists already in RT '%s': 10.2.0.0/16 -> "
             "%s (%s, %s)" % (rt_id, self.i1ip, self.i1.id, eni.id)))
class TestMetrics(unittest2.TestCase):
    """Integration tests for per-request metrics and the metrics_timer helper."""

    def setUp(self):
        # Capture all log output; the metrics machinery emits one record
        # per request carrying the timing fields as record attributes.
        self.logs = LogCapture()

    def tearDown(self):
        self.logs.uninstall()

    def test_service_metrics(self):
        """A decorated service view logs request_time, view_time and custom fields."""
        stub_service = Service(name="stub", path="/stub")

        @stub_service.get()
        @metrics_timer("view_time")
        def stub_view(request):
            request.metrics["stub"] = "stub-a-dub-dub"
            return {}

        with pyramid.testing.testConfig() as config:
            config.include("cornice")
            config.include("mozsvc")
            register_service_views(config, stub_service)
            app = TestApp(config.make_wsgi_app())
            res = app.get("/stub")
            self.assertEquals(res.body, "{}")

        # BUG FIX: the original assertTrue(len(...), 1) passed "1" as the
        # failure *message*, so it only checked the list was non-empty.
        # Assert the exact record count instead.
        self.assertEquals(len(self.logs.records), 1)
        r = self.logs.records[0]
        self.assertEquals(r.stub, "stub-a-dub-dub")
        self.assertTrue(0 < r.request_time < 0.1)
        self.assertTrue(0 < r.view_time <= r.request_time)

    def test_timing_decorator(self):
        """metrics_timer used as a decorator stores elapsed time in request.metrics."""
        @metrics_timer("timer1")
        def doit1():
            time.sleep(0.01)

        def viewit(request):
            doit1()

        request = Request.blank("/")
        initialize_request_metrics(request)
        with pyramid.testing.testConfig(request=request):
            viewit(request)

        ts = request.metrics["timer1"]
        self.assertTrue(0.01 < ts < 0.1)

    def test_timing_contextmanager(self):
        """metrics_timer used as a context manager stores elapsed time as well."""
        def viewit(request):
            with metrics_timer("timer1"):
                time.sleep(0.01)

        request = Request.blank("/")
        initialize_request_metrics(request)
        with pyramid.testing.testConfig(request=request):
            viewit(request)

        ts = request.metrics["timer1"]
        self.assertTrue(0.01 < ts < 0.1)

    def test_timing_contextmanager_with_explicit_request_object(self):
        """Passing the request explicitly works without a pyramid testConfig."""
        def viewit(request):
            with metrics_timer("timer1", request):
                time.sleep(0.01)

        request = Request.blank("/")
        initialize_request_metrics(request)
        viewit(request)

        ts = request.metrics["timer1"]
        self.assertTrue(0.01 < ts < 0.1)

    def test_timing_contextmanager_doesnt_fail_if_no_metrics_dict(self):
        """Timing a request that was never initialized is a silent no-op."""
        def viewit(request):
            with metrics_timer("timer1"):
                time.sleep(0.01)

        request = Request.blank("/")
        with pyramid.testing.testConfig(request=request):
            viewit(request)

        self.assertFalse(hasattr(request, "metrics"))

    def test_timing_contextmanager_doesnt_fail_if_no_request_object(self):
        """Timing with no request in scope at all must not raise."""
        with metrics_timer("timer1"):
            time.sleep(0.01)

    def test_that_service_metrics_include_correct_response_codes(self):
        """The logged record carries the HTTP status of the response, whether
        returned directly, raised as an exception, or set implicitly."""
        stub_service = Service(name="stub", path="/{what}")

        @stub_service.get()
        def stub_view(request):
            what = request.matchdict["what"]
            if what == "ok":
                return Response(status=200)
            if what == "notfound":
                return Response(status=404)
            if what == "forbidden":
                return Response(status=403)
            if what == "exc_forbidden":
                raise HTTPForbidden
            if what == "impl_forbidden":
                request.response.status_code = 403
                return ""
            raise HTTPNotFound

        with pyramid.testing.testConfig() as config:
            config.include("cornice")
            config.include("mozsvc")
            register_service_views(config, stub_service)
            app = TestApp(config.make_wsgi_app())

            app.get("/ok", status=200)
            r = self.logs.records[-1]
            self.assertEquals(r.code, 200)

            app.get("/notfound", status=404)
            r = self.logs.records[-1]
            self.assertEquals(r.code, 404)
            app.get("/forbidden", status=403)
            r = self.logs.records[-1]
            self.assertEquals(r.code, 403)

            # "exc_notfound" matches no branch and falls through to the
            # final HTTPNotFound raise.
            app.get("/exc_notfound", status=404)
            r = self.logs.records[-1]
            self.assertEquals(r.code, 404)
            app.get("/exc_forbidden", status=403)
            r = self.logs.records[-1]
            self.assertEquals(r.code, 403)

            app.get("/impl_forbidden", status=403)
            r = self.logs.records[-1]
            self.assertEquals(r.code, 403)
Ejemplo n.º 45
0
 def test_clear_global_state(self):
     """After uninstall(), the capture handler must be gone from the
     logging module's private global handler registries."""
     from logging import _handlerList, _handlers

     lc = LogCapture()
     lc.uninstall()
     # Neither global table may still reference the uninstalled handler.
     self.assertFalse(lc in _handlers)
     self.assertFalse(lc in _handlerList)
Ejemplo n.º 46
0
    def test_get_cpuinfo_node(self, mock_shell):
        """
        It verifies that given a testbed it is possible to get the cpuinfo
        information of the node, for local and ssh protocols, and that
        disabled/unknown nodes, shell errors and unsupported protocols
        all yield an empty list.
        """

        l = LogCapture() # we capture the logger

        # When the testbed is local
        testbed = Testbed("name1",
                            True,
                            Testbed.slurm_category,
                            Testbed.protocol_local,
                            "user@server",
                            ['slurm'])

        node_1 = Node() # We add some nodes to Testbed_1
        node_1.name = "node_1"
        node_1.information_retrieved = True
        node_2 = Node()
        node_2.name = "node_2"
        node_2.information_retrieved = True
        testbed.nodes = [ node_1, node_2]
        node_1.disabled = True
        node_3 = Node()  # node_3 deliberately left out of the testbed
        node_3.name = "node_3"
        node_3.information_retrieved = True

        # When the node does not belong to the testbed it should return empty list
        cpus = parser.get_cpuinfo_node(testbed, node_3)

        self.assertEquals(0, len(cpus))

        # When the node is there, we have to get double CPU info
        mock_shell.return_value = self.command_output

        cpus = parser.get_cpuinfo_node(testbed, node_2)

        self.assertEquals(8, len(cpus))
        mock_shell.assert_called_with(command="ssh",
                                      params=["node_2", "'cat", "/proc/cpuinfo'"])
        self.assertEqual(mock_shell.call_count, 1)

        # When the node is disabled it should return an empty list
        # (call_count unchanged: no shell command must be issued)
        cpus = parser.get_cpuinfo_node(testbed, node_1)

        self.assertEquals(0, len(cpus))
        self.assertEqual(mock_shell.call_count, 1)

        # When the testbed is using ssh protocol, the command must be
        # routed through the testbed endpoint.
        testbed = Testbed("name1",
                            True,
                            Testbed.slurm_category,
                            Testbed.protocol_ssh,
                            "user@server",
                            ['slurm'])
        testbed.nodes = [node_2]

        mock_shell.return_value = self.command_output

        cpus = parser.get_cpuinfo_node(testbed, node_2)

        self.assertEquals(8, len(cpus))
        mock_shell.assert_called_with(command="ssh",
                                      server=testbed.endpoint,
                                      params=["node_2", "'cat", "/proc/cpuinfo'"])
        self.assertEqual(mock_shell.call_count, 2)

        # We simulate what happens if we get an exception executing the command
        error = subprocess.CalledProcessError(returncode=255, cmd="ls")
        mock_shell.side_effect = error

        cpus = parser.get_cpuinfo_node(testbed, node_2)

        self.assertEquals(0, len(cpus))
        self.assertEqual(mock_shell.call_count, 3)

        # When the testbed has an unknown protocol no command is executed
        testbed = Testbed("name1",
                            True,
                            Testbed.slurm_category,
                            "xxx",
                            "user@server",
                            ['slurm'])
        testbed.nodes = [node_2]

        cpus = parser.get_cpuinfo_node(testbed, node_2)

        self.assertEquals(0, len(cpus))
        self.assertEqual(mock_shell.call_count, 3)

        # We verify that we raised the right errors
        # Checking that we are logging the correct messages
        l.check(
            ('root', 'ERROR', 'Exception trying to get the node cpu info'),
            ('root', 'INFO', 'Tesbed protocol: xxx not supported to get node information')
            )
        l.uninstall() # We uninstall the capture of the logger
Ejemplo n.º 47
0
class TestJsonLogFormatter(TestCase):
    """Tests for JsonLogFormatter's JSON output structure and field content."""

    def setUp(self):
        # Capture every emitted record so each test can format and inspect it.
        self.handler = LogCapture()
        self.logger_name = "TestingTestPilot"
        self.formatter = JsonLogFormatter(logger_name=self.logger_name)

    def tearDown(self):
        self.handler.uninstall()

    def _fetchLastLog(self):
        """Format the single captured record, validate it against the JSON
        logging schema and return the parsed dict."""
        self.assertEquals(len(self.handler.records), 1)
        details = json.loads(self.formatter.format(self.handler.records[0]))
        jsonschema.validate(details, JSON_LOGGING_SCHEMA)
        return details

    def test_basic_operation(self):
        """Ensure log formatter contains all the expected fields and values"""
        message_text = "simple test"
        logging.debug(message_text)
        details = self._fetchLastLog()

        expected_present = ["Timestamp", "Hostname"]
        for key in expected_present:
            self.assertTrue(key in details)

        expected_meta = {
            "Severity": 7,
            "Type": "root",
            "Pid": os.getpid(),
            "Logger": self.logger_name,
            "EnvVersion": self.formatter.LOGGING_FORMAT_VERSION
        }
        for key, value in expected_meta.items():
            self.assertEquals(value, details[key])

        self.assertEquals(details['Fields']['message'], message_text)

    def test_custom_parameters(self):
        """Ensure log formatter can handle custom parameters"""
        logger = logging.getLogger("mozsvc.test.test_logging")
        # logger.warn() is a deprecated alias; use warning() instead.
        logger.warning("custom test %s", "one", extra={"more": "stuff"})
        details = self._fetchLastLog()

        self.assertEquals(details["Type"], "mozsvc.test.test_logging")
        self.assertEquals(details["Severity"], 4)

        fields = details['Fields']
        self.assertEquals(fields["message"], "custom test one")
        self.assertEquals(fields["more"], "stuff")

    def test_logging_error_tracebacks(self):
        """Ensure log formatter includes exception traceback information"""
        try:
            raise ValueError("\n")
        except Exception:
            logging.exception("there was an error")
        details = self._fetchLastLog()

        expected_meta = {
            "Severity": 3,
        }
        for key, value in expected_meta.items():
            self.assertEquals(value, details[key])

        fields = details['Fields']
        expected_fields = {
            'message': 'there was an error',
            'error': "ValueError('\\n',)"
        }
        for key, value in expected_fields.items():
            self.assertEquals(value, fields[key])

        self.assertTrue(fields['traceback'].startswith('Uncaught exception:'))
        self.assertTrue("<class 'ValueError'>" in fields['traceback'])
Ejemplo n.º 48
0
class Test(unittest.TestCase):
    """Tests for the phidgeter AnalogOut driver, verified mainly through
    captured log output.  Requires a physical phidget device attached."""

    def setUp(self):
        from testfixtures import LogCapture
        self.log_capture = LogCapture()
        self.log_group  = 'phidgeter.analog'
        self.lvl = 'DEBUG'

    def tearDown(self):
        self.log_capture.uninstall()

    def test_log_captures(self):
        # verification of log matching functionality
        from logging import getLogger
        getLogger().info('a message')
        self.log_capture.check(('root', 'INFO', 'a message'))

    def test_zero_enable(self):
        # Exercise disable, enable and toggle on analog output zero.
        phd_analog = AnalogOut()
        result = phd_analog.zero_disable()
        self.assertTrue(result, "Successfully turned off")

        result = phd_analog.zero_enable()
        self.assertTrue(result, "Successfully turned off")

        result = phd_analog.zero_toggle()
        self.assertTrue(result, "Successfully toggled")

    def test_two_disable(self):
        phd_analog = AnalogOut()
        result = phd_analog.two_disable()
        self.assertTrue(result, "Successfully turned off")

    def test_open_phidget(self):
        """ Apparently, LogCapture is setup to compare the entire log
         entries at once. So go through all of the operations, then
         check the total log output at the end.
        """
        phd = AnalogOut()
        self.assertTrue(phd.zero_enable())

        gr = self.log_group
        self.log_capture.check(
            (gr, "DEBUG", "Start of phidgeter with serial: None"),
            (gr, "DEBUG", "Attempting to open phidget"),
            (gr, "DEBUG", "Attempt to open first found"),
            (gr, "DEBUG", "Wait for attach 10300ms"),
            (gr, "INFO", "Opened phidget"),
            (gr, "DEBUG", "Attempting to close phidget"),
            (gr, "INFO", "Closed phidget")
            )

    def test_phidget_by_serial(self):
        # Get the serial number of the phidget from the usb descriptor
        serial = self.find_serial()

        # connect to that phidget precisely
        phd = AnalogOut(serial)
        self.assertTrue(phd.zero_enable())

    def test_two_trigger(self):
        phd = AnalogOut()
        self.assertTrue(phd.two_toggle())

    def find_serial(self):
        """ On linux only, use pyusb to enumerate all devices connected
        to the bus, return the string from the usb descriptor, which is
        the device serial number.  Delegates to find_windows_serial()
        on any non-Linux platform.
        """
        import platform
        if platform.system() != "Linux":
            return self.find_windows_serial()

        print "Finding serial number using pyusb"
        import usb
        for bus in usb.busses():
            devices = bus.devices
            for dev in devices:
                # 0x06c2 is the Phidgets, Inc. USB vendor id
                if dev.idVendor == 0x06c2:
                    print "  idVendor:",hex(dev.idVendor)
                    print "  idProduct:",hex(dev.idProduct)
                    ld = dev.open()
                    local_serial = ld.getString(dev.iSerialNumber, 256)
                    return local_serial

        raise ValueError("Can't find phidget (linux)")

    def find_windows_serial(self):
        """ On windows, the phidget appears as an HID, and will not be
        enumerated by pyusb. Use the windows management information command to
        find the deviceid, which has the serial number at the end.
        """

        from subprocess import Popen, PIPE

        print "Finding serial number using wmic"

        # Be careful how quotes are used in windows .bat files.
        wmic_cmd = "wmic path CIM_LogicalDevice where " + \
                   "\'DeviceID like \"%%VID_06C2%%\"\' get /value"

        # Can call popen directly, if you can figure out the escaping
        bat_out = open("wmic_cmd.bat", "w")
        bat_out.write(wmic_cmd)
        bat_out.close()

        sp = Popen("wmic_cmd.bat", stdin=PIPE, stdout=PIPE, stderr=PIPE)
        stdout, stderr = sp.communicate()

        # on the command line stdout is the return code, stderr is the
        # response of the wmic command. In popen land it is reversed.
        # Mogrify the string to be the end of the deviceid line, and
        # nothing after
        for line in stdout.split("\r"):
            if "DeviceID=USB" in line:
                local_serial = line.split("\\")[-1]
                local_serial = local_serial.split("\r")[0]
                return local_serial

        return "serial not found"
Ejemplo n.º 49
0
class TestPanoptesPluginWithEnrichmentRunner(TestPanoptesPluginRunner):
    """Runner tests for PanoptesPluginWithEnrichmentRunner.

    Reuses the log-based verification style of TestPanoptesPluginRunner,
    but exercises the enrichment-aware runner class instead.
    """

    @patch('redis.StrictRedis', panoptes_mock_redis_strict_client)
    @patch('kazoo.client.KazooClient', panoptes_mock_kazoo_client)
    def setUp(self):

        self.my_dir, self.panoptes_test_conf_file = get_test_conf_file()
        self._panoptes_resource = PanoptesResource(
            resource_site="test",
            resource_class="test",
            resource_subclass="test",
            resource_type="test",
            resource_id="test",
            resource_endpoint="test",
            resource_creation_timestamp=_TIMESTAMP,
            resource_plugin="test")

        self._panoptes_context = PanoptesContext(
            self.panoptes_test_conf_file,
            key_value_store_class_list=[
                PanoptesTestKeyValueStore, PanoptesResourcesKeyValueStore,
                PanoptesPollingPluginKeyValueStore, PanoptesSecretsStore,
                PanoptesPollingPluginAgentKeyValueStore,
                PanoptesDiscoveryPluginAgentKeyValueStore,
                PanoptesDiscoveryPluginKeyValueStore
            ],
            create_message_producer=False,
            async_message_producer=False,
            create_zookeeper_client=True)
        # The runner class under test; the parent class tests a different one.
        self._runner_class = PanoptesPluginWithEnrichmentRunner

        self._log_capture = LogCapture(
            attributes=TestPanoptesPluginRunner.extract)

    def tearDown(self):
        self._log_capture.uninstall()

    def test_basic_operations(self):
        """Plugin execution both without enrichment (error path) and with it."""
        # Test where enrichment is None
        mock_panoptes_enrichment_cache = Mock(return_value=None)
        with patch(
                'yahoo_panoptes.framework.plugins.runner.PanoptesEnrichmentCache',
                mock_panoptes_enrichment_cache):
            runner = self._runner_class(
                "Test Polling Plugin", "polling", PanoptesPollingPlugin,
                PanoptesPluginInfo, self._panoptes_resource,
                self._panoptes_context, PanoptesTestKeyValueStore,
                PanoptesTestKeyValueStore, PanoptesTestKeyValueStore,
                "plugin_logger", PanoptesMetricsGroupSet, _callback)
            runner.execute_plugin()

            # A None enrichment cache must abort plugin context setup.
            self._log_capture.check_present((
                'panoptes.tests.test_runner', 'ERROR',
                '[Test Polling Plugin] [plugin|test|site|test|class|test|subclass|test|'
                'type|test|id|test|endpoint|test] '
                'Could not setup context for plugin:'),
                                            order_matters=False)
            self._log_capture.uninstall()

        # Fresh capture for the second (successful) run.
        self._log_capture = LogCapture(
            attributes=TestPanoptesPluginRunner.extract)
        # Test with enrichment
        runner = self._runner_class(
            "Test Polling Plugin", "polling", PanoptesPollingPlugin,
            PanoptesPluginInfo, self._panoptes_resource,
            self._panoptes_context, PanoptesTestKeyValueStore,
            PanoptesTestKeyValueStore, PanoptesTestKeyValueStore,
            "plugin_logger", PanoptesMetricsGroupSet, _callback)
        runner.execute_plugin()

        self._log_capture.check_present(
            ('panoptes.tests.test_runner', 'INFO',
             'Attempting to execute plugin "Test Polling Plugin"'),
            ('panoptes.tests.test_runner', 'DEBUG', 'Found 3 plugins'),
            ('panoptes.tests.test_runner', 'DEBUG', 'Loaded plugin '
             '"Test Polling Plugin", version "0.1" of type "polling"'
             ', category "polling"'),
            ('panoptes.tests.test_runner', 'DEBUG',
             'Loaded plugin "Test Polling Plugin 2", '
             'version "0.1" of type "polling", category "polling"'),
            ('panoptes.tests.test_runner', 'DEBUG',
             'Loaded plugin "Test Polling Plugin Second Instance", '
             'version "0.1" of type "polling", category "polling"'),
            ('panoptes.tests.test_runner', 'INFO',
             '[Test Polling Plugin] [plugin|test|site|test|class|test|subclass|test|'
             'type|test|id|test|endpoint|test] Attempting to get lock for plugin '
             '"Test Polling Plugin"'),
            ('panoptes.tests.test_runner', 'DEBUG',
             'Attempting to get lock for plugin "Test Polling Plugin", with lock path and '
             'identifier in seconds'),
            ('panoptes.tests.test_runner', 'INFO',
             '[Test Polling Plugin] [plugin|test|site|test|class|test|subclass|test|'
             'type|test|id|test|endpoint|test] Acquired lock'),
            ('panoptes.tests.test_runner', 'INFO',
             '[Test Polling Plugin] [plugin|test|site|test|class|test|subclass|test|'
             'type|test|id|test|endpoint|test]'
             ' Ran in seconds'),
            ('panoptes.tests.test_runner', 'INFO',
             '[Test Polling Plugin] [plugin|test|site|test|class|test|subclass|test|'
             'type|test|id|test|endpoint|test] Released lock'),
            ('panoptes.tests.test_runner', 'INFO',
             '[Test Polling Plugin] [plugin|test|site|test|class|test|subclass|test|'
             'type|test|id|test|endpoint|test] Plugin returned'
             ' a result set with 1 members'),
            ('panoptes.tests.test_runner', 'INFO',
             '[Test Polling Plugin] [plugin|test|site|test|class|test|subclass|test|'
             'type|test|id|test|endpoint|test]'
             ' Callback function ran in seconds'),
            ('panoptes.tests.test_runner', 'INFO',
             '[Test Polling Plugin] [plugin|test|site|test|class|test|subclass|test|type|'
             'test|id|test|endpoint|test] GC took seconds. There are garbage objects.'
             ),
            ('panoptes.tests.test_runner', 'ERROR',
             'No enrichment data found on KV store for plugin Test Polling Plugin '
             'resource test namespace test using key test'),
            ('panoptes.tests.test_runner', 'DEBUG',
             'Successfully created PanoptesEnrichmentCache enrichment_data {} for plugin '
             'Test Polling Plugin'),
            order_matters=False)

    def test_callback_failure(self):
        """A raising results callback must be logged, not propagated."""
        runner = self._runner_class(
            "Test Polling Plugin", "polling", PanoptesPollingPlugin,
            PanoptesPluginInfo, self._panoptes_resource,
            self._panoptes_context, PanoptesTestKeyValueStore,
            PanoptesTestKeyValueStore, PanoptesTestKeyValueStore,
            "plugin_logger", PanoptesMetricsGroupSet, _callback_with_exception)
        runner.execute_plugin()

        self._log_capture.check_present((
            'panoptes.tests.test_runner', 'ERROR', '[Test Polling Plugin] '
            '[plugin|test|site|test|class|test|subclass|test|'
            'type|test|id|test|endpoint|test] Results callback function failed: :'
        ))

    # 'pass' is needed for these methods because the only difference in their logging output from
    # TestPanoptesPluginRunner is the presence of the PanoptesResource in some log messages.
    def test_lock_no_lock_object(self):
        pass

    def test_lock_is_none(self):
        pass

    def test_lock_is_not_locked(self):
        pass

    def test_plugin_failure(self):
        pass

    def test_plugin_wrong_result_type(self):
        """A plugin returning the wrong result type fails context setup."""
        runner = self._runner_class("Test Polling Plugin 2", "polling",
                                    PanoptesPollingPlugin, PanoptesPluginInfo,
                                    None, self._panoptes_context,
                                    PanoptesTestKeyValueStore,
                                    PanoptesTestKeyValueStore,
                                    PanoptesTestKeyValueStore, "plugin_logger",
                                    PanoptesMetric, _callback)
        runner.execute_plugin()

        self._log_capture.check_present((
            'panoptes.tests.test_runner', 'ERROR',
            '[Test Polling Plugin 2] [None] Could not setup context for plugin:'
        ))
Ejemplo n.º 50
0
    def test_update_cpu_node_information(self, mock_parser):
        """
        Verifies that update_cpu_node_information() replaces the CPU records
        of every enabled node, skips disabled nodes, and logs an error when
        no CPU info can be retrieved.
        """
        l = LogCapture()  # we capture the logger

        # We store some data in the db for the test.
        testbed, node_1, node_2 = self._create_initial_db_data()

        node_3 = Node()
        node_3.name = "node_3"
        node_3.information_retrieved = True
        testbed.nodes.append(node_3)

        node_3.cpus = [
            CPU("Intel", "Xeon", "x86_64", "e6333", "2600Mhz", True, 2,
                "cache", "111")
        ]

        # So, testbed has 3 nodes, one disabled and the other ones enabled
        db.session.commit()

        cpus_result = [
            CPU("Intel2", "Xeon2", "x86_64", "e6333", "2600Mhz", True, 2,
                "cache", "111"),
            CPU("Intel3", "Xeon3", "x86_64", "e6333", "2600Mhz", True, 2,
                "cache", "111")
        ]

        mock_parser.return_value = cpus_result

        slurm.update_cpu_node_information()

        # We verify the results: the old "Intel" CPU record was replaced
        # by the freshly-parsed "Intel2"/"Intel3" records.
        self.assertEquals(
            0, len(db.session.query(CPU).filter_by(vendor_id="Intel").all()))
        self.assertEquals(
            1, len(db.session.query(CPU).filter_by(vendor_id="Intel2").all()))
        self.assertEquals(
            1, len(db.session.query(CPU).filter_by(vendor_id="Intel3").all()))

        # Only the two enabled nodes are parsed.
        calls = [mock.call(testbed, node_2), mock.call(testbed, node_3)]
        mock_parser.assert_has_calls(calls)
        self.assertEquals(2, mock_parser.call_count)

        # In case an error occurs retrieving the information
        mock_parser.return_value = []

        slurm.update_cpu_node_information()

        calls = [mock.call(testbed, node_2), mock.call(testbed, node_3)]
        mock_parser.assert_has_calls(calls)
        self.assertEquals(4, mock_parser.call_count)

        # Checking that we are logging the correct messages
        l.check(('root', 'INFO', 'Updating CPU info for node: node_2'),
                ('root', 'INFO', 'Updating CPU info for node: node_3'),
                ('root', 'ERROR',
                 'Impossible to update CPU info for node: node_2'),
                ('root', 'ERROR',
                 'Impossible to update CPU info for node: node_3'))
        l.uninstall()  # We uninstall the capture of the logger
class TestComponentParsing(unittest.TestCase):
    """Tests for ComponentParser: building Component objects from a
    <components> XML element tree."""

    def setUp(self):
        self.l = LogCapture()
        
    def tearDown(self):
        self.l.uninstall()
    
    def test_that_empty_dictionary_is_returned_if_no_components_found(self):
        op = ComponentParser()
        
        result = op.components
        
        assert_that(result, is_({}))
    
    def test_that_parse_requires_an_outer_components_element(self):
        root = ET.Element('component')  #Note the lack of a final 's'
        op = ComponentParser()
        
        op.parse(root)
        result = op.components
        
        assert_that(result, is_({}))
        
    
    def test_that_parse_generates_a_component_if_xml_contains_component_element(self):
        root = ET.Element('components')
        ET.SubElement(root, 'component', {'name':"don't care"})
        op = ComponentParser()
        
        op.parse(root)
        result = op.components
        
        assert_that(result, is_not([]))
        # NOTE(review): is_(type(Component)) matches any instance of `type`,
        # not specifically a dict of Components — a weak assertion; confirm
        # intent before tightening.
        assert_that(type(result), is_(type(Component)))
    
    def test_that_parse_generates_two_component_if_xml_contains_two_component_elements(self):
        root = ET.Element('components')
        name1 = "first"
        name2 = "second"
        ET.SubElement(root, 'component', {'name':name1})
        ET.SubElement(root, 'component', {'name':name2})
        op = ComponentParser()
        
        op.parse(root)
        result = op.components
        
        assert_that(len(result), is_(2))
        # NOTE(review): same weak type assertion as above — see comment there.
        assert_that(type(result[name1]), is_(type(Component)))
        assert_that(type(result[name2]), is_(type(Component)))
                
    def test_that_parse_generates_empty_dictionary_if_xml_does_not_contain_components_element(self):
        root = ET.Element('model')
        op = ComponentParser()
        
        op.parse(root)
        result = op.components
        
        assert_that(result, is_({}))
        
    
    def test_that_parse_generates_a_component_with_attributes_if_specified(self):
        root = ET.Element('components')
        component_name = 'example'
        ET.SubElement(root, 'component', { 'name':component_name, 'random':'abc', 'random2':'123'})
        op = ComponentParser()
        
        op.parse(root)
        result = op.components[component_name]
        
        assert_that(result.attributes, is_not(None))
        assert_that(result.attributes, has_entry('name', 'example'))
        assert_that(result.attributes, has_entry('random', 'abc'))
        assert_that(result.attributes, has_entry('random2', '123'))
        
    def test_that_parse_sets_component_names_as_specified(self):
        name = 'sample component'
        root = ET.Element('components')
        ET.SubElement(root, 'component', {'name':name})
        op = ComponentParser()
        
        op.parse(root)
        result = op.components[name]
        
        assert_that(result.name, is_(name))

    def test_that_parse_generates_a_component_including_category_if_specified_with_name(self):
        category = 'publication'
        name = 'sample component'
        root = _component_tree_with_category(category, name)
        op = ComponentParser()
        
        op.parse(root)
        result = op.components[name]
        
        assert_that(result.category_names, is_not(None))
        assert_that(result.category_names, has_item(category))
        
    def test_that_parse_generates_a_component_including_two_categories_if_two_specified(self):
        root = ET.Element('components')
        component_name = "example"
        sample_component = ET.SubElement(root, 'component', {'name':component_name})
        name1 = "publication"
        name2 = "enquiry"
        ET.SubElement(sample_component, 'category', {'name':name1})
        ET.SubElement(sample_component, 'category', {'name':name2})
        op = ComponentParser()
        
        op.parse(root)
        result = op.components[component_name]
        
        assert_that(result.category_names, is_not(None))
        assert_that(result.category_names, has_item(name1))
        assert_that(result.category_names, has_item(name2))
        
    @unittest.expectedFailure
    def test_that_parse_raises_if_category_has_no_name(self):
        '''Allow categories with no name in xml file, just means the element will be skipped'''        
        root = ET.Element('components')
        sample_component = ET.SubElement(root, 'component')
        ET.SubElement(sample_component, 'category')
        op = ComponentParser()
        
        self.assertRaises(AttributeError, op.parse)
        
    def test_that_parse_skip_element_and_generates_warning_if_component_has_no_name(self):
        root = ET.Element('components')
        sample_component = ET.SubElement(root, 'component')
        ET.SubElement(sample_component, 'category')
        op = ComponentParser()
        
        op.parse(root)
        result = op.components
        
        assert_that(result, is_({}))
        self.l.check(('root', 'WARNING', "Ignoring component definition with no name"))
        
Ejemplo n.º 52
0
class DownloadPaymentsTest(TestCase):
    """Tests for the ``download_payments`` management command.

    A dummy downloader/parser pair (declared elsewhere in this test module)
    is wired in through the ``PAIN_DOWNLOADERS`` setting; the command
    module's log output is captured with ``LogCapture`` and checked verbatim.
    """

    # Downloader/parser wiring injected per test via @override_settings.
    test_settings = {
        'DOWNLOADER':
        'django_pain.tests.commands.test_download_payments.DummyStatementDownloader',
        'PARSER':
        'django_pain.tests.commands.test_download_payments.DummyStatementParser',
        'DOWNLOADER_PARAMS': {
            'base_url': 'https://bank.test',
            'password': '******'
        }
    }  # type: Mapping[str, Any]

    def setUp(self):
        """Create the account payments are imported into and start log capture."""
        account = BankAccount(account_number='1234567890/2010', currency='CZK')
        account.save()
        self.account = account
        # propagate=False keeps captured records out of parent handlers so
        # the per-test ``check`` calls see exactly the command's own output.
        self.log_handler = LogCapture(
            'django_pain.management.commands.download_payments',
            propagate=False)

    def tearDown(self):
        """Detach the log capture installed in setUp."""
        self.log_handler.uninstall()

    @override_settings(PAIN_DOWNLOADERS={'test': test_settings})
    def test_import_payments(self):
        """Happy path: both dummy payments are stored and the full log sequence emitted."""
        out = StringIO()
        call_command('download_payments',
                     '--no-color',
                     '--verbosity=3',
                     stdout=out)

        self.assertQuerysetEqual(BankPayment.objects.values_list(
            'identifier',
            'account',
            'counter_account_number',
            'transaction_date',
            'amount',
            'amount_currency',
            'variable_symbol',
        ), [
            ('PAYMENT_1', self.account.pk, '098765/4321', date(
                2020, 9, 15), Decimal('42.00'), 'CZK', '1234'),
            ('PAYMENT_2', self.account.pk, '098765/4321', date(
                2020, 9, 17), Decimal('370.00'), 'CZK', ''),
        ],
                                 transform=tuple,
                                 ordered=False)

        self.log_handler.check(
            ('django_pain.management.commands.download_payments', 'INFO',
             'Command download_payments started.'),
            ('django_pain.management.commands.download_payments', 'INFO',
             'Processing: test'),
            ('django_pain.management.commands.download_payments', 'DEBUG',
             'Downloading payments for test.'),
            ('django_pain.management.commands.download_payments', 'DEBUG',
             'Parsing payments for test.'),
            ('django_pain.management.commands.download_payments', 'DEBUG',
             'Saving payments for test.'),
            ('django_pain.management.commands.download_payments', 'INFO',
             'Command download_payments finished.'))

    @freeze_time("2020-01-09T23:30")
    @patch(
        'django_pain.tests.commands.test_download_payments.DummyStatementDownloader.get_statement'
    )
    @override_settings(PAIN_DOWNLOADERS={'test': test_settings})
    def test_default_parameters(self, mock_method):
        """Without --start/--end the date window is derived from "now" in the active timezone."""
        with override_settings(USE_TZ=False):
            call_command('download_payments', '--no-color')
        mock_method.assert_called_with(date(year=2020, month=1, day=2),
                                       date(year=2020, month=1, day=9))

        # With USE_TZ the frozen UTC instant falls on the next local day in
        # Europe/Prague, shifting the whole window by one day.
        with override_settings(USE_TZ=True, TIME_ZONE='Europe/Prague'):
            call_command('download_payments', '--no-color')
        mock_method.assert_called_with(date(year=2020, month=1, day=3),
                                       date(year=2020, month=1, day=10))

    @patch(
        'django_pain.tests.commands.test_download_payments.DummyStatementDownloader.get_statement'
    )
    @override_settings(PAIN_DOWNLOADERS={'test': test_settings})
    def test_parameters(self, mock_method):
        """Explicit --start/--end are forwarded verbatim to the downloader."""
        call_command('download_payments', '--no-color', '--start',
                     '2020-01-01', '--end', '2020-01-21')

        end_date = date(2020, 1, 21)
        start_date = date(2020, 1, 1)
        mock_method.assert_called_with(start_date, end_date)

    @override_settings(PAIN_DOWNLOADERS={'test': test_settings})
    def test_invalid_start(self):
        """A malformed --start value is rejected by the argument parser."""
        with self.assertRaisesRegex(
                CommandError,
                'Error: argument -s/--start: invalid parse_date_safe value'):
            call_command('download_payments', '--no-color', '--start', 'abc')

    @override_settings(PAIN_DOWNLOADERS={'test': test_settings})
    def test_invalid_end(self):
        """A malformed --end value is rejected by the argument parser."""
        with self.assertRaisesRegex(
                CommandError,
                'Error: argument -e/--end: invalid parse_date_safe value'):
            call_command('download_payments', '--no-color', '--end', 'abc')

    @override_settings(PAIN_DOWNLOADERS={'test': test_settings})
    def test_invalid_time_interval(self):
        """start > end is rejected with an explicit error."""
        with self.assertRaisesRegex(
                CommandError,
                'Start date has to be lower or equal to the end date'):
            call_command('download_payments', '--no-color', '--start',
                         '2020-09-02', '--end', '2020-09-01')

    @patch(
        'django_pain.tests.commands.test_download_payments.DummyStatementDownloader.__init__'
    )
    @override_settings(PAIN_DOWNLOADERS={'test': test_settings})
    def test_downloader_init_error(self, mock_method):
        """A failing downloader constructor is logged as an error; the command still finishes."""
        mock_method.side_effect = ValueError
        call_command('download_payments', '--no-color')
        self.log_handler.check(
            ('django_pain.management.commands.download_payments', 'INFO',
             'Command download_payments started.'),
            ('django_pain.management.commands.download_payments', 'INFO',
             'Processing: test'),
            ('django_pain.management.commands.download_payments', 'ERROR',
             'Could not init Downloader for test.'),
            ('django_pain.management.commands.download_payments', 'INFO',
             'Command download_payments finished.'))

    @patch(
        'django_pain.tests.commands.test_download_payments.DummyStatementDownloader.get_statement'
    )
    @override_settings(PAIN_DOWNLOADERS={'test': test_settings})
    def test_download_error(self, mock_method):
        """A download failure is logged as an error; the command still finishes."""
        mock_method.side_effect = TellerDownloadError
        call_command('download_payments', '--no-color')
        self.log_handler.check(
            ('django_pain.management.commands.download_payments', 'INFO',
             'Command download_payments started.'),
            ('django_pain.management.commands.download_payments', 'INFO',
             'Processing: test'),
            ('django_pain.management.commands.download_payments', 'DEBUG',
             'Downloading payments for test.'),
            ('django_pain.management.commands.download_payments', 'ERROR',
             'Downloading payments for test failed.'),
            ('django_pain.management.commands.download_payments', 'INFO',
             'Command download_payments finished.'))

    @patch(
        'django_pain.tests.commands.test_download_payments.DummyStatementParser.parse_string'
    )
    @override_settings(PAIN_DOWNLOADERS={'test': test_settings})
    def test_parser_error(self, mock_method):
        """A parser exception's message is logged as an error; the command still finishes."""
        mock_method.side_effect = ValueError('Something went wrong.')
        call_command('download_payments', '--no-color')
        self.log_handler.check(
            ('django_pain.management.commands.download_payments', 'INFO',
             'Command download_payments started.'),
            ('django_pain.management.commands.download_payments', 'INFO',
             'Processing: test'),
            ('django_pain.management.commands.download_payments', 'DEBUG',
             'Downloading payments for test.'),
            ('django_pain.management.commands.download_payments', 'DEBUG',
             'Parsing payments for test.'),
            ('django_pain.management.commands.download_payments', 'ERROR',
             'Something went wrong.'),
            ('django_pain.management.commands.download_payments', 'INFO',
             'Command download_payments finished.'))

    @patch(
        'django_pain.tests.commands.test_download_payments.DummyStatementParser.parse_string'
    )
    @override_settings(PAIN_DOWNLOADERS={'test': test_settings})
    def test_invalid_account(self, mock_method):
        """A statement for an unknown bank account aborts with CommandError."""
        statement = BankStatement('11111/11')
        mock_method.return_value = statement
        with self.assertRaisesRegex(CommandError,
                                    'Bank account 11111/11 does not exist'):
            call_command('download_payments', '--no-color')

    @override_settings(PAIN_DOWNLOADERS={'test': test_settings})
    def test_payment_already_exist(self):
        """Re-running the command skips already-imported payments without errors."""
        out = StringIO()
        err = StringIO()
        call_command('download_payments',
                     '--no-color',
                     '--verbosity=3',
                     stdout=out)
        call_command('download_payments',
                     '--no-color',
                     '--verbosity=3',
                     stdout=out,
                     stderr=err)

        self.assertEqual(err.getvalue(), '')
        # Expected log: one full import cycle, then a second cycle where both
        # payments are reported as duplicates and skipped.
        self.log_handler.check(
            ('django_pain.management.commands.download_payments', 'INFO',
             'Command download_payments started.'),
            ('django_pain.management.commands.download_payments', 'INFO',
             'Processing: test'),
            ('django_pain.management.commands.download_payments', 'DEBUG',
             'Downloading payments for test.'),
            ('django_pain.management.commands.download_payments', 'DEBUG',
             'Parsing payments for test.'),
            ('django_pain.management.commands.download_payments', 'DEBUG',
             'Saving payments for test.'),
            ('django_pain.management.commands.download_payments', 'INFO',
             'Command download_payments finished.'),
            ('django_pain.management.commands.download_payments', 'INFO',
             'Command download_payments started.'),
            ('django_pain.management.commands.download_payments', 'INFO',
             'Processing: test'),
            ('django_pain.management.commands.download_payments', 'DEBUG',
             'Downloading payments for test.'),
            ('django_pain.management.commands.download_payments', 'DEBUG',
             'Parsing payments for test.'),
            ('django_pain.management.commands.download_payments', 'DEBUG',
             'Saving payments for test.'),
            ('django_pain.management.commands.download_payments', 'INFO',
             'Payment ID PAYMENT_1 already exists - skipping.'),
            ('django_pain.management.commands.download_payments', 'INFO',
             'Payment ID PAYMENT_2 already exists - skipping.'),
            ('django_pain.management.commands.download_payments', 'INFO',
             'Skipped 2 payments.'),
            ('django_pain.management.commands.download_payments', 'INFO',
             'Command download_payments finished.'))

    @override_settings(PAIN_DOWNLOADERS={'test': test_settings})
    def test_quiet_command(self):
        """--verbosity=0 produces no output on stdout or stderr."""
        out = StringIO()
        err = StringIO()
        call_command('download_payments',
                     '--no-color',
                     '--verbosity=0',
                     stdout=out)
        call_command('download_payments',
                     '--no-color',
                     '--verbosity=0',
                     stderr=err)

        self.assertEqual(out.getvalue(), '')
        self.assertEqual(err.getvalue(), '')

    @override_settings(
        PAIN_IMPORT_CALLBACKS=[
            'django_pain.import_callbacks.skip_credit_card_transaction_summary'
        ],
        PAIN_DOWNLOADERS={
            'test': {
                **test_settings, 'PARSER':
                'django_pain.tests.commands.test_download_payments.DummyCreditCardSummaryParser'
            }
        })  # noqa
    def test_import_callback_exception(self):
        """A payment rejected by an import callback is reported and not saved."""
        out = StringIO()
        err = StringIO()
        call_command('download_payments',
                     '--no-color',
                     '--verbosity=3',
                     stdout=out,
                     stderr=err)

        self.assertEqual(out.getvalue().strip(), '')
        self.assertEqual(err.getvalue().strip().split('\n'), [
            'Payment ID PAYMENT_3 has not been saved due to the following errors:',
            'Payment is credit card transaction summary.',
        ])
        self.assertEqual(BankPayment.objects.count(), 0)
        self.log_handler.check(
            ('django_pain.management.commands.download_payments', 'INFO',
             'Command download_payments started.'),
            ('django_pain.management.commands.download_payments', 'INFO',
             'Processing: test'),
            ('django_pain.management.commands.download_payments', 'DEBUG',
             'Downloading payments for test.'),
            ('django_pain.management.commands.download_payments', 'DEBUG',
             'Parsing payments for test.'),
            ('django_pain.management.commands.download_payments', 'DEBUG',
             'Saving payments for test.'),
            ('django_pain.management.commands.download_payments', 'WARNING',
             'Payment ID PAYMENT_3 has not been saved due to the following errors:'
             ), ('django_pain.management.commands.download_payments',
                 'WARNING', 'Payment is credit card transaction summary.'),
            ('django_pain.management.commands.download_payments', 'INFO',
             'Skipped 1 payments.'),
            ('django_pain.management.commands.download_payments', 'INFO',
             'Command download_payments finished.'))
Ejemplo n.º 53
0
class DjangoHookboxTest(TestCase):
    """Tests for django-hookbox webhook callbacks, signals and the web API.

    Py3 compatibility fixes applied to the original: ``dict.iteritems()``
    (removed in Python 3) replaced with ``items()``, and the ``assertEquals``
    / ``assert_`` aliases (deprecated since 3.2, removed in 3.12) replaced
    with ``assertEqual`` / ``assertTrue``. Behavior is otherwise unchanged.
    """

    def _cb_all(self, op, user, channel = '-', payload = None):
        """Catch-all callback: count invocations per channel ('-' when none)."""
        if channel in self.all_calls:
            self.all_calls[channel] += 1
        else:
            self.all_calls[channel] = 1
        return None

    def _cb_create(self, op, user, channel = None):
        """'create' callback: count calls and return per-channel test responses."""
        if channel in self.create_calls:
            self.create_calls[channel] += 1
        else:
            self.create_calls[channel] = 1

        if channel == '/a/':
            return {
                'history_size': 2,
                'reflective':   False,
                'presenceful':  False,
                'moderated':    True,
            }
        elif channel == '/b/':
            return 'denied'
        elif channel == '/c/':
            return [False, {'msg': 'also denied'}]
        else:
            return None

    def setUp(self):
        """Install this test's callbacks in isolation and create a test user."""
        self.all_calls = {}
        self.create_calls = {}

        # HACK: don't allow other apps to mess with us or vice versa...
        self.old_cbs = djhookbox.views._callbacks
        djhookbox.views._callbacks = []
        djhookbox.whcallback(self._cb_all)
        djhookbox.whcallback('create')(self._cb_create)

        User.objects.create_user('a', '*****@*****.**', 'a').save()

        self.logcap = LogCapture()

    def tearDown(self):
        """Restore the global callback list and detach the log capture."""
        djhookbox.views._callbacks = self.old_cbs
        self.logcap.uninstall()

    @server
    def test_create(self):
        """Publishing to a non-existent channel fails; after create it succeeds."""
        self.assertRaises(djhookbox.HookboxError,
            djhookbox.publish, '/a/', json.dumps({'foo': 'bar'}))

        djhookbox.create('/a/')
        djhookbox.publish('/a/', json.dumps({'foo': 'bar'}))

        # TODO: Test send_hook works
        # TODO: Confirm it actually did something

    @server
    def test_web_api_token(self):
        """A wrong API token makes publish fail without reaching any callback."""
        secret = djhookbox.apitoken
        try:
            djhookbox.apitoken += '...not!'
            self.assertRaises(djhookbox.HookboxError,
                              djhookbox.publish, '/a/', json.dumps({'foo': 'bar'}))
            self.assertCreateCalls({})
        finally:
            djhookbox.apitoken = secret

    def test_webhook_secret(self):
        """Webhook requests succeed only with the correct shared secret."""
        self.client.login(username = '******', password = '******')
        response = self.client.post(connect_url, {
            'channel_name': 'a',
            'secret':       djhookbox.views.secret,
        })
        self.assertSuccess(response)

        response = self.client.post(connect_url, {
            'channel_name': 'a',
        })
        data = self.decode(response)
        self.assertFalse(data[0], 'webhook secret verification should have failed (forgotton to set settings.HOOKBOX_WEBHOOK_SECRET?)')

        response = self.client.post(connect_url, {
            'channel_name': 'a',
            'secret':       djhookbox.views.secret + '...not!',
        })
        data = self.decode(response)
        self.assertFalse(data[0], 'webhook secret verification should have failed')

    def test_signals(self):
        """Each webhook view fires its signal with the expected sender/kwargs."""
        class Listener(object):
            def __call__(self, *args, **kwargs):
                self.signal = kwargs.get('signal')
                self.sender = kwargs.get('sender').username
                self.kwargs = kwargs

        def doTest(which, params = dict(), **checks):
            listener = Listener()
            djhookbox.views.signals[which].connect(listener)

            self.client.login(username = '******', password = '******')
            params['secret'] = djhookbox.views.secret
            response = self.client.post(reverse('hookbox_%s' % which), params)

            self.assertSuccess(response)
            self.assertEqual(listener.sender, 'a')
            # FIX: dict.iteritems() does not exist on Python 3; items() is
            # equivalent here (iteration only) and works on both versions.
            for (key, value) in checks.items():
                self.assertEqual(listener.kwargs.get(key), value)

            self.client.logout()
            djhookbox.views.signals[which].disconnect(listener)

        doTest('connect')
        doTest('disconnect')
        doTest('subscribe', {'channel_name': 'b'}, channel = 'b')
        doTest('unsubscribe', {'channel_name': 'b'}, channel = 'b')

    def test_all_cbs(self):
        """The catch-all callback is invoked once per webhook operation."""
        self.client.login(username = '******', password = '******')
        params = {
            'secret': djhookbox.views.secret,
            'channel_name': 'a',
        }

        response = self.client.post(connect_url, params)
        self.assertSuccess(response)
        self.assertAllCalls({'-': 1})

        response = self.client.post(reverse('hookbox_subscribe'), params)
        self.assertSuccess(response)
        self.assertAllCalls({'-': 1, 'a': 1})

        response = self.client.post(reverse('hookbox_publish'), {
            'secret': djhookbox.views.secret,
            'channel_name': 'a',
            'payload': json.dumps(["Hello world"]),
        })
        self.assertSuccess(response)
        self.assertAllCalls({'-': 1, 'a': 2})

        response = self.client.post(reverse('hookbox_destroy_channel'), params)
        self.assertSuccess(response)
        self.assertAllCalls({'-': 1, 'a': 3})

        response = self.client.post(reverse('hookbox_disconnect'), params)
        self.assertSuccess(response)
        self.assertAllCalls({'-': 2, 'a': 3})

    def test_warn_multiple_results(self):
        """Two callbacks both returning results triggers a warning per operation."""

        @djhookbox.whcallback
        def _cb_1(op, user, channel = '-'):
            return [True, {}]

        @djhookbox.whcallback
        def _cb_2(op, user, channel = '-'):
            return [True, {}]

        params = {'secret': djhookbox.views.secret}

        logging.getLogger('djhookbox').setLevel(logging.WARNING)

        response = self.client.post(connect_url, params)
        self.assertSuccess(response)
        self.assertAllCalls({'-': 1})

        response = self.client.post(reverse('hookbox_disconnect'), params)
        self.assertSuccess(response)
        self.assertAllCalls({'-': 2})

        self.logcap.check(
            ('djhookbox', 'WARNING', 'multiple results returned from connect callback'),
            ('djhookbox', 'WARNING', 'multiple results returned from disconnect callback'),
        )

    def test_explicit_deny(self):
        """Callbacks can deny channel creation via a string or [False, dict]."""
        response = self.client.post(reverse('hookbox_create_channel'), {
            'secret': djhookbox.views.secret,
            'channel_name': '/b/',
        })

        data = self.decode(response)
        self.assertEqual(data[0], False, 'unexpected success')
        self.assertEqual(data[1], {'msg': 'denied'})
        self.assertAllCalls({'/b/': 1})

        response = self.client.post(reverse('hookbox_create_channel'), {
            'secret': djhookbox.views.secret,
            'channel_name': '/c/',
        })

        data = self.decode(response)
        self.assertEqual(data[0], False, 'unexpected success')
        self.assertEqual(data[1], {'msg': 'also denied'})
        self.assertAllCalls({'/b/': 1, '/c/': 1})

    def test_callback_error(self):
        """An exception raised in a callback is reported as a failed operation."""

        @djhookbox.whcallback
        def _cb_1(op, user, channel = '-'):
            raise Exception('something bad')

        response = self.client.post(reverse('hookbox_create_channel'), {
            'secret': djhookbox.views.secret,
            'channel_name': '/a/',
        })

        data = self.decode(response)
        self.assertEqual(data[0], False, 'unexpected success')
        self.assertEqual(data[1], {'msg': 'something bad'})
        self.assertAllCalls({'/a/': 1})

    def decode(self, response):
        """Decode a JSON webhook response and check its [bool, dict] shape."""
        self.assertEqual(response.status_code, 200)
        self.assertTrue(('Content-Type', 'application/json') in response.items())

        result = json.loads(response.content)
        self.assertTrue(isinstance(result, list), 'unexpected result returned from server: %s' % str(result))
        self.assertEqual(len(result), 2)
        self.assertTrue(isinstance(result[0], bool), 'unexpected result returned from server: %s' % str(result))
        self.assertTrue(isinstance(result[1], dict), 'unexpected result returned from server: %s' % str(result))
        return result

    def assertSuccess(self, response):
        """Assert a webhook response reports success, surfacing any error msg."""
        data = self.decode(response)

        if not data[0] and 'msg' in data[1]:
            self.fail(data[1]['msg'])
        else:
            self.assertTrue(data[0])

    def assertAllCalls(self, calls):
        """Assert the catch-all callback's per-channel call counts."""
        self.assertEqual(self.all_calls, calls)

    def assertCreateCalls(self, calls):
        """Assert the create callback's per-channel call counts."""
        self.assertEqual(self.create_calls, calls)
Ejemplo n.º 54
0
class TestControlNodeSerial(unittest.TestCase):
    """Tests for ControlNodeSerial with the subprocess layer fully mocked.

    ``Popen`` is patched so stderr reads come from a queue; putting ``b''``
    on the queue simulates the reader seeing EOF (process terminated).
    """

    def setUp(self):
        """Patch Popen, feed the 'ready' line, and start log capture."""
        self.popen_patcher = mock.patch(
            'gateway_code.utils.subprocess_timeout.Popen')
        popen_class = self.popen_patcher.start()
        self.popen = popen_class.return_value

        # terminate() unblocks the reader by queueing EOF; poll() says 'alive'.
        self.popen.terminate.side_effect = self._terminate
        self.popen.poll.return_value = None

        self.readline_ret_vals = queue.Queue(0)
        self.popen.stderr.readline.side_effect = self.readline_ret_vals.get
        self.readline_ret_vals.put(b'cn_serial_ready\n')

        self.cn = cn_interface.ControlNodeSerial('tty')
        self.log_error = LogCapture('gateway_code', level=logging.WARNING)

    def tearDown(self):
        """Stop the interface, undo all patches, detach log capture."""
        self.cn.stop()
        mock.patch.stopall()
        self.log_error.uninstall()

    def _terminate(self):
        """Simulate process termination: reader gets EOF on next readline."""
        self.readline_ret_vals.put(b'')

    def test_normal_start_stop(self):
        """start() returns 0 after the ready line; stop() terminates cleanly."""
        ret_start = self.cn.start()
        self.assertEqual(0, ret_start)
        self.assertTrue(self.popen.stderr.readline.called)

        self.cn.stop()
        self.assertTrue(self.popen.terminate.called)
        self.assertTrue(self.readline_ret_vals.empty())

    def test_start_error_in_cn_serial(self):
        """A non-zero poll() during start is reported as a premature reader exit."""

        # poll should return an error
        self.popen.poll.return_value = 2

        ret_start = self.cn.start()
        self.assertNotEqual(0, ret_start)
        self.log_error.check(
            ('gateway_code', 'ERROR',
             'Control node serial reader thread ended prematurely'))
        self.cn.stop()

    def test_stop_before_start(self):
        """stop() without a prior start() must not raise."""
        self.cn.stop()

    def test_stop_with_cn_interface_allready_stopped(self):
        """Commands and stop() on an already-dead process log errors, not crash."""

        # Simulate cn_interface stopped
        self.readline_ret_vals.put(b'')
        self.popen.stdin.write.side_effect = IOError()
        self.popen.terminate.side_effect = OSError()

        self.cn.start()

        # try sending command
        ret = self.cn.send_command(['test', 'cmd'])
        self.assertEqual(None, ret)
        self.log_error.check(('gateway_code', 'ERROR',
                              'control_node_serial process is terminated'))

        self.log_error.clear()
        self.cn.stop()
        self.log_error.check(('gateway_code', 'ERROR',
                              'Control node process already terminated'))

    def test_stop_terminate_failed(self):
        """Stop cn_interface but terminate does not stop it."""
        # terminate does not stop process
        self.popen.terminate.side_effect = None
        timeout_expired = cn_interface.subprocess_timeout.TimeoutExpired
        self.popen.wait.side_effect = timeout_expired('cn_serial_interface', 3)
        # kill does it
        self.popen.kill.side_effect = self._terminate

        self.cn.start()
        self.cn.stop()

        self.assertTrue(self.popen.kill.called)
        self.log_error.check(('gateway_code', 'WARNING',
                              'Control node serial not terminated, kill it'))

# Test command sending

    def test_send_command(self):
        """A command gets its matching ACK answer back."""
        # Writing the command makes the mocked process answer with an ACK.
        self.popen.stdin.write.side_effect = \
            (lambda *x: self.readline_ret_vals.put(b'start ACK\n'))

        self.cn.start()
        ret = self.cn.send_command(['start', 'DC'])
        self.assertEqual(['start', 'ACK'], ret)
        self.cn.stop()

    def test_send_command_no_answer(self):
        """A command with no answer returns None after timeout."""
        self.cn.start()
        ret = self.cn.send_command(['start', 'DC'])
        self.assertIsNone(ret)
        self.cn.stop()

    def test_send_command_cn_interface_stoped(self):
        """Sending before start() returns None."""
        ret = self.cn.send_command(['lala'])
        self.assertIsNone(ret)

    def test_answer_and_answer_with_queue_full(self):
        """A second unsolicited answer overflows the queue and is logged."""
        # get two answers without sending command
        self.readline_ret_vals.put(b'set ACK\n')
        self.readline_ret_vals.put(b'start ACK\n')

        self.cn.start()
        self.cn.stop()

        self.log_error.check(
            ('gateway_code', 'ERROR',
             f'Control node answer queue full: {["start", "ACK"]}'))

# _cn_interface_args

    def test__cn_interface_args(self):
        """-c appears only with an OML config, -d only in measures-debug mode."""
        args = self.cn._cn_interface_args()
        self.assertIn(self.cn.tty, args)
        self.assertNotIn('-c', args)
        self.assertNotIn('-d', args)

        # OML config
        args = self.cn._cn_interface_args('<omlc></omlc>')
        self.assertIn('-c', args)
        self.assertNotIn('-d', args)
        self.cn._oml_cfg_file.close()

        # Debug mode
        self.cn.measures_debug = (lambda x: None)
        args = self.cn._cn_interface_args()
        self.assertNotIn('-c', args)
        self.assertIn('-d', args)


# _config_oml coverage tests

    def test_empty_config_oml(self):
        """No experiment description yields no OML config file."""
        # No experiment description
        ret = self.cn._oml_config_file(None)
        self.assertIsNone(ret)

    @mock.patch(utils.READ_CONFIG, utils.read_config_mock('m3'))
    def test_config_oml(self):
        """Starting with an OML XML config creates the temporary config file."""
        oml_xml_cfg = '''<omlc id='{node_id}' exp_id='{exp_id}'>\n</omlc>'''
        self.cn.start(oml_xml_cfg)
        self.assertIsNotNone(self.cn._oml_cfg_file)

        self.cn.stop()

    def test_oml_xml_config(self):
        """oml_xml_config returns XML for real file dicts, None for empty/None."""
        exp_files = {
            'consumption': '/tmp/consumption',
            'radio': '/tmp/radio',
            'event': '/tmp/event',
            'sniffer': '/tmp/sniffer',
            'log': '/tmp/log',
        }

        oml_xml_cfg = self.cn.oml_xml_config('m3-1', '1234', exp_files)
        self.assertIsNotNone(oml_xml_cfg)
        self.assertTrue(oml_xml_cfg.startswith('<omlc'))

        # No output if none or empty
        oml_xml_cfg = self.cn.oml_xml_config('m3-1', '1234', None)
        self.assertIsNone(oml_xml_cfg)
        oml_xml_cfg = self.cn.oml_xml_config('m3-1', '1234', {})
        self.assertIsNone(oml_xml_cfg)
Ejemplo n.º 55
0
class SearchResultsViewTests(unittest.TestCase):
    """Tests for opencore's SearchResultsView with dummy catalog adapters."""

    def setUp(self):
        """Start log capture, reset the registry, and register dummy renderers."""
        self.log = LogCapture()
        cleanUp()
        testing.registerDummyRenderer('opencore.views:templates/generic_layout.pt')
        testing.registerDummyRenderer(
            'opencore.views:templates/community_layout.pt')

    def tearDown(self):
        """Detach the log capture and reset the registry again."""
        self.log.uninstall()
        cleanUp()

    def _callFUT(self, context, request):
        """Invoke SearchResultsView with a template api attached to the request."""
        from opencore.views.search import SearchResultsView
        from opencore.views.api import get_template_api
        request.api = get_template_api(context, request)
        view = SearchResultsView(context, request)
        # Map the dummy content type to a result renderer name for the view.
        view.type_to_result_dict[DummyContent] = 'test-content'
        return view()

    def test_no_searchterm(self):
        """View is callable with an empty query string."""
        from webob.multidict import MultiDict
        context = testing.DummyModel()
        request = testing.DummyRequest(params=MultiDict())
        from opencore.models.interfaces import ICatalogSearch
        testing.registerAdapter(DummyEmptySearch, (Interface),
                                ICatalogSearch)
        # NOTE(review): the assertion below is disabled, so this test only
        # checks that the view does not raise — confirm that is intended.
        result = self._callFUT(context, request)
        #self.assertEqual(result.status, '404 Not Found')

    def test_bad_kind(self):
        """An unknown 'kind' parameter raises HTTPBadRequest."""
        from webob.multidict import MultiDict
        context = testing.DummyModel()
        request = testing.DummyRequest(
            params=MultiDict({'kind':'unknown', 'body':'yo'}))
        from zope.interface import Interface
        from opencore.models.interfaces import ICatalogSearch
        from webob.exc import HTTPBadRequest
        testing.registerAdapter(DummyEmptySearch, (Interface),
                                ICatalogSearch)
        self.assertRaises(HTTPBadRequest, self._callFUT, context, request)

    def test_none_kind(self):
        """A body-only query searches all kinds and returns the dummy hit."""
        from webob.multidict import MultiDict
        context = testing.DummyModel()
        request = testing.DummyRequest(params=MultiDict({'body':'yo'}))
        from zope.interface import Interface
        from opencore.models.interfaces import ICatalogSearch
        from repoze.lemonade.testing import registerContentFactory
        registerContentFactory(DummyContent, IDummyContent)
        testing.registerAdapter(DummySearch, (Interface),
                                ICatalogSearch)
        result = self._callFUT(context, request)
        self.assertEqual(result['terms'], ['yo'])
        self.assertEqual(len(result['results']), 1)

    def test_known_kind(self):
        """A registered 'kind' utility narrows the search and adds its term."""
        from webob.multidict import MultiDict
        from opencore.models.interfaces import IGroupSearchFactory
        from repoze.lemonade.testing import registerContentFactory
        from zope.interface import Interface
        content = DummyContent()
        def search_factory(*arg, **kw):
            return DummySearchFactory(content)
        testing.registerUtility(
            search_factory, IGroupSearchFactory, name='People')
        context = testing.DummyModel()
        request = testing.DummyRequest(
            params=MultiDict({'body':'yo', 'kind':'People'}))
        from opencore.models.interfaces import ICatalogSearch
        registerContentFactory(DummyContent, IDummyContent)
        testing.registerAdapter(DummySearch, (Interface),
                                ICatalogSearch)
        result = self._callFUT(context, request)
        self.assertEqual(result['terms'], ['yo', 'People'])
        self.assertEqual(len(result['results']), 1)

    def test_community_search(self):
        """Searching from a community context reports the community title."""
        context = testing.DummyModel()
        context.title = 'Citizens'
        from webob.multidict import MultiDict
        from opencore.models.interfaces import ICommunity
        from zope.interface import directlyProvides
        directlyProvides(context, ICommunity)
        request = testing.DummyRequest(params=MultiDict({'body':'yo'}))
        from zope.interface import Interface
        from opencore.models.interfaces import ICatalogSearch
        from repoze.lemonade.testing import registerContentFactory
        registerContentFactory(DummyContent, IDummyContent)
        testing.registerAdapter(DummySearch, (Interface),
                                ICatalogSearch)
        result = self._callFUT(context, request)
        self.assertEqual(result['community'], 'Citizens')
        self.assertEqual(result['terms'], ['yo'])
        self.assertEqual(len(result['results']), 1)

    def test_parse_error(self):
        """A query parse error yields empty results and a friendly error message."""
        from webob.multidict import MultiDict
        context = testing.DummyModel()
        request = testing.DummyRequest(params=MultiDict({'body':'the'}))
        from zope.interface import Interface
        from opencore.models.interfaces import ICatalogSearch
        from repoze.lemonade.testing import registerContentFactory
        registerContentFactory(DummyContent, IDummyContent)
        testing.registerAdapter(ParseErrorSearch, (Interface),
                                ICatalogSearch)
        result = self._callFUT(context, request)
        self.assertEqual(len(result['terms']), 0)
        self.assertEqual(len(result['results']), 0)
        self.assertEqual(result['error'], "Error: 'the' is nonsense")
Ejemplo n.º 56
0
class TestService(unittest.TestCase):

    def get_ini(self):
        """Return the path of the memorynode test config, next to this file."""
        here = os.path.dirname(__file__)
        return os.path.join(here, 'test_memorynode.ini')

    def setUp(self):
        # Build a fully wired test app: load the .ini settings, include
        # the tokenserver package, and keep a handle on the node-assignment
        # backend so tests can inspect/seed it directly.
        self.config = testing.setUp()
        settings = {}
        load_into_settings(self.get_ini(), settings)
        self.config.add_settings(settings)
        self.config.include("tokenserver")
        load_and_register("tokenserver", self.config)
        self.backend = self.config.registry.getUtility(INodeAssignment)
        wsgiapp = self.config.make_wsgi_app()
        self.app = TestApp(wsgiapp)
        # Mock out the verifier to return successfully by default.
        # The contexts are entered here and exited in tearDown.
        self.mock_browserid_verifier_context = self.mock_browserid_verifier()
        self.mock_browserid_verifier_context.__enter__()
        self.mock_oauth_verifier_context = self.mock_oauth_verifier()
        self.mock_oauth_verifier_context.__enter__()
        # Capture log output so tests can assert on metrics/exceptions.
        self.logs = LogCapture()

    def tearDown(self):
        # Undo setUp in reverse order: stop log capture first, then pop
        # both verifier mocks.
        self.logs.uninstall()
        self.mock_oauth_verifier_context.__exit__(None, None, None)
        self.mock_browserid_verifier_context.__exit__(None, None, None)

    def assertExceptionWasLogged(self, msg):
        for r in self.logs.records:
            if r.msg == msg:
                assert r.exc_info is not None
                break
        else:
            assert False, "exception with message %r was not logged" % (msg,)

    def assertMessageWasNotLogged(self, msg):
        for r in self.logs.records:
            if r.msg == msg:
                assert False, "message %r was unexpectedly logged" % (msg,)

    def assertMetricWasLogged(self, key):
        """Check that a metric was logged during the request."""
        for r in self.logs.records:
            if key in r.__dict__:
                break
        else:
            assert False, "metric %r was not logged" % (key,)

    def clearLogs(self):
        del self.logs.records[:]

    def unsafelyParseToken(self, token):
        """Parse a token's JSON payload without any signature checks.

        For testing purposes only: the trailing 32-byte HMAC is stripped
        and ignored rather than verified.
        """
        raw = token.encode("utf8")
        payload = decode_token_bytes(raw)[:-32]
        return json.loads(payload.decode("utf8"))

    @contextlib.contextmanager
    def mock_browserid_verifier(self, response=None, exc=None):
        """Temporarily replace the BrowserID verifier's verify() method.

        By default verification succeeds, echoing back the email found in
        the assertion; pass `response` for a canned result, or `exc` to
        make verification raise that exception.
        """
        def mock_verify_method(assertion):
            if exc is not None:
                raise exc
            if response is not None:
                return response
            return {
                "status": "okay",
                "email": get_assertion_info(assertion)["principal"]["email"],
            }
        verifier = get_browserid_verifier(self.config.registry)
        # Patch via the instance __dict__ so the mock shadows the class
        # method and can be removed cleanly on exit.
        orig_verify_method = verifier.__dict__.get("verify", None)
        verifier.__dict__["verify"] = mock_verify_method
        try:
            yield None
        finally:
            if orig_verify_method is None:
                del verifier.__dict__["verify"]
            else:
                verifier.__dict__["verify"] = orig_verify_method

    @contextlib.contextmanager
    def mock_oauth_verifier(self, response=None, exc=None):
        """Temporarily replace the OAuth verifier's verify() method.

        By default the mock treats the token as a hex-encoded email
        address (the counterpart of _gettoken); pass `response` for a
        canned result, or `exc` to make verification raise.
        """
        import binascii

        def mock_verify_method(token):
            if exc is not None:
                raise exc
            if response is not None:
                return response
            # token.decode("hex") is a Python-2-only codec; unhexlify is
            # identical there and also works on Python 3.
            return {
                "email": binascii.unhexlify(token).decode("utf8"),
                "idpClaims": {},
            }
        verifier = get_oauth_verifier(self.config.registry)
        # Patch via the instance __dict__ so the mock shadows the class
        # method and can be removed cleanly on exit.
        orig_verify_method = verifier.__dict__.get("verify", None)
        verifier.__dict__["verify"] = mock_verify_method
        try:
            yield None
        finally:
            if orig_verify_method is None:
                del verifier.__dict__["verify"]
            else:
                verifier.__dict__["verify"] = orig_verify_method

    def _getassertion(self, **kw):
        """Build a BrowserID assertion, filling in default email/audience."""
        defaults = {
            'email': '*****@*****.**',
            'audience': 'http://tokenserver.services.mozilla.com',
        }
        for name, value in defaults.items():
            kw.setdefault(name, value)
        return make_assertion(**kw).encode('ascii')

    def _gettoken(self, email='*****@*****.**'):
        return email.encode('hex')

    def test_unknown_app(self):
        """Requesting an unrecognised application name returns a 404."""
        assertion = self._getassertion()
        headers = {'Authorization': 'BrowserID %s' % assertion}
        response = self.app.get('/1.0/xXx/token', headers=headers, status=404)
        self.assertTrue('errors' in response.json)

    def test_invalid_client_state(self):
        """Malformed X-Client-State header values are rejected with 400."""
        for bad_value in ('state!', 'foobar\n\r\t'):
            headers = {'X-Client-State': bad_value}
            resp = self.app.get('/1.0/sync/1.5', headers=headers, status=400)
            self.assertEquals(resp.json['errors'][0]['location'], 'header')
            self.assertEquals(resp.json['errors'][0]['name'], 'X-Client-State')

    def test_no_auth(self):
        # A request without any Authorization header is rejected with 401.
        self.app.get('/1.0/sync/1.5', status=401)

    def test_valid_app(self):
        # A valid assertion for a known app yields an endpoint, a token
        # duration, and a verify_success metric in the logs.
        headers = {'Authorization': 'BrowserID %s' % self._getassertion()}
        res = self.app.get('/1.0/sync/1.1', headers=headers)
        self.assertIn('https://example.com/1.1', res.json['api_endpoint'])
        self.assertIn('duration', res.json)
        self.assertEquals(res.json['duration'], 3600)
        self.assertMetricWasLogged('token.assertion.verify_success')
        self.clearLogs()

    def test_unknown_pattern(self):
        # sync 1.5 is defined in the .ini file, but no endpoint pattern
        # exists for it, so the service answers 503 Service Unavailable.
        headers = {'Authorization': 'BrowserID %s' % self._getassertion()}
        self.app.get('/1.0/sync/1.5', headers=headers, status=503)

    def test_discovery(self):
        # The unauthenticated root document advertises the auth server,
        # the supported service versions, and the verifier configuration.
        res = self.app.get('/')
        self.assertEqual(res.json, {
            'auth': 'http://localhost',
            'services': {
                'sync': ['1.1', '1.5'],
            },
            'browserid': {
                'allowed_issuers': None,
                'trusted_issuers': None,
            },
            'oauth': {
                'default_issuer': 'api.accounts.firefox.com',
                'scope': 'https://identity.mozilla.com/apps/oldsync',
                'server_url': 'https://oauth.accounts.firefox.com/v1',
            }
        })

    def test_version_returns_404_by_default(self):
        # clear cache (the view memoizes its JSON payload on the function)
        try:
            del tokenserver.views.version_view.__json__
        except AttributeError:
            pass
        # With no version file present on disk, the endpoint 404s.
        with mock.patch('os.path.exists', return_value=False):
            self.app.get('/__version__', status=404)

    def test_version_returns_file_in_current_folder_if_present(self):
        # clear cache (the view memoizes its JSON payload on the function)
        try:
            del tokenserver.views.version_view.__json__
        except AttributeError:
            pass
        # Serve a fake version file and check it is echoed back as JSON.
        content = {'version': '0.8.1'}
        fake_file = mock.mock_open(read_data=json.dumps(content))
        with mock.patch('os.path.exists'):
            with mock.patch('tokenserver.views.open', fake_file):
                response = self.app.get('/__version__')
                self.assertEquals(response.json, content)

    def test_lbheartbeat(self):
        """The load-balancer heartbeat endpoint returns an empty JSON body."""
        response = self.app.get('/__lbheartbeat__')
        self.assertEqual(response.json, {})

    def test_unauthorized_error_status(self):
        """Map each verifier failure mode to the expected status/body."""
        # Totally busted auth -> generic error.
        headers = {'Authorization': 'Unsupported-Auth-Scheme IHACKYOU'}
        res = self.app.get('/1.0/sync/1.1', headers=headers, status=401)
        self.assertEqual(res.json['status'], 'error')

        # BrowserID verifier errors
        assertion = self._getassertion()
        headers = {'Authorization': 'BrowserID %s' % assertion}
        # Bad signature -> "invalid-credentials"
        errs = browserid.errors
        with self.mock_browserid_verifier(exc=errs.InvalidSignatureError):
            res = self.app.get('/1.0/sync/1.1', headers=headers, status=401)
        self.assertEqual(res.json['status'], 'invalid-credentials')
        # Bad audience -> "invalid-credentials"
        with self.mock_browserid_verifier(exc=errs.AudienceMismatchError):
            res = self.app.get('/1.0/sync/1.1', headers=headers, status=401)
        self.assertEqual(res.json['status'], 'invalid-credentials')
        self.assertMetricWasLogged('token.assertion.verify_failure')
        self.assertMetricWasLogged('token.assertion.audience_mismatch_error')
        self.clearLogs()
        # Expired timestamp -> "invalid-timestamp"
        with self.mock_browserid_verifier(exc=errs.ExpiredSignatureError):
            res = self.app.get('/1.0/sync/1.1', headers=headers, status=401)
        self.assertEqual(res.json['status'], 'invalid-timestamp')
        self.assertTrue('X-Timestamp' in res.headers)
        self.assertMetricWasLogged('token.assertion.verify_failure')
        self.assertMetricWasLogged('token.assertion.expired_signature_error')
        self.clearLogs()
        # Connection error -> 503
        with self.mock_browserid_verifier(exc=errs.ConnectionError):
            res = self.app.get('/1.0/sync/1.1', headers=headers, status=503)
        self.assertMetricWasLogged('token.assertion.verify_failure')
        self.assertMetricWasLogged('token.assertion.connection_error')
        self.assertExceptionWasLogged('Unexpected verification error')
        self.clearLogs()
        # Some other wacky error -> not captured
        with self.mock_browserid_verifier(exc=ValueError):
            with self.assertRaises(ValueError):
                res = self.app.get('/1.0/sync/1.1', headers=headers)

        # OAuth verifier errors
        token = self._gettoken()
        headers = {'Authorization': 'Bearer %s' % token}
        # Bad token -> "invalid-credentials"
        err = fxa.errors.ClientError({"code": 400, "errno": 108})
        with self.mock_oauth_verifier(exc=err):
            res = self.app.get('/1.0/sync/1.1', headers=headers, status=401)
        self.assertEqual(res.json['status'], 'invalid-credentials')
        self.assertMetricWasLogged('token.oauth.errno.108')
        self.assertMessageWasNotLogged('Unexpected verification error')
        # Untrusted scopes -> "invalid-credentials"
        err = fxa.errors.TrustError({"code": 400, "errno": 999})
        with self.mock_oauth_verifier(exc=err):
            res = self.app.get('/1.0/sync/1.1', headers=headers, status=401)
        self.assertEqual(res.json['status'], 'invalid-credentials')
        self.assertMessageWasNotLogged('Unexpected verification error')
        # Connection error -> 503
        # NOTE(review): `errs` here is still browserid.errors — presumably
        # the view handles any ConnectionError uniformly; confirm intended.
        with self.mock_oauth_verifier(exc=errs.ConnectionError):
            res = self.app.get('/1.0/sync/1.1', headers=headers, status=503)
        self.assertMetricWasLogged('token.oauth.verify_failure')
        self.assertMetricWasLogged('token.oauth.connection_error')
        self.assertExceptionWasLogged('Unexpected verification error')
        self.clearLogs()
        # Some other wacky error -> not captured
        with self.mock_oauth_verifier(exc=ValueError):
            with self.assertRaises(ValueError):
                res = self.app.get('/1.0/sync/1.1', headers=headers)

    def test_unverified_token(self):
        """Only an explicit fxa-tokenVerified=False rejects the assertion."""
        headers = {'Authorization': 'BrowserID %s' % self._getassertion()}
        # Assertion should not be rejected if fxa-tokenVerified is unset
        mock_response = {
            "status": "okay",
            "email": "*****@*****.**",
            "idpClaims": {}
        }
        with self.mock_browserid_verifier(response=mock_response):
            self.app.get("/1.0/sync/1.1", headers=headers, status=200)
        # Assertion should not be rejected if fxa-tokenVerified is True
        mock_response['idpClaims']['fxa-tokenVerified'] = True
        with self.mock_browserid_verifier(response=mock_response):
            self.app.get("/1.0/sync/1.1", headers=headers, status=200)
        # Assertion should be rejected if fxa-tokenVerified is False
        mock_response['idpClaims']['fxa-tokenVerified'] = False
        with self.mock_browserid_verifier(response=mock_response):
            res = self.app.get("/1.0/sync/1.1", headers=headers, status=401)
        self.assertEqual(res.json['status'], 'invalid-credentials')

    def test_generation_number_change(self):
        """Generation numbers may only move forward for a given user."""
        headers = {"Authorization": "BrowserID %s" % self._getassertion()}
        # Start with no generation number.
        mock_response = {"status": "okay", "email": "*****@*****.**"}
        with self.mock_browserid_verifier(response=mock_response):
            res1 = self.app.get("/1.0/sync/1.1", headers=headers)
        # Now send an explicit generation number.
        # The node assignment should not change.
        mock_response["idpClaims"] = {"fxa-generation": 12}
        with self.mock_browserid_verifier(response=mock_response):
            res2 = self.app.get("/1.0/sync/1.1", headers=headers)
        self.assertEqual(res1.json["uid"], res2.json["uid"])
        self.assertEqual(res1.json["api_endpoint"], res2.json["api_endpoint"])
        # Clients that don't report generation number are still allowed.
        del mock_response["idpClaims"]
        with self.mock_browserid_verifier(response=mock_response):
            res2 = self.app.get("/1.0/sync/1.1", headers=headers)
        self.assertEqual(res1.json["uid"], res2.json["uid"])
        mock_response["idpClaims"] = {"some-nonsense": "lolwut"}
        with self.mock_browserid_verifier(response=mock_response):
            res2 = self.app.get("/1.0/sync/1.1", headers=headers)
        self.assertEqual(res1.json["uid"], res2.json["uid"])
        # But previous generation numbers get an invalid-generation response.
        mock_response["idpClaims"] = {"fxa-generation": 10}
        with self.mock_browserid_verifier(response=mock_response):
            res = self.app.get("/1.0/sync/1.1", headers=headers, status=401)
        self.assertEqual(res.json["status"], "invalid-generation")
        # Equal generation numbers are accepted.
        mock_response["idpClaims"] = {"fxa-generation": 12}
        with self.mock_browserid_verifier(response=mock_response):
            res2 = self.app.get("/1.0/sync/1.1", headers=headers)
        self.assertEqual(res1.json["uid"], res2.json["uid"])
        self.assertEqual(res1.json["api_endpoint"], res2.json["api_endpoint"])
        # Later generation numbers are accepted.
        # Again, the node assignment should not change.
        mock_response["idpClaims"] = {"fxa-generation": 13}
        with self.mock_browserid_verifier(response=mock_response):
            res2 = self.app.get("/1.0/sync/1.1", headers=headers)
        self.assertEqual(res1.json["uid"], res2.json["uid"])
        self.assertEqual(res1.json["api_endpoint"], res2.json["api_endpoint"])
        # And that should lock out the previous generation number
        mock_response["idpClaims"] = {"fxa-generation": 12}
        with self.mock_browserid_verifier(response=mock_response):
            res = self.app.get("/1.0/sync/1.1", headers=headers, status=401)
        self.assertEqual(res.json["status"], "invalid-generation")
        # Various nonsense generation numbers should give errors.
        mock_response["idpClaims"] = {"fxa-generation": "whatswrongwithyour"}
        with self.mock_browserid_verifier(response=mock_response):
            res = self.app.get("/1.0/sync/1.1", headers=headers, status=401)
        self.assertEqual(res.json["status"], "invalid-generation")
        mock_response["idpClaims"] = {"fxa-generation": None}
        with self.mock_browserid_verifier(response=mock_response):
            res = self.app.get("/1.0/sync/1.1", headers=headers, status=401)
        self.assertEqual(res.json["status"], "invalid-generation")
        mock_response["idpClaims"] = {"fxa-generation": "42"}
        with self.mock_browserid_verifier(response=mock_response):
            res = self.app.get("/1.0/sync/1.1", headers=headers, status=401)
        self.assertEqual(res.json["status"], "invalid-generation")
        mock_response["idpClaims"] = {"fxa-generation": ["I", "HACK", "YOU"]}
        with self.mock_browserid_verifier(response=mock_response):
            res = self.app.get("/1.0/sync/1.1", headers=headers, status=401)
        self.assertEqual(res.json["status"], "invalid-generation")

    def test_client_state_change(self):
        """Client-state changes need matching generation/keys_changed_at
        bumps; each new state yields a new uid, and old states are stale."""
        mock_response = {
            "status": "okay",
            "email": "*****@*****.**",
            "idpClaims": {"fxa-generation": 1234, "fxa-keysChangedAt": 1234},
        }
        # Start with no client-state header.
        headers = {'Authorization': 'BrowserID %s' % self._getassertion()}
        with self.mock_browserid_verifier(response=mock_response):
            res = self.app.get('/1.0/sync/1.1', headers=headers)
        uid0 = res.json['uid']
        # No change == same uid.
        with self.mock_browserid_verifier(response=mock_response):
            res = self.app.get('/1.0/sync/1.1', headers=headers)
        self.assertEqual(res.json['uid'], uid0)
        # Changing client-state header requires changing generation.
        headers['X-Client-State'] = 'aaaa'
        with self.mock_browserid_verifier(response=mock_response):
            res = self.app.get('/1.0/sync/1.1', headers=headers, status=401)
        self.assertEqual(res.json['status'], 'invalid-client-state')
        desc = res.json['errors'][0]['description']
        self.assertTrue(desc.endswith('new value with no generation change'))
        # Changing client-state header requires changing keys_changed_at.
        mock_response["idpClaims"]["fxa-generation"] += 1
        headers['X-Client-State'] = 'aaaa'
        with self.mock_browserid_verifier(response=mock_response):
            res = self.app.get('/1.0/sync/1.1', headers=headers, status=401)
        self.assertEqual(res.json['status'], 'invalid-client-state')
        desc = res.json['errors'][0]['description']
        self.assertTrue(desc.endswith('with no keys_changed_at change'))
        # Change the client-state header, get a new uid.
        mock_response["idpClaims"]["fxa-keysChangedAt"] += 1
        headers['X-Client-State'] = 'aaaa'
        with self.mock_browserid_verifier(response=mock_response):
            res = self.app.get('/1.0/sync/1.1', headers=headers)
        uid1 = res.json['uid']
        self.assertNotEqual(uid1, uid0)
        # No change == same uid.
        with self.mock_browserid_verifier(response=mock_response):
            res = self.app.get('/1.0/sync/1.1', headers=headers)
        self.assertEqual(res.json['uid'], uid1)
        # Send a client-state header, get a new uid.
        headers['X-Client-State'] = 'bbbb'
        mock_response["idpClaims"]["fxa-generation"] += 1
        mock_response["idpClaims"]["fxa-keysChangedAt"] += 1
        with self.mock_browserid_verifier(response=mock_response):
            res = self.app.get('/1.0/sync/1.1', headers=headers)
        uid2 = res.json['uid']
        self.assertNotEqual(uid2, uid0)
        self.assertNotEqual(uid2, uid1)
        # No change == same uid.
        with self.mock_browserid_verifier(response=mock_response):
            res = self.app.get('/1.0/sync/1.1', headers=headers)
        self.assertEqual(res.json['uid'], uid2)
        # Use a previous client-state, get an auth error.
        headers['X-Client-State'] = 'aaaa'
        with self.mock_browserid_verifier(response=mock_response):
            res = self.app.get('/1.0/sync/1.1', headers=headers, status=401)
        self.assertEqual(res.json['status'], 'invalid-client-state')
        desc = res.json['errors'][0]['description']
        self.assertTrue(desc.endswith('stale value'))
        del headers['X-Client-State']
        with self.mock_browserid_verifier(response=mock_response):
            res = self.app.get('/1.0/sync/1.1', headers=headers, status=401)
        self.assertEqual(res.json['status'], 'invalid-client-state')
        headers['X-Client-State'] = 'aaaa'
        mock_response["idpClaims"]["fxa-generation"] += 1
        mock_response["idpClaims"]["fxa-keysChangedAt"] += 1
        with self.mock_browserid_verifier(response=mock_response):
            res = self.app.get('/1.0/sync/1.1', headers=headers, status=401)
        self.assertEqual(res.json['status'], 'invalid-client-state')

    def test_fxa_kid_change(self):
        """fxa_kid tracks keys_changed_at once the IdP starts reporting it,
        and values below the high-water mark are rejected."""
        # Starting off not reporting keys_changed_at.
        # We don't expect to encounter this in production, but it might
        # happen to self-hosters who update tokenserver without updating
        # their FxA stack.
        headers = {
            "Authorization": "BrowserID %s" % self._getassertion(),
            "X-Client-State": "616161",
        }
        mock_response = {
            "status": "okay",
            "email": "*****@*****.**",
            "idpClaims": {"fxa-generation": 1234},
        }
        with self.mock_browserid_verifier(response=mock_response):
            res = self.app.get('/1.0/sync/1.1', headers=headers)
        token = self.unsafelyParseToken(res.json["id"])
        self.assertEqual(token["fxa_kid"], "0000000001234-YWFh")
        # Now pretend we updated FxA and it started sending keys_changed_at.
        mock_response["idpClaims"]["fxa-generation"] = 2345
        mock_response["idpClaims"]["fxa-keysChangedAt"] = 2345
        headers["X-Client-State"] = "626262"
        with self.mock_browserid_verifier(response=mock_response):
            res = self.app.get('/1.0/sync/1.1', headers=headers)
        token = self.unsafelyParseToken(res.json["id"])
        self.assertEqual(token["fxa_kid"], "0000000002345-YmJi")
        # If we roll back the FxA stack so it stops reporting keys_changed_at,
        # users will get locked out because we can't produce `fxa_kid`.
        del mock_response["idpClaims"]["fxa-keysChangedAt"]
        with self.mock_browserid_verifier(response=mock_response):
            res = self.app.get('/1.0/sync/1.1', headers=headers, status=401)
        self.assertEqual(res.json["status"], "invalid-keysChangedAt")
        # We will likewise reject values below the high-water mark.
        mock_response["idpClaims"]["fxa-keysChangedAt"] = 2340
        with self.mock_browserid_verifier(response=mock_response):
            res = self.app.get('/1.0/sync/1.1', headers=headers, status=401)
        self.assertEqual(res.json["status"], "invalid-keysChangedAt")
        # But accept the correct value, even if generation number changes.
        mock_response["idpClaims"]["fxa-generation"] = 3456
        mock_response["idpClaims"]["fxa-keysChangedAt"] = 2345
        with self.mock_browserid_verifier(response=mock_response):
            res = self.app.get('/1.0/sync/1.1', headers=headers)
        token = self.unsafelyParseToken(res.json["id"])
        self.assertEqual(token["fxa_kid"], "0000000002345-YmJi")
        # TODO: ideally we will error if keysChangedAt changes without a
        # change in generation, but we can't do that until all servers
        # are running the latest version of the code.
        # mock_response["idpClaims"]["fxa-keysChangedAt"] = 4567
        # headers["X-Client-State"] = "636363"
        # with self.mock_browserid_verifier(response=mock_response):
        #     res = self.app.get('/1.0/sync/1.1', headers=headers, status=401)
        # self.assertEqual(res.json["status"], "invalid-keysChangedAt")
        # But accept further updates if both values change in unison.
        mock_response["idpClaims"]["fxa-generation"] = 4567
        mock_response["idpClaims"]["fxa-keysChangedAt"] = 4567
        headers["X-Client-State"] = "636363"
        with self.mock_browserid_verifier(response=mock_response):
            res = self.app.get('/1.0/sync/1.1', headers=headers)
        token = self.unsafelyParseToken(res.json["id"])
        self.assertEqual(token["fxa_kid"], "0000000004567-Y2Nj")

    def test_fxa_kid_change_with_oauth(self):
        """Mixing OAuth and BrowserID: the keys_changed_at high-water mark
        set via one auth scheme constrains the other."""
        # Starting off not reporting keys_changed_at.
        # This uses BrowserID since OAuth always reports keys_changed_at.
        headers_browserid = {
            "Authorization": "BrowserID %s" % self._getassertion(),
            "X-Client-State": "616161",
        }
        mock_response = {
            "status": "okay",
            "email": "*****@*****.**",
            "idpClaims": {"fxa-generation": 1234},
        }
        with self.mock_browserid_verifier(response=mock_response):
            res = self.app.get('/1.0/sync/1.1', headers=headers_browserid)
        token0 = self.unsafelyParseToken(res.json["id"])
        self.assertEqual(token0["fxa_kid"], "0000000001234-YWFh")
        # Now an OAuth client shows up, setting keys_changed_at.
        # (The value matches generation number above, because in this scenario
        # FxA hasn't been updated to track and report keysChangedAt yet).
        headers_oauth = {
            "Authorization": "Bearer %s" % self._gettoken("*****@*****.**"),
            "X-KeyID": "1234-YWFh",
        }
        res = self.app.get('/1.0/sync/1.1', headers=headers_oauth)
        token = self.unsafelyParseToken(res.json["id"])
        self.assertEqual(token["fxa_kid"], "0000000001234-YWFh")
        self.assertEqual(token["uid"], token0["uid"])
        self.assertEqual(token["node"], token0["node"])
        # At this point, BrowserID clients are locked out until FxA is updated,
        # because we're now expecting to see keys_changed_at for that user.
        with self.mock_browserid_verifier(response=mock_response):
            res = self.app.get('/1.0/sync/1.1', headers=headers_browserid,
                               status=401)
        self.assertEqual(res.json["status"], "invalid-keysChangedAt")
        # We will likewise reject values below the high-water mark.
        mock_response["idpClaims"]["fxa-keysChangedAt"] = 1230
        with self.mock_browserid_verifier(response=mock_response):
            res = self.app.get('/1.0/sync/1.1', headers=headers_browserid,
                               status=401)
        self.assertEqual(res.json["status"], "invalid-keysChangedAt")
        headers_oauth["X-KeyID"] = "1230-YWFh"
        res = self.app.get('/1.0/sync/1.1', headers=headers_oauth, status=401)
        self.assertEqual(res.json["status"], "invalid-keysChangedAt")
        # We accept new values via OAuth.
        headers_oauth["X-KeyID"] = "2345-YmJi"
        res = self.app.get('/1.0/sync/1.1', headers=headers_oauth)
        token = self.unsafelyParseToken(res.json["id"])
        self.assertEqual(token["fxa_kid"], "0000000002345-YmJi")
        self.assertNotEqual(token["uid"], token0["uid"])
        self.assertEqual(token["node"], token0["node"])
        # And via BrowserID, as long as generation number increases as well.
        headers_browserid["X-Client-State"] = "636363"
        mock_response["idpClaims"]["fxa-generation"] = 3456
        mock_response["idpClaims"]["fxa-keysChangedAt"] = 3456
        with self.mock_browserid_verifier(response=mock_response):
            res = self.app.get('/1.0/sync/1.1', headers=headers_browserid)
        token = self.unsafelyParseToken(res.json["id"])
        self.assertEqual(token["fxa_kid"], "0000000003456-Y2Nj")

    def test_kid_change_during_gradual_tokenserver_rollout(self):
        """Simulate a mixed fleet of old/new tokenserver nodes handling a
        password reset, to check keys_changed_at bookkeeping stays sane."""
        # Let's start with a user already in the db, with no keys_changed_at.
        user0 = self.backend.allocate_user("sync-1.1", "*****@*****.**",
                                           generation=1234,
                                           client_state="616161")
        # User hits updated tokenserver node, writing keys_changed_at to db.
        headers = {
            "Authorization": "BrowserID %s" % self._getassertion(),
            "X-Client-State": "616161",
        }
        mock_response = {
            "status": "okay",
            "email": "*****@*****.**",
            "idpClaims": {
                "fxa-generation": 1234,
                "fxa-keysChangedAt": 1200,
            },
        }
        with self.mock_browserid_verifier(response=mock_response):
            self.app.get('/1.0/sync/1.1', headers=headers)
        # That should not have triggered a node re-assignment.
        user1 = self.backend.get_user("sync-1.1", mock_response["email"])
        self.assertEqual(user1['uid'], user0['uid'])
        self.assertEqual(user1['node'], user0['node'])
        # That should have written keys_changed_at into the db.
        self.assertEqual(user1["generation"], 1234)
        self.assertEqual(user1["keys_changed_at"], 1200)
        # User does a password reset on their Firefox Account.
        mock_response["idpClaims"]["fxa-generation"] = 2345
        mock_response["idpClaims"]["fxa-keysChangedAt"] = 2345
        headers["X-Client-State"] = "626262"
        # They sync again, but hit a tokenserver node that isn't updated yet.
        # Simulate this by writing the updated data directly to the backend,
        # which should trigger a node re-assignment.
        # NOTE(review): the following assertions rely on update_user
        # mutating `user1` in place — confirm against the backend API.
        self.backend.update_user("sync-1.1", user1,
                                 generation=2345,
                                 client_state="626262")
        self.assertNotEqual(user1['uid'], user0['uid'])
        self.assertEqual(user1['node'], user0['node'])
        # They sync again, hitting an updated tokenserver node.
        # This should succeed, despite keys_changed_at appearing to have
        # changed without any corresponding change in generation number.
        with self.mock_browserid_verifier(response=mock_response):
            res = self.app.get('/1.0/sync/1.1', headers=headers)
        token = self.unsafelyParseToken(res.json["id"])
        self.assertEqual(token["fxa_kid"], "0000000002345-YmJi")
        # That should not have triggered a second node re-assignment.
        user2 = self.backend.get_user("sync-1.1", mock_response["email"])
        self.assertEqual(user2['uid'], user1['uid'])
        self.assertEqual(user2['node'], user1['node'])

    def test_client_state_cannot_revert_to_empty(self):
        """Once a client-state is set, missing/empty values are rejected."""
        # Start with a client-state header.
        headers = {
            'Authorization': 'BrowserID %s' % self._getassertion(),
            'X-Client-State': 'aaaa',
        }
        res = self.app.get('/1.0/sync/1.1', headers=headers)
        uid0 = res.json['uid']
        # Sending no client-state will fail.
        del headers['X-Client-State']
        res = self.app.get('/1.0/sync/1.1', headers=headers, status=401)
        self.assertEqual(res.json['status'], 'invalid-client-state')
        desc = res.json['errors'][0]['description']
        self.assertTrue(desc.endswith('empty string'))
        # An explicitly empty client-state fails the same way.
        headers['X-Client-State'] = ''
        res = self.app.get('/1.0/sync/1.1', headers=headers, status=401)
        self.assertEqual(res.json['status'], 'invalid-client-state')
        desc = res.json['errors'][0]['description']
        self.assertTrue(desc.endswith('empty string'))
        # And the uid will be unchanged.
        headers['X-Client-State'] = 'aaaa'
        res = self.app.get('/1.0/sync/1.1', headers=headers)
        self.assertEqual(res.json['uid'], uid0)

    def test_credentials_from_oauth_and_browserid(self):
        """A user may authenticate via OAuth or BrowserID interchangeably.

        Both credential types must map to the same node assignment, and
        generation / keys_changed_at checks must be enforced consistently
        across the two auth mechanisms.  The calls below are order-dependent:
        each successful request updates the server's view of the user.
        """
        # Send initial credentials via oauth.
        headers_oauth = {
            "Authorization": "Bearer %s" % self._gettoken(),
            "X-KeyID": "7-YWFh",
        }
        res1 = self.app.get("/1.0/sync/1.1", headers=headers_oauth)
        # Send the same credentials via BrowserID
        headers_browserid = {
            "Authorization": "BrowserID %s" % self._getassertion(),
            "X-Client-State": "616161",
        }
        mock_response = {
            "status": "okay",
            "email": "*****@*****.**",
            "idpClaims": {"fxa-generation": 12, "fxa-keysChangedAt": 7},
        }
        with self.mock_browserid_verifier(response=mock_response):
            res2 = self.app.get("/1.0/sync/1.1", headers=headers_browserid)
        # They should get the same node assignment.
        self.assertEqual(res1.json["uid"], res2.json["uid"])
        self.assertEqual(res1.json["api_endpoint"], res2.json["api_endpoint"])
        # Earlier generation number via BrowserID -> invalid-generation
        mock_response["idpClaims"]['fxa-generation'] = 11
        with self.mock_browserid_verifier(response=mock_response):
            res = self.app.get("/1.0/sync/1.1", headers=headers_browserid,
                               status=401)
        # NOTE(review): this re-checks the endpoints captured *before* the 401
        # above; it duplicates the assertion a few lines up and looks like a
        # copy-paste leftover — confirm intent.
        self.assertEqual(res1.json["api_endpoint"], res2.json["api_endpoint"])
        self.assertEqual(res.json["status"], "invalid-generation")
        # Earlier keys_changed_at via BrowserID is not accepted.
        mock_response["idpClaims"]['fxa-generation'] = 12
        mock_response["idpClaims"]['fxa-keysChangedAt'] = 6
        with self.mock_browserid_verifier(response=mock_response):
            res1 = self.app.get("/1.0/sync/1.1", headers=headers_browserid,
                                status=401)
        self.assertEqual(res1.json['status'], 'invalid-keysChangedAt')
        # Earlier keys_changed_at via OAuth is not accepted.
        headers_oauth['X-KeyID'] = '6-YWFh'
        res1 = self.app.get("/1.0/sync/1.1", headers=headers_oauth, status=401)
        self.assertEqual(res1.json['status'], 'invalid-keysChangedAt')
        # Change client-state via BrowserID.
        headers_browserid['X-Client-State'] = '626262'
        mock_response["idpClaims"]['fxa-generation'] = 42
        mock_response["idpClaims"]['fxa-keysChangedAt'] = 42
        with self.mock_browserid_verifier(response=mock_response):
            res1 = self.app.get("/1.0/sync/1.1", headers=headers_browserid)
        # Previous OAuth creds are rejected due to keys_changed_at update.
        headers_oauth['X-KeyID'] = '7-YmJi'
        res2 = self.app.get("/1.0/sync/1.1", headers=headers_oauth, status=401)
        self.assertEqual(res2.json['status'], 'invalid-keysChangedAt')
        # Updated OAuth creds are accepted.
        headers_oauth['X-KeyID'] = '42-YmJi'
        res2 = self.app.get("/1.0/sync/1.1", headers=headers_oauth)
        # They should again get the same node assignment.
        self.assertEqual(res1.json["uid"], res2.json["uid"])
        self.assertEqual(res1.json["api_endpoint"], res2.json["api_endpoint"])

    def test_client_specified_duration(self):
        """Clients may shorten, but never extend, the token duration.

        Fix: use ``assertEqual`` instead of the deprecated ``assertEquals``
        alias (removed in Python 3.12).
        """
        headers = {'Authorization': 'BrowserID %s' % self._getassertion()}
        # It's ok to request a shorter-duration token.
        res = self.app.get('/1.0/sync/1.1?duration=12', headers=headers)
        self.assertEqual(res.json['duration'], 12)
        # But you can't exceed the server's default value.
        res = self.app.get('/1.0/sync/1.1?duration=4000', headers=headers)
        self.assertEqual(res.json['duration'], 3600)
        # And nonsense values are ignored, falling back to the default.
        res = self.app.get('/1.0/sync/1.1?duration=lolwut', headers=headers)
        self.assertEqual(res.json['duration'], 3600)
        res = self.app.get('/1.0/sync/1.1?duration=-1', headers=headers)
        self.assertEqual(res.json['duration'], 3600)

    def test_allow_new_users(self):
        """The 'tokenserver.allow_new_users' setting gates first-time signups.

        Fix: use ``assertEqual`` instead of the deprecated ``assertEquals``
        alias (removed in Python 3.12).
        """
        # New users are allowed by default (setting absent).
        settings = self.config.registry.settings
        self.assertEqual(settings.get('tokenserver.allow_new_users'), None)
        assertion = self._getassertion(email="*****@*****.**")
        headers = {'Authorization': 'BrowserID %s' % assertion}
        self.app.get('/1.0/sync/1.1', headers=headers, status=200)
        # They're allowed if we explicitly allow them.
        settings['tokenserver.allow_new_users'] = True
        assertion = self._getassertion(email="*****@*****.**")
        headers = {'Authorization': 'BrowserID %s' % assertion}
        self.app.get('/1.0/sync/1.1', headers=headers, status=200)
        # They're not allowed if we explicitly disable them.
        settings['tokenserver.allow_new_users'] = False
        assertion = self._getassertion(email="*****@*****.**")
        headers = {'Authorization': 'BrowserID %s' % assertion}
        res = self.app.get('/1.0/sync/1.1', headers=headers, status=401)
        self.assertEqual(res.json['status'], 'new-users-disabled')
        # But existing users (seen in the earlier requests) are still allowed.
        assertion = self._getassertion(email="*****@*****.**")
        headers = {'Authorization': 'BrowserID %s' % assertion}
        self.app.get('/1.0/sync/1.1', headers=headers, status=200)
        assertion = self._getassertion(email="*****@*****.**")
        headers = {'Authorization': 'BrowserID %s' % assertion}
        self.app.get('/1.0/sync/1.1', headers=headers, status=200)

    def test_metrics_uid_logging(self):
        """A successful token request logs the expected metrics fields."""
        assert "fxa.metrics_uid_secret_key" in self.config.registry.settings
        assertion = self._getassertion(email="*****@*****.**")
        headers = {'Authorization': 'BrowserID %s' % assertion}
        self.app.get('/1.0/sync/1.1', headers=headers, status=200)
        expected_fields = (
            'uid',
            'uid.first_seen_at',
            'metrics_uid',
            'metrics_device_id',
        )
        for field in expected_fields:
            self.assertMetricWasLogged(field)

    def test_uid_and_kid_from_browserid_assertion(self):
        """Tokens minted via BrowserID carry the expected uid/kid fields."""
        assertion = self._getassertion(email="*****@*****.**")
        bid_headers = {
            "Authorization": "BrowserID %s" % (assertion,),
            "X-Client-State": "616161",
        }
        verifier_response = {
            "status": "okay",
            "email": "*****@*****.**",
            "idpClaims": {"fxa-generation": 13, 'fxa-keysChangedAt': 12},
        }
        with self.mock_browserid_verifier(response=verifier_response):
            resp = self.app.get("/1.0/sync/1.1", headers=bid_headers)
        parsed = self.unsafelyParseToken(resp.json["id"])
        self.assertEqual(parsed["uid"], resp.json["uid"])
        self.assertEqual(parsed["fxa_uid"], "testuser")
        self.assertEqual(parsed["fxa_kid"], "0000000000012-YWFh")
        # The hashed uid must differ from the raw one, but match the response.
        self.assertNotEqual(parsed["hashed_fxa_uid"], parsed["fxa_uid"])
        self.assertEqual(parsed["hashed_fxa_uid"],
                         resp.json["hashed_fxa_uid"])
        self.assertIn("hashed_device_id", parsed)

    def test_uid_and_kid_from_oauth_token(self):
        """Tokens minted via OAuth carry the expected uid/kid fields."""
        bearer = self._gettoken(email="*****@*****.**")
        oauth_headers = {
            "Authorization": "Bearer %s" % (bearer,),
            "X-KeyID": "12-YWFh",
        }
        resp = self.app.get("/1.0/sync/1.1", headers=oauth_headers)
        parsed = self.unsafelyParseToken(resp.json["id"])
        self.assertEqual(parsed["uid"], resp.json["uid"])
        self.assertEqual(parsed["fxa_uid"], "testuser")
        self.assertEqual(parsed["fxa_kid"], "0000000000012-YWFh")
        # The hashed uid must differ from the raw one, but match the response.
        self.assertNotEqual(parsed["hashed_fxa_uid"], parsed["fxa_uid"])
        self.assertEqual(parsed["hashed_fxa_uid"],
                         resp.json["hashed_fxa_uid"])
        self.assertIn("hashed_device_id", parsed)

    def test_metrics_uid_is_returned_in_response(self):
        """The response body includes the hashed metrics uid.

        Fix: use ``assertIn`` instead of ``assertTrue(x in y)`` — it reports
        both operands on failure rather than just "False is not true".
        """
        assert "fxa.metrics_uid_secret_key" in self.config.registry.settings
        assertion = self._getassertion(email="*****@*****.**")
        headers = {'Authorization': 'BrowserID %s' % assertion}
        res = self.app.get('/1.0/sync/1.1', headers=headers, status=200)
        self.assertIn('hashed_fxa_uid', res.json)

    def test_node_type_is_returned_in_response(self):
        """The assigned node's type is reported back in the response body."""
        auth_header = 'BrowserID %s' % self._getassertion(email="*****@*****.**")
        resp = self.app.get('/1.0/sync/1.1',
                            headers={'Authorization': auth_header},
                            status=200)
        self.assertEqual(resp.json['node_type'], 'example')