Example #1
def test_replace_dead_worker():
    pool = mod.Pool(processes=2)
    with capture_log_output(mod.__name__) as log:
        # `die` makes each worker process exit while handling its item
        results = list(pool.imap_unordered(die, range(3)))
    logs = log.get_output()
    # the pool logs one "replaced worker" message per dead worker
    eq(logs.count("replaced worker"), 3, logs)
    # no results are produced because every worker died before yielding one
    eq(results, [])
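All of these examples exercise a `capture_log_output` helper whose definition is not shown on this page. As a rough sketch (an assumption, not the actual corehq implementation), such a helper can be built from the standard `logging` module: attach a temporary handler writing to an in-memory stream on the named logger, and expose the captured text through `get_output()`:

import logging
from contextlib import contextmanager
from io import StringIO


class _Capture(object):
    """Hold the stream so callers can read it after the context exits."""

    def __init__(self, stream):
        self._stream = stream

    def get_output(self):
        return self._stream.getvalue()


@contextmanager
def capture_log_output(logger_name, level=logging.DEBUG):
    # attach a temporary handler to the named logger ('' means root)
    logger = logging.getLogger(logger_name)
    stream = StringIO()
    handler = logging.StreamHandler(stream)
    old_level = logger.level
    logger.addHandler(handler)
    logger.setLevel(level)
    try:
        yield _Capture(stream)
    finally:
        # restore the logger so other tests are unaffected
        logger.removeHandler(handler)
        logger.setLevel(old_level)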
Example #2
    def test_blob_expires(self):
        now = datetime(2017, 1, 1)
        manager = BlobMeta.objects.partitioned_query(self.args["parent_id"])
        pre_expire_count = manager.count()

        with capture_log_output(mod.__name__) as logs:
            with patch('corehq.blobs.metadata._utcnow', return_value=now):
                self.db.put(BytesIO(b'content'), timeout=60, **self.args)

            self.assertIsNotNone(
                self.db.get(key=self.key, type_code=CODES.tempfile))
            with patch('corehq.blobs.tasks._utcnow',
                       return_value=now + timedelta(minutes=61)):
                bytes_deleted = delete_expired_blobs()

            self.assertEqual(bytes_deleted, len('content'))

            with self.assertRaises(NotFound):
                self.db.get(key=self.key, type_code=CODES.tempfile)

            self.assertEqual(manager.all().count(), pre_expire_count)
            self.assertRegex(
                logs.get_output(),
                r"deleted expired blobs: .+'blob-identifier'",
            )
Example #4
def test_process_item_error():
    with capture_log_output(mod.__name__) as log:
        # one_over computes 1 / item, so item 0 raises ZeroDivisionError
        results = set(mod.Pool().imap_unordered(one_over, [-1, 0, 1]))
    logs = log.get_output()
    eq(logs, Regex("error processing item in worker"))
    eq(logs, Regex("ZeroDivisionError"))
    # the failing item is dropped; the other results still come through
    eq(results, {-1, 1})
Example #5
    def _test_success(self, auto_flush):
        kafka_producer = ChangeProducer(auto_flush=auto_flush)
        with capture_log_output(KAFKA_AUDIT_LOGGER) as logs:
            meta = ChangeMeta(document_id=uuid.uuid4().hex, data_source_type='dummy-type',
                              data_source_name='dummy-name')
            kafka_producer.send_change(topics.CASE, meta)
            if not auto_flush:
                kafka_producer.flush()
        self._check_logs(logs, meta.document_id, [CHANGE_PRE_SEND, CHANGE_SENT])
Example #6
    def test_error_asynchronous(self):
        kafka_producer = ChangeProducer(auto_flush=False)
        future = Future()
        kafka_producer.producer.send = Mock(return_value=future)

        meta = ChangeMeta(
            document_id=uuid.uuid4().hex, data_source_type='dummy-type', data_source_name='dummy-name'
        )

        with capture_log_output(KAFKA_AUDIT_LOGGER) as logs:
            kafka_producer.send_change(topics.CASE, meta)
            future.failure(Exception())

        self._check_logs(logs, meta.document_id, [CHANGE_PRE_SEND, CHANGE_ERROR])
Example #7
    def test_diff_case_with_wrong_domain(self):
        wrong_domain = create_domain("wrong")
        self.addCleanup(wrong_domain.delete)
        self.submit_form(make_test_form("form-1"), domain="wrong")
        self.do_migration(case_diff="none", domain="wrong")
        self.do_migration(case_diff="none")
        clear_local_domain_sql_backend_override(self.domain_name)
        with capture_log_output("corehq.apps.couch_sql_migration") as log, \
                self.augmented_couch_case("test-case") as case:
            # modify case so it would have a diff (if it were diffed)
            case.age = '35'
            case.save()
            # try to diff case in wrong domain
            self.do_case_diffs(cases="test-case")
Example #8
    def test_error_synchronous(self):
        kafka_producer = ChangeProducer()
        future = Future()
        future.get = Mock(side_effect=Exception())
        kafka_producer.producer.send = Mock(return_value=future)

        meta = ChangeMeta(
            document_id=uuid.uuid4().hex, data_source_type='dummy-type', data_source_name='dummy-name'
        )

        with capture_log_output(KAFKA_AUDIT_LOGGER) as logs:
            with self.assertRaises(Exception):
                kafka_producer.send_change(topics.CASE, meta)

        self._check_logs(logs, meta.document_id, [CHANGE_PRE_SEND, CHANGE_ERROR])
Example #9
    def test_failed_diff(self):
        self.pool_mock.stop()
        self.addCleanup(self.pool_mock.start)
        self.submit_form(make_test_form("form-1", case_id="case-1"))
        self._do_migration(case_diff="none")
        with patch("corehq.apps.couch_sql_migration.casediff.diff_case") as mock, \
                capture_log_output("corehq.apps.couch_sql_migration.parallel") as log:
            mock.side_effect = Exception("diff failed!")
            self.do_case_diffs()
        logs = log.get_output()
        self.assertIn("error processing item in worker", logs)
        self.assertIn("Exception: diff failed!", logs)
        self._compare_diffs([])
        db = open_state_db(self.domain_name, self.state_dir)
        self.assertEqual(list(db.iter_undiffed_case_ids()), ["case-1"])
Example #10
    def test_failed_diff(self):
        self.pool_mock.stop()
        self.addCleanup(self.pool_mock.start)
        self.submit_form(make_test_form("form-1", case_id="case-1"))
        self.do_migration(case_diff="none")
        # patch init_worker to make subprocesses use the same database
        # connections as this process (which is operating in a transaction)
        init_worker_path = "corehq.apps.couch_sql_migration.casedifftool.init_worker"
        with patch(init_worker_path, mod.global_diff_state), \
                patch("corehq.apps.couch_sql_migration.casediff.diff_case") as mock, \
                capture_log_output("corehq.apps.couch_sql_migration.parallel") as log:
            mock.side_effect = Exception("diff failed!")
            self.do_case_diffs()
        logs = log.get_output()
        self.assertIn("error processing item in worker", logs)
        self.assertIn("Exception: diff failed!", logs)
        self.compare_diffs()
        db = open_state_db(self.domain_name, self.state_dir)
        self.assertEqual(list(db.iter_undiffed_case_ids()), ["case-1"])
Example #11
    def _test_submit_bad_data(self, bad_data):
        f, path = tmpfile(mode='wb')
        with f:
            f.write(bad_data)
        with open(path, 'rb') as f:
            with capture_log_output('', logging.WARNING) as logs:
                res = self.client.post(self.url, {"xml_submission_file": f})
            self.assertEqual(422, res.status_code)
            self.assertIn('Invalid XML', res.content.decode('utf-8'))

        # make sure we logged it
        [log] = FormAccessors(self.domain.name).get_forms_by_type(
            'SubmissionErrorLog', limit=1)

        self.assertIsNotNone(log)
        self.assertIn('Invalid XML', log.problem)
        self.assertEqual(bad_data, log.get_xml())
        self.assertEqual(log.form_data, {})
        return logs.get_output()
Example #12
    def _send_to_es_mock_errors(self, exception, retries):
        doc = {'_id': uuid.uuid4().hex, 'doc_type': 'MyCoolDoc', 'property': 'bar'}

        with mock.patch("pillowtop.processors.elastic._propagate_failure", return_value=False), \
             mock.patch("pillowtop.processors.elastic._retries", return_value=retries), \
             mock.patch("pillowtop.processors.elastic._sleep_between_retries"), \
             mock.patch("pillowtop.processors.elastic._get_es_interface") as _get_es_interface, \
             capture_log_output("pillowtop") as log:
            es_interface = mock.Mock()
            es_interface.index_doc.side_effect = exception
            _get_es_interface.return_value = es_interface
            send_to_elasticsearch(
                TEST_INDEX_INFO,
                doc_type=TEST_INDEX_INFO.type,
                doc_id=doc['_id'],
                es_getter=None,
                name='test',
                data=doc,
                es_merge_update=False,
                delete=False
            )
        return log.get_output()
Example #13
    def test_blob_expires(self):
        now = datetime(2017, 1, 1)
        pre_expire_count = BlobExpiration.objects.all().count()

        with capture_log_output(mod.__name__) as logs:
            with patch('corehq.blobs.util._utcnow', return_value=now):
                self.db.put(BytesIO(b'content'), self.identifier, bucket=self.bucket, timeout=60)

            self.assertIsNotNone(self.db.get(self.identifier, self.bucket))
            with patch('corehq.blobs.tasks._utcnow', return_value=now + timedelta(minutes=61)):
                bytes_deleted = delete_expired_blobs()

            self.assertEqual(bytes_deleted, len('content'))

            with self.assertRaises(NotFound):
                self.db.get(self.identifier, self.bucket)

            self.assertEqual(BlobExpiration.objects.all().count(), pre_expire_count)
            self.assertRegex(
                logs.get_output(),
                r"deleted expired blobs: .+/blob-bucket/blob-identifier'",
            )
Example #14
    def test_legacy_blob_expires(self):
        # this test can be removed when BlobExpiration is removed
        now = datetime(2017, 1, 1)
        pre_expire_count = BlobExpiration.objects.all().count()

        with capture_log_output(mod.__name__) as logs:
            args = self.args.copy()
            args["key"] = blob_key = "bucket/" + self.key
            meta = self.db.put(BytesIO(b'content'), **args)
            self.assertFalse(meta.expires_on, meta.expires_on)
            self.addCleanup(lambda: self.db.delete(key=blob_key))

            # create legacy BlobExpiration object
            expire = BlobExpiration(
                bucket="bucket",
                identifier=self.key,
                expires_on=now + timedelta(minutes=60),
                length=7,
            )
            expire.save()
            self.addCleanup(BlobExpiration.objects.filter(id=expire.id).delete)

            self.assertIsNotNone(self.db.get(key=blob_key))
            with patch('corehq.blobs.tasks._utcnow',
                       return_value=now + timedelta(minutes=61)):
                bytes_deleted = delete_expired_blobs()

            self.assertEqual(bytes_deleted, len('content'))

            with self.assertRaises(NotFound):
                self.db.get(key=blob_key)

            self.assertEqual(BlobExpiration.objects.all().count(),
                             pre_expire_count)
            self.assertRegex(
                logs.get_output(),
                r"deleted expired blobs: .+'bucket/blob-identifier'",
            )
Example #16
    def test_blob_expires(self):
        now = datetime(2017, 1, 1)
        shard = get_db_alias_for_partitioned_doc(self.args["parent_id"])
        manager = BlobMeta.objects.using(shard)
        pre_expire_count = manager.count()

        with capture_log_output(mod.__name__) as logs:
            with patch('corehq.blobs.metadata._utcnow', return_value=now):
                self.db.put(BytesIO(b'content'), timeout=60, **self.args)

            self.assertIsNotNone(self.db.get(key=self.key))
            with patch('corehq.blobs.tasks._utcnow', return_value=now + timedelta(minutes=61)):
                bytes_deleted = delete_expired_blobs()

            self.assertEqual(bytes_deleted, len('content'))

            with self.assertRaises(NotFound):
                self.db.get(key=self.key)

            self.assertEqual(manager.all().count(), pre_expire_count)
            self.assertRegex(
                logs.get_output(),
                r"deleted expired blobs: .+'blob-identifier'",
            )