def test_unstore_order(self):
    """A backed-up message must be re-sent before any newer message."""
    first = {"type": "perf", "value": "1"}
    # Producer is paused, so the first message can only end up in the
    # backup database.
    self.bp.paused = True
    consumer = ConsumerStub()
    self.bp.consumer = consumer
    self.bp.queue.append(first)
    yield self.bp.processQueue()
    # The message is now stored in the backup database.
    backup_size = yield self.bp.retry.qsize()
    self.assertEqual(backup_size, 1)
    # Reconnect.
    self.bp.resumeProducing()
    # Give the producer a moment to drain the backup.
    yield wait(0.5)
    # Now send a second message.
    second = {"type": "perf", "value": "2"}
    self.bp.queue.append(second)
    yield self.bp.processQueue()
    # Give it a moment to be delivered.
    yield wait(0.5)
    # Both messages must have been delivered, backup first.
    self.assertEqual(len(consumer.written), 2)
    self.assertEqual(consumer.written, [first, second])
def test_send_stats(self):
    """Relay a set of statistics as individual perf messages."""
    client = ClientStub("testhost", None, None)
    self.settings["connector"]["status_service"] = "testsvc"
    sp = statuspublisher_factory(self.settings, client)
    sp.isConnected = lambda: True
    client.stub_connect()
    stats = {"key1": "value1", "key2": "value2", "key3": "value3"}
    msg = {"type": "perf"}
    sp._sendStats(stats, msg)
    def check(r_):
        output = client.channel.sent
        # One message must have been sent per statistic.
        # (Stray debug "print output" removed.)
        self.assertEqual(len(output), 3)
        msg_out = [
            json.loads(m["content"].body)
            for m in output
        ]
        msg_in = []
        for k, v in stats.iteritems():
            m = msg.copy()
            m.update({"datasource": "testsvc-%s" % k, "value": v})
            msg_in.append(m)
        # Same dict iterated twice in the same process: identical
        # iteration order, so a plain list comparison is reliable.
        self.assertEqual(msg_in, msg_out)
    d = wait(0.2)
    d.addCallback(check)
    return d
def test_vacuum(self):
    """The database must be VACUUMed once it has been fully drained."""
    db = DbRetry(self.db_path, 'tmp_table')
    stub = ConnectionPoolStub(db._db)
    db._db = stub
    xml = '<abc foo="bar">def</abc>'
    yield db.put(xml)
    yield db.flush()
    # Fetch an item twice: the first get() empties the database, the
    # second one triggers the VACUUM.
    yield db.get()
    yield db.get()
    # Wait a little: the VACUUM is deferred.
    yield wait(1)
    # (Stray debug "print stub.requests" removed.)
    self.assertEqual(
        (("VACUUM", ), {}),
        stub.requests.pop()
    )