Example #1
        def check_it(result):
            # First determine the contact ID.
            key = ['rd.core.content', 'schema_id', 'rd.contact']
            result = yield get_doc_model().open_view(key=key, reduce=False,
                                             include_docs=True)

            rows = result['rows']
            # Should be exactly 1 record with a 'contact' schema.
            self.failUnlessEqual(len(rows), 1, str(rows))
            key_type, cid = rows[0]['doc']['rd_key']
            self.failUnlessEqual(key_type, 'contact')

            # Should be exactly 3 rd.identity.contacts records, each pointing
            # at my contact.
            key = ['rd.core.content', 'schema_id', 'rd.identity.contacts']
            result = yield get_doc_model().open_view(key=key,
                                             reduce=False,
                                             include_docs=True)

            rows = result['rows']
            self.failUnlessEqual(len(rows), 3, str(rows))
            docs = [r['doc'] for r in rows]
            for doc in docs:
                contacts = doc['contacts']
                self.failUnlessEqual(len(contacts), 1, contacts)
                this_id, this_rel = contacts[0]
                self.failUnlessEqual(this_id, cid)
                self.failUnless(this_rel in ['personal', 'public'], this_rel)
Example #2
    def verifyCounts(self, contact_count, identity_count):
        # First count the rd.contact records.
        key = ['schema_id', 'rd.contact']
        result = get_doc_model().open_view(key=key, reduce=False)
        self.failUnlessEqual(len(result['rows']), contact_count, repr(result))

        # Each identity should have 2 schema instances.
        keys = [['schema_id', 'rd.identity.exists'],
                ['schema_id', 'rd.identity.contacts'],
               ]

        result = get_doc_model().open_view(keys=keys, reduce=False)
        self.failUnlessEqual(len(result['rows']), identity_count*2, repr(result))
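The by-schema view lookup in the two examples above recurs throughout this page, sometimes keyed as ['schema_id', <id>] and sometimes as ['rd.core.content', 'schema_id', <id>], depending on the raindrop revision the snippet comes from. A minimal consolidating sketch, assuming only the open_view() keyword arguments and the synchronous call style already shown in Example #2; the helper name is hypothetical:

def count_docs_with_schema(doc_model, schema_id):
    # Hypothetical helper, not part of raindrop itself: count the view rows
    # for one schema_id using the same key shape as verifyCounts() above.
    key = ['schema_id', schema_id]
    result = doc_model.open_view(key=key, reduce=False)
    return len(result['rows'])

With it, the first assertion in verifyCounts() would roughly become self.failUnlessEqual(count_docs_with_schema(get_doc_model(), 'rd.contact'), contact_count).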
Example #3
 def _prepare_test_doc(self):
     doc_model = get_doc_model()
     # abuse the schema API to write the outgoing smtp data and the
     # 'state' doc in one hit.
     body = 'subject: hello\r\n\r\nthe body'
     items = {'smtp_from' : '*****@*****.**',
              'smtp_to': ['*****@*****.**', '*****@*****.**'],
              # The 'state' bit...
              'sent_state': None,
              'outgoing_state': 'outgoing',
             }
     sis = [
             {'rd_key': ['test', 'smtp_test'],
              'rd_ext_id': 'testsuite',
              'rd_schema_id': 'rd.some_src_schema',
              'items': {'outgoing_state': 'outgoing'},
             },
             {'rd_key': ['test', 'smtp_test'],
              'rd_ext_id': 'testsuite',
              'rd_schema_id': 'rd.msg.outgoing.smtp',
              'items': items,
              'attachments': {'smtp_body': {'data': body}},
             },
           ]
     
     doc_model.create_schema_items(sis)
     dids = [doc_model.get_doc_id_for_schema_item(si) for si in sis]
     return doc_model.open_documents_by_id(dids)
Example #4
 def setup_pipeline(whateva):
     global g_pipeline, g_conductor
     if g_pipeline is None:
         g_pipeline = pipeline.Pipeline(model.get_doc_model(), options)
         _ = yield g_pipeline.initialize()
     if g_conductor is None:
         g_conductor = yield get_conductor(g_pipeline)
Example #5
def delete_docs(result, parser, options):
    """Delete all documents of a particular type.  Use with caution or see
       the 'unprocess' command for an alternative.
    """
    # NOTE: This is for development only, until we get a way to say
    # 'reprocess stuff you've already done' - in the meantime deleting those
    # intermediate docs has the same result...
    def _del_docs(to_del):
        docs = []
        for id, rev in to_del:
            docs.append({'_id': id, '_rev': rev})
        return model.get_doc_model().delete_documents(docs)

    def _got_docs(result, dt):
        to_del = [(row['id'], row['value']['_rev']) for row in result['rows']]
        logger.info("Deleting %d documents of type %r", len(to_del), dt)
        return to_del

    if not options.schemas:
        parser.error("You must specify one or more --schema")
    deferreds = []
    for st in options.schemas:
        key = ['rd.core.content', 'schema_id', st]
        d = model.get_doc_model().open_view(key=key, reduce=False
                ).addCallback(_got_docs, st
                ).addCallback(_del_docs
                )
        deferreds.append(d)
    return defer.DeferredList(deferreds)
Example #6
 def _prepare_test_doc(self):
     doc_model = get_doc_model()
     # write a simple outgoing schema
     items = {'body' : 'hello there',
              'from' : ['email', '*****@*****.**'],
              'from_display': 'Sender Name',
              'to' : [
                         ['email', '*****@*****.**'],
                         ['email', '*****@*****.**'],
                     ],
              'to_display': ['recip 1', 'recip 2'],
               'cc' : [
                          ['email', '*****@*****.**'],
                      ],
              'cc_display' : ['CC recip 1'],
              'subject': "the subject",
              # The 'state' bit...
              'sent_state': None,
              'outgoing_state': 'outgoing',
             }
     result = yield doc_model.create_schema_items([
                 {'rd_key': ['test', 'smtp_test'],
                  'rd_ext_id': 'testsuite',
                  'rd_schema_id': 'rd.msg.outgoing.simple',
                  'items': items,
                 }])
     src_doc = yield doc_model.db.openDoc(result[0]['id'])
     defer.returnValue(src_doc)
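Both _prepare_test_doc() variants above (Example #3 and Example #6) pass create_schema_items() a list of dictionaries with the same shape. A sketch of that shape for reference, using only the fields visible in these examples; the values are placeholders:

# Illustrative schema item only -- field names are taken from the examples
# above, the values are placeholders.
schema_item = {
    'rd_key': ['test', 'smtp_test'],          # raindrop key: [key_type, key_value]
    'rd_ext_id': 'testsuite',                 # the extension writing the schema
    'rd_schema_id': 'rd.msg.outgoing.simple', # which schema the items belong to
    'items': {'outgoing_state': 'outgoing'},  # the schema fields themselves
    # 'attachments': {'name': {'data': '...'}},  # optional, as in Example #3
}

create_schema_items() takes a list of such items; Example #3 calls it directly, while Example #6 yields the returned deferred and then re-opens the written document via doc_model.db.openDoc().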
Example #7
def show_info(parser, options):
    """Print a list of all extensions, loggers etc"""
    dm = model.get_doc_model()
    print "Database:"
    info = dm.db.infoDB()
    fmt = "  %(doc_count)d docs total, %(doc_del_count)d deleted, " \
          "update seq at %(update_seq)d, %(disk_size)d bytes."
    print fmt % info
    # ouch - this seems a painful way of fetching total unique keys?
    results = dm.open_view(
                startkey=["key"],
                endkey=["key", {}],
                group_level=2)
    print "  %d unique raindrop keys" % len(results['rows'])

    print "API groupings:"
    from urllib import urlencode
    dbconfig = get_config().couches['local']
    try:
        summaries = _call_api(dbconfig, "_api/inflow/grouping/summary")
        print " %d groupings exist" % len(summaries)
        for gs in summaries:
            title = gs.get('title') or gs['rd_key']
            opts = {'limit': 60, 'message_limit': 2,
                    'keys': json.dumps([gs['rd_key']]),
                    }
            path = "_api/inflow/conversations/in_groups?" + urlencode(opts)
            this = _call_api(dbconfig, path)
            print "  %s: %d conversations" % (title, len(this))
    except dm.db.Error, exc:
        print "Failed to call the API:", exc
Example #8
def _call_api(dbconfig, path):
    db = model.get_doc_model().db
    host = dbconfig['host']
    port = dbconfig['port']
    dbname = dbconfig['name']
    uri = "http://%s:%s/%s/%s" % (host, port, dbname, path)
    return db._request("GET", uri)
Example #9
def _init(req):
    global pipeline, conductor
    assert pipeline is None # don't call me twice!
    opts = options_from_request(req)
    pipeline = Pipeline(model.get_doc_model(), opts)
    _ = yield pipeline.initialize()
    conductor = yield raindrop.sync.get_conductor(pipeline)
Example #10
 def report():
     dm = model.get_doc_model()
     info = yield dm.db.infoDB()
     db_seq = info["update_seq"]
     proc_seq = g_pipeline.incoming_processor.current_seq
     left = db_seq - proc_seq
     print "still waiting for new raindrops (%d items to be processed) ..." % (left)
Example #11
def show_info(result, parser, options):
    """Print a list of all extensions, loggers etc"""
    dm = model.get_doc_model()
    print "Database:"
    info = yield dm.db.infoDB()
    fmt = "  %(doc_count)d docs total, %(doc_del_count)d deleted, " \
          "update seq at %(update_seq)d, %(disk_size)d bytes."
    print fmt % info
    # ouch - this seems a painful way of fetching total unique keys?
    results = yield dm.open_view(
        startkey=["rd.core.content", "key"], endkey=["rd.core.content", "key", {}], group_level=3
    )
    print "  %d unique raindrop keys" % len(results["rows"])

    print "Document counts by schema:"
    results = yield dm.open_view(
        startkey=["rd.core.content", "schema_id"], endkey=["rd.core.content", "schema_id", {}], group_level=3
    )
    infos = []
    for row in results["rows"]:
        sch_id = row["key"][-1]
        infos.append((sch_id, row["value"]))
    for sch_id, count in sorted(infos):
        print "  %s: %d" % (sch_id, count)
    print

    print "Raindrop extensions:"
    exts = sorted((yield pipeline.load_extensions(dm)).items())  # sort by ID.
    for _, ext in exts:
        print "  %s: %s" % (ext.id, ext.doc["info"])
    print
    print "Loggers"
    # yuck - reach into impl - and hope all have been initialized by now
    # (they should have been as we loaded the extensions above)
    for name in logging.Logger.manager.loggerDict:
        print " ", name
Example #12
    def test_one_testmsg(self):
        # When processing a single test message we end up with 2 identities,
        # both associated with the same contact.

        result = self.process_doc()
        dm = get_doc_model()
        # First determine the contact ID.
        key = ['schema_id', 'rd.contact']
        result = dm.open_view(key=key, reduce=False, include_docs=True)

        rows = result['rows']
        # Should be exactly 1 record with a 'contact' schema.
        self.failUnlessEqual(len(rows), 1, str(rows))
        key_type, cid = rows[0]['doc']['rd_key']
        self.failUnlessEqual(key_type, 'contact')

        # Should be exactly 2 rd.identity.contacts records, each pointing
        # at my contact.
        key = ['schema_id', 'rd.identity.contacts']
        result = dm.open_view(key=key, reduce=False, include_docs=True)
        rows = result['rows']
        self.failUnlessEqual(len(rows), 2, str(rows))
        docs = [r['doc'] for r in rows]
        for doc in docs:
            contacts = doc['contacts']
            self.failUnlessEqual(len(contacts), 1, contacts)
            this_id, this_rel = contacts[0]
            self.failUnlessEqual(this_id, cid)
            self.failUnless(this_rel in ['personal', 'public'], this_rel)
Example #13
def go(_):
    try:
        _ = yield init_config()
        dm = get_doc_model()
        _ = yield make_graph(dm)
    finally:
        print "stopping"
        reactor.stop()
Example #14
 def _get_post_client(self, src_doc, raw_doc):
     acct = SMTPAccount(get_doc_model(), {})
     # we need a factory for error handling...
     factory = SMTPClientFactory(None, None, src_doc, raw_doc)
     c = SMTPPostingClient(acct, src_doc, raw_doc, 'secret', None, None, None)
     c.factory = factory
     c.deferred = defer.Deferred()
     return c
Example #15
 def test_simple_connection_failed(self):
     src_doc = yield self._prepare_test_doc()
     server = FakeSMTPServer()
     server.connection_made_resp = "452 Out of disk space; try later"
     client = self._get_post_client(src_doc, src_doc)
     _ = yield self.loopback(server, client)
     # now re-open the doc and check the state says 'error'
     src_doc = yield get_doc_model().db.openDoc(src_doc['_id'])
     self.failUnlessEqual(src_doc['sent_state'], 'error')
Example #16
 def setUp(self):
     _ = yield TestCaseWithDB.setUp(self)
     raindrop.config.CONFIG = None
     self.config = self.make_config()
     opts = self.get_options()
     self.doc_model = get_doc_model()
     self.pipeline = Pipeline(self.doc_model, opts)
     _ = yield self.prepare_test_db(self.config)
     _ = yield self.pipeline.initialize()
Example #17
 def test_simple(self):
     src_doc = yield self._prepare_test_doc()
     server = FakeSMTPServer()
     client = self._get_post_client(src_doc, src_doc)
     _ = yield self.loopback(server, client)
     # now re-open the doc and check the state says 'sent'
     src_doc = yield get_doc_model().db.openDoc(src_doc['_id'])
     self.failUnlessEqual(src_doc['sent_state'], 'sent')
     self.failUnless(server.buffer) # must have connected to the test server.
Example #18
 def test_simple_failed(self):
     src_doc = yield self._prepare_test_doc()
     server = FakeSMTPServer()
     client = self._get_post_client(src_doc, src_doc)
     client.requireAuthentication = True # this causes failure!
     _ = yield self.loopback(server, client)
     # now re-open the doc and check the state says 'error'
     src_doc = yield get_doc_model().db.openDoc(src_doc['_id'])
     self.failUnlessEqual(src_doc['sent_state'], 'error')
Example #19
 def setUp(self):
     TestCase.setUp(self)
     self._conductor = None
     raindrop.config.CONFIG = None
     self.config = self.make_config()
     opts = self.get_options()
     self.doc_model = get_doc_model()
     self.pipeline = raindrop.pipeline.Pipeline(self.doc_model, opts)
     self.prepare_test_db(self.config)
     self.pipeline.initialize()
Example #20
    def test_simple_rejected(self):
        src_doc = yield self._prepare_test_doc()
        server = FakeSMTPServer()
        server.responses["MAIL FROM:"] = "500 sook sook sook"

        client = self._get_post_client(src_doc, src_doc)
        _ = yield self.loopback(server, client)
        # now re-open the doc and check the state says 'error'
        src_doc = yield get_doc_model().db.openDoc(src_doc['_id'])
        self.failUnlessEqual(src_doc['sent_state'], 'error')
Example #21
 def test_outgoing_twice(self):
     doc_model = get_doc_model()
     src_doc = yield self._prepare_test_doc()
     conductor = yield self.get_conductor()
     nc = FakeSMTPServer.num_connections
     _ = yield conductor.sync(self.pipeline.options)
     self.failUnlessEqual(nc+1, FakeSMTPServer.num_connections)
     nc = FakeSMTPServer.num_connections
     # sync again - better not make a connection this time!
     _ = yield conductor.sync(self.pipeline.options)
     self.failUnlessEqual(nc, FakeSMTPServer.num_connections)
Example #22
 def test_simple(self):
     src_doc, out_doc = self._prepare_test_doc()
     self._send_test_doc(src_doc, out_doc)
     # now re-open the doc and check the state says 'sent'
     src_doc = get_doc_model().db.openDoc(src_doc['_id'])
     self.failUnlessEqual(src_doc['sent_state'], 'sent')
     self.failUnlessEqual(self.server.num_connections, 1) # must have connected to the test server.
     # check the protocol recorded the success
     status = self.acct.status
     self.failUnlessEqual(status.get('state'), Rat.GOOD, status)
     self.failUnlessEqual(status.get('what'), Rat.EVERYTHING, status)
Example #23
    def test_simple_connection_failed(self):
        def filter_log(rec):
            return "Out of disk space; try later" in rec.msg
        self.log_handler.ok_filters.append(filter_log)
        self.server.connection_made_resp = "452 Out of disk space; try later"

        src_doc, out_doc = self._prepare_test_doc()
        self._send_test_doc(src_doc, out_doc)
        # now re-open the doc and check the state says 'error'
        src_doc = get_doc_model().db.openDoc(src_doc['_id'])
        self.failUnlessEqual(src_doc['sent_state'], 'error')
Example #24
 def test_outgoing_twice(self):
     doc_model = get_doc_model()
     src_doc = self._prepare_test_doc()
     nc = self.server.num_connections
     self.ensure_pipeline_complete()
     self.failUnlessEqual(nc+1, self.server.num_connections)
     nc = self.server.num_connections
     # sync again - better not make a connection this time!
     # XXX - this isn't testing what it should - it *should* ensure that
     # the pipeline sees the message again but the conductor refuses to
     # re-send it due to the 'outgoing_state'.
     self.ensure_pipeline_complete()
     self.failUnlessEqual(nc, self.server.num_connections)
Example #25
    def test_one_testmsg_common(self):
        # Here we process 2 test messages which share one identity and each
        # have one unique identity.  When we process the second message we
        # should notice the shared identity_id is already associated with the
        # contact created the first time round, so we still end up with a
        # single contact, but it now has *three* identities.
        self.test_one_testmsg()
        result = self.process_doc()
        # First determine the contact ID.
        key = ['schema_id', 'rd.contact']
        result = get_doc_model().open_view(key=key, reduce=False,
                                           include_docs=True)

        rows = result['rows']
        # Should be exactly 1 record with a 'contact' schema.
        self.failUnlessEqual(len(rows), 1, str(rows))
        key_type, cid = rows[0]['doc']['rd_key']
        self.failUnlessEqual(key_type, 'contact')

        # Should be exactly 3 rd.identity.contacts records, each pointing
        # at my contact.
        key = ['schema_id', 'rd.identity.contacts']
        result = get_doc_model().open_view(key=key,
                                           reduce=False,
                                           include_docs=True)

        rows = result['rows']
        self.failUnlessEqual(len(rows), 3, str(rows))
        docs = [r['doc'] for r in rows]
        for doc in docs:
            contacts = doc['contacts']
            self.failUnlessEqual(len(contacts), 1, contacts)
            this_id, this_rel = contacts[0]
            self.failUnlessEqual(this_id, cid)
            self.failUnless(this_rel in ['personal', 'public'], this_rel)

        self.verifyCounts(1, 3)
Example #26
 def prepare_corpus_environment(self, corpus_name):
     raindrop.config.CONFIG = None
     cd = self.get_corpus_dir(corpus_name)
     self.config = raindrop.config.init_config(os.path.join(cd, "raindrop"))
     # hack our couch server in
     dbinfo = self.config.couches['local']
     dbinfo['name'] = 'raindrop_test_suite'
     dbinfo['port'] = 5984
     opts = FakeOptions()
     self.doc_model = get_doc_model()
     self.pipeline = Pipeline(self.doc_model, opts)
     return self.prepare_test_db(self.config
         ).addCallback(lambda _: self.pipeline.initialize()
         )
Example #27
def retry_errors(parser, options):
    """Reprocess all conversions which previously resulted in an error."""
    g_pipeline.start_retry_errors()
    print "Error retry pipeline has finished..."
    # Now reset all outgoing items with a state of 'error' back to 'outgoing'
    dm = model.get_doc_model()
    result = dm.open_view(viewId="outgoing_by_state", key="error",
                          include_docs=True)
    docs = [r['doc'] for r in result['rows']]
    if docs:
        for d in docs:
            d['outgoing_state'] = 'outgoing'
        dm.update_documents(docs)
    print "Reset %d outgoing items for retry" % (len(docs),)
Example #28
    def test_one_testmsg_unique(self):
        # Here we process 2 test messages, but the messages don't emit a
        # common identity ID.  The end result is 2 contacts: one with 2
        # identities (from reusing test_one_testmsg), and a second contact
        # with only a single identity.
        self.test_one_testmsg()
        result = self.process_doc(False)
        # First determine the 2 contact IDs.
        key = ['schema_id', 'rd.contact']
        result = get_doc_model().open_view(key=key, reduce=False,
                                           include_docs=True)

        rows = result['rows']
        # Should be exactly 2 records with a 'contact' schema.
        self.failUnlessEqual(len(rows), 2, pformat(rows))
        key_type, cid1 = rows[0]['doc']['rd_key']
        self.failUnlessEqual(key_type, 'contact')
        key_type, cid2 = rows[1]['doc']['rd_key']
        self.failUnlessEqual(key_type, 'contact')

        # Should be exactly 3 rd.identity.contacts records, each pointing
        # at one of the two contacts.
        key = ['schema_id', 'rd.identity.contacts']
        result = get_doc_model().open_view(key=key, reduce=False,
                                           include_docs=True)

        rows = result['rows']
        self.failUnlessEqual(len(rows), 3, str(rows))
        docs = [r['doc'] for r in rows]
        for doc in docs:
            contacts = doc['contacts']
            self.failUnlessEqual(len(contacts), 1, contacts)
            this_id, this_rel = contacts[0]
            self.failUnless(this_id in [cid1, cid2])
            self.failUnless(this_rel in ['personal', 'public'], this_rel)

        self.verifyCounts(2, 3)
Example #29
def add_schemas(result, parser, options, args):
    """Add one or more schema documents to the couch"""
    if not args:
        parser.error("You must supply filenames containing json for the docs")
    dm = model.get_doc_model()
    for arg in args:
        try:
            with open(arg) as f:
                try:
                    vals = json.load(f)
                except ValueError, why:
                    parser.error("file %r has invalid json: %s" % (arg, why))
        except IOError:
            parser.error("Failed to open json document %r" % arg)

        got = yield dm.create_schema_items([vals])
        print "Saved doc id %(id)r at rev %(rev)s" % got[0]
Example #30
    def test_simple_rejected(self):
        src_doc, out_doc = self._prepare_test_doc()
        def filter_log(rec):
            return "sook sook sook" in rec.msg
        self.log_handler.ok_filters.append(filter_log)
        self.server.responses["MAIL FROM:"] = "500 sook sook sook"

        self._send_test_doc(src_doc, out_doc)
        # now re-open the doc and check the state says 'error'
        src_doc = get_doc_model().db.openDoc(src_doc['_id'])
        self.failUnlessEqual(src_doc['sent_state'], 'error')

        # check the protocol recorded the error.
        status = self.acct.status
        self.failUnlessEqual(status.get('state'), Rat.BAD, status)
        self.failUnlessEqual(status.get('what'), Rat.SERVER, status)
        self.failUnless('sook' in status.get('message', ''), status)