Exemplo n.º 1
0
    def setup(self):
        """Swap couchdbkit over to test databases before the test run.

        Replaces the module-level couchdbkit handler (and the loading
        module's helper functions) with one backed by test databases,
        then re-binds every already-registered schema class to its
        test db.
        """
        from django.conf import settings
        if self.should_skip_test_setup():
            return

        log.info("overridding the couchdbkit database settings to use a test database!")

        # first pass: just implement this as a monkey-patch to the loading module
        # overriding all the existing couchdb settings
        databases = getattr(settings, "COUCHDB_DATABASES", [])

        # Convert old style (sequence of (app, uri) pairs) to new style (dict)
        if isinstance(databases, (list, tuple)):
            databases = dict(
                (app_name, {'URL': uri}) for app_name, uri in databases
            )

        # map each app to its test-database variant
        self.dbs = dict(
            (app, self.get_test_db(db)) for app, db in databases.items()
        )

        # monkey-patch loading so every lookup goes through the test handler
        old_handler = loading.couchdbkit_handler
        couchdbkit_handler = loading.CouchdbkitHandler(self.dbs)
        loading.couchdbkit_handler = couchdbkit_handler
        loading.register_schema = couchdbkit_handler.register_schema
        loading.get_schema = couchdbkit_handler.get_schema
        loading.get_db = couchdbkit_handler.get_db

        # register our dbs with the extension document classes
        for app, value in old_handler.app_schema.items():
            for name, cls in value.items():
                cls.set_db(loading.get_db(app))

        sys.__stdout__.write("\n")  # newline for creating database message
        super(HqdbContext, self).setup()
Exemplo n.º 2
0
def _process_form(request, domain, app_id, user_id, authenticated,
                  auth_cls=AuthContext):
    """Submit an incoming XForm and return the HTTP response.

    Any 400 response from the submission pipeline is mirrored into the
    'couchlog' database for later inspection.
    """
    instance, attachments = couchforms.get_instance_and_attachment(request)
    response = couchforms.SubmissionPost(
        instance=instance,
        attachments=attachments,
        domain=domain,
        app_id=app_id,
        auth_context=auth_cls(
            domain=domain,
            user_id=user_id,
            authenticated=authenticated,
        ),
        location=couchforms.get_location(request),
        received_on=couchforms.get_received_on(request),
        date_header=couchforms.get_date_header(request),
        path=couchforms.get_path(request),
        submit_ip=couchforms.get_submit_ip(request),
        last_sync_token=couchforms.get_last_sync_token(request),
        openrosa_headers=couchforms.get_openrosa_headers(request),
    ).get_response()
    if response.status_code == 400:
        db_response = get_db('couchlog').save_doc({
            'request': unicode(request),
            'response': unicode(response),
        })
        # lazy %-style args: formatting is skipped when ERROR is disabled
        logging.error('Status code 400 for a form submission. '
                      'See couchlog db for more info: %s', db_response['id'])
    return response
Exemplo n.º 3
0
    def extract(self, mapping, limit=None, date_range=None, status_callback=None):
        """Pull rows from a CouchDB view and write them into SQL.

        Returns (total_rows, rows_with_value); rows_with_value is only
        counted when a limit forces the rows to be materialized.
        """
        start_key, end_key = self.get_couch_keys(mapping, date_range=date_range)

        source_db = get_db(mapping.database) if mapping.database else self.db
        result = self.get_couch_rows(
            mapping.couch_view, start_key, end_key, db=source_db, limit=limit)

        row_total = result.total_rows
        counted = 0
        if row_total <= 0:
            return row_total, counted

        logger.info("Total rows: %d", row_total)

        if status_callback:
            # bind the total so downstream callers only report progress
            status_callback = functools.partial(status_callback, row_total)

        sql_rows = self.couch_rows_to_sql_rows(
            result, mapping, status_callback=status_callback)
        if limit:
            sql_rows = list(sql_rows)
            counted = len(sql_rows)

        combined = self.combine_rows(sql_rows, mapping, chunksize=(limit or 250))
        self.write_rows_to_sql(combined, mapping)

        return row_total, counted
Exemplo n.º 4
0
def load_db(fileobj, dbname, ignore_errors=False):
    db = get_db(dbname)

    for headers, is_multipart, payload in read_multipart(fileobj):
        docid = headers['content-id']

        if is_multipart: # doc has attachments
            for headers, _, payload in payload:
                if 'content-id' not in headers:
                    doc = json.loads(payload)
                    doc['_attachments'] = {}
                else:
                    doc['_attachments'][headers['content-id']] = {
                        'data': b64encode(payload),
                        'content_type': headers['content-type'],
                        'length': len(payload)
                    }

        else: # no attachments, just the JSON
            doc = json.loads(payload)

        del doc['_rev']
        print>>sys.stderr, 'Loading document %r' % docid
        try:
            db[docid] = doc
        except Exception, e:
            if not ignore_errors:
                raise
            print>>sys.stderr, 'Error: %s' % e
Exemplo n.º 5
0
def sync_design_docs(temp=None):
    """Push the fluff ``_design`` documents to every indicator pillow's db.

    :param temp: optional suffix forwarded to ``sync_docs`` (used to stage
        design docs under a temporary name).
    """
    # renamed from `dir`, which shadowed the builtin
    design_dir = os.path.abspath(os.path.dirname(__file__))
    for pillow in import_pillows(instantiate=False):
        # only fluff pillows (those backed by an indicator document) have dbs
        if hasattr(pillow, 'indicator_class'):
            app_label = pillow.indicator_class._meta.app_label
            db = get_db(app_label)
            sync_docs.sync_design_docs(
                db, os.path.join(design_dir, "_design"), FLUFF, temp=temp)
Exemplo n.º 6
0
def delete_pl(sender, instance=None, **kwargs):
    """
    "pre-delete" signal callback: delete the CouchDB records that mirror
    this pl (all revisions, their sections, pb blocks and web remarks).

    Note: chaining views on the CouchDB side would be better; for now we
    collect everything via _all_docs and issue one bulk delete at the end.
    """
    db = get_db('aimpl')
    docs = []
    # get all revisions of this pl
    for row in db.all_docs(startkey=instance.path, include_docs=True):
        docs.append(row['doc'])
        # get all sections
        sections = db.all_docs(keys=row['doc']['section_ids'], include_docs=True)
        for sec in sections:
            docs.append(sec["doc"])
            # get all pb blocks
            pblocks = db.all_docs(keys=sec["doc"]["pblock_ids"], include_docs=True)
            for block in pblocks:
                docs.append(block["doc"])
        # get all web remarks
        # NOTE(review): this view is queried with no key and inside the outer
        # loop, so EVERY remark in the db is appended once per pl revision --
        # looks like it should be keyed on the pl and/or hoisted out of the
        # loop; confirm intent before changing.
        remarks = db.view("aimpl/web_remarks")
        for remark in remarks:
            docs.append(remark['value'])
    
    db.bulk_delete(docs,  all_or_nothing=True)
Exemplo n.º 7
0
def iter_couch_audit_events(params, chunksize=10000):
    """Yield CouchAuditEvent docs matching `params`, fetched in chunks."""
    if not (params.get("start_date") or params.get("user")):
        raise NotImplementedError("auditcare queries on Couch have not "
                                  "been designed for unbounded queries")
    if params.get("start_date"):
        # everything after the SQL cutover lives in SQL, not Couch
        if params["start_date"] > get_sql_start_date():
            return
    db = get_db("auditcare")
    view_name = ("auditcare/urlpath_by_user_date" if "user" in params
                 else "auditcare/all_events")
    startkey, endkey = _get_couch_view_keys(**params)
    rows = db.view(
        view_name,
        startkey=startkey,
        endkey=endkey,
        reduce=False,
        include_docs=False,
    )
    matching_ids = {row["id"] for row in rows}
    for doc in iter_docs(db, matching_ids, chunksize=chunksize):
        yield CouchAuditEvent(doc)
Exemplo n.º 8
0
def save_couch_doc(doc_type, user, **doc):
    """Persist an AuditEvent-style document to the auditcare db.

    Returns the id of the newly saved document.
    """
    doc.update(
        doc_type=doc_type,
        user=user,
        _id=uuid4().hex,
        base_type="AuditEvent",
    )
    return get_db("auditcare").save_doc(doc)["id"]
Exemplo n.º 9
0
    def teardown(self):
        """Drop the couch test databases and clean up leaked SQL connections."""
        if self.should_skip_test_setup():
            return

        self.blob_db.close()
        if self.optimize_migrations:
            self.optimizer.__exit__(None, None, None)

        if self.skip_teardown_for_reuse_db:
            return

        # drop each couch database exactly once (several apps may share a uri)
        dropped_uris = []
        for app, uri in self.apps:
            if uri in dropped_uris:
                continue
            label = app.split('.')[-1]
            db = loading.get_db(label)
            try:
                db.server.delete_db(db.dbname)
            except ResourceNotFound:
                log.info("database %s not found for %s! it was probably already deleted.",
                         db.dbname, label)
            else:
                dropped_uris.append(uri)
                log.info("deleted database %s for %s", db.dbname, label)

        # HACK clean up leaked database connections
        from corehq.sql_db.connections import connection_manager
        connection_manager.dispose_all()

        super(HqdbContext, self).teardown()
Exemplo n.º 10
0
 def get_db(cls):
     """Return the couch db for this app, memoized on the class.

     The db is resolved once from the app's ``app_label`` and cached in
     ``cls._db`` for subsequent calls.
     """
     db = getattr(cls, '_db', None)
     if db is None:
         # plain attribute access; getattr() with no default added nothing
         db = get_db(cls._meta.app_label)
         cls._db = db
     return db
Exemplo n.º 11
0
def load_db(fileobj, dbname, ignore_errors=False):
    db = get_db(dbname)

    for headers, is_multipart, payload in read_multipart(fileobj):
        docid = headers['content-id']

        if is_multipart:  # doc has attachments
            for headers, _, payload in payload:
                if 'content-id' not in headers:
                    doc = json.loads(payload)
                    doc['_attachments'] = {}
                else:
                    doc['_attachments'][headers['content-id']] = {
                        'data': b64encode(payload),
                        'content_type': headers['content-type'],
                        'length': len(payload)
                    }

        else:  # no attachments, just the JSON
            doc = json.loads(payload)

        del doc['_rev']
        print >> sys.stderr, 'Loading document %r' % docid
        try:
            db[docid] = doc
        except Exception, e:
            if not ignore_errors:
                raise
            print >> sys.stderr, 'Error: %s' % e
Exemplo n.º 12
0
 def setUp(self):
     """Build a SavedExportSchema fixture with one SplitColumn plus its schema."""
     self.db = get_db('couchexport')
     self.custom_export = SavedExportSchema.wrap({
         'type': 'demo',
         'default_format': Format.JSON,
         'index': json.dumps(['test_custom']),
         'tables': [{
             'index': '#',
             'display': 'Export',
             'columns': [{
                 'index': 'multi',
                 'display': 'Split',
                 'doc_type': 'SplitColumn',
                 'options': ['a', 'b', 'c', 'd'],
             }],
         }],
     })
     self.custom_export.filter_function = SerializableFunction()
     self.schema = [{
         '#export_tag': ['string'],
         'tag': 'string',
         'multi': 'string',
     }]
Exemplo n.º 13
0
    def teardown(self):
        """Delete the couch test databases, then clean up SQL connections."""
        if self.should_skip_test_setup():
            return

        self.blob_db.close()

        # drop each app's database once; count duplicates for the log
        dropped = []
        already_gone = 0
        for app in self.apps:
            label = app.split('.')[-1]
            db = loading.get_db(label)
            if db.dbname in dropped:
                already_gone += 1
                continue
            try:
                db.server.delete_db(db.dbname)
            except ResourceNotFound:
                log.info("database %s not found for %s! it was probably already deleted.", db.dbname, label)
            else:
                dropped.append(db.dbname)
                log.info("deleted database %s for %s", db.dbname, label)
        if already_gone:
            log.info("skipped deleting %s app databases that were already deleted", already_gone)

        # HACK clean up leaked database connections
        from corehq.sql_db.connections import connection_manager
        connection_manager.dispose_all()

        super(HqdbContext, self).teardown()
Exemplo n.º 14
0
 def get_db(cls):
     """Return the couch db for this app, memoized on the class.

     The db is resolved once from the app's ``app_label`` and cached in
     ``cls._db`` for subsequent calls.
     """
     db = getattr(cls, '_db', None)
     if db is None:
         # plain attribute access; getattr() with no default added nothing
         db = get_db(cls._meta.app_label)
         cls._db = db
     return db
Exemplo n.º 15
0
class Group(Document):
    """Couch-backed user group with a maximum loan length."""
    _db = get_db("couchauth")
    name = schema.StringProperty()
    users = schema.ListProperty()  # list of user document ids
    max_loan_days = schema.IntegerProperty()
    get_id = property(lambda self: self['_id'])

    def add_user(self, user_id):
        """Add `user_id` to the group and save; no-op if already present."""
        if user_id not in self.users:
            self.users.append(user_id)
            self.save()
        return True

    def del_user(self, user_id):
        """Remove `user_id` from the group and save; no-op if absent."""
        if user_id in self.users:
            # idiomatic replacement for ``del users[users.index(user_id)]``
            self.users.remove(user_id)
            self.save()
        return True

    def update_users(self, users):
        """Sync group membership onto the User documents themselves.

        Members missing from `users` are detached from this group; every
        id in `users` is attached.  Note: ``self.users`` itself is not
        rewritten here -- callers appear to manage that separately.
        """
        for user_id in self.users:
            if user_id not in users:
                user = User.get(user_id)
                user.del_group(self._id)
        for user_id in users:
            user = User.get(user_id)
            user.add_group(self._id)

    def id(self):
        """Return the underlying couch document id."""
        return self._id
Exemplo n.º 16
0
    def teardown(self):
        """Drop couch test databases and dispose of leaked SQL connections."""
        if self.should_skip_test_setup():
            return

        self.blob_db.close()
        if self.optimize_migrations:
            self.optimizer.__exit__(None, None, None)

        if self.skip_teardown_for_reuse_db:
            return

        # each uri is deleted at most once, even if several apps share it
        seen_uris = []
        for app, uri in self.apps:
            if uri in seen_uris:
                continue
            label = app.split('.')[-1]
            db = loading.get_db(label)
            try:
                db.server.delete_db(db.dbname)
            except ResourceNotFound:
                log.info(
                    "database %s not found for %s! it was probably already deleted.",
                    db.dbname, label)
            else:
                seen_uris.append(uri)
                log.info("deleted database %s for %s", db.dbname, label)

        # HACK clean up leaked database connections
        from corehq.sql_db.connections import connection_manager
        connection_manager.dispose_all()

        super(HqdbContext, self).teardown()
Exemplo n.º 17
0
def _get_couch_docs(start_key, end_key, batch_size, start_doc_id=None):
    """Fetch one descending batch of auditcare events between two view keys.

    :param start_key: 6-element date-part key ``[Y, M, D, h, m, s]``
    :param end_key: 6-element date-part key
    :param batch_size: maximum rows to return
    :param start_doc_id: resume marker; when set, paging skips past it
    :return: list of view rows with docs included

    NOTE(review): on the first call (no start_doc_id) this mutates the
    caller's start_key/end_key lists in place; confirm whether callers
    rely on observing the incremented keys before making this pure.
    """
    db = get_db("auditcare")
    if start_doc_id:
        kwargs = {"startkey_docid": start_doc_id, "skip": 1}
    else:
        # We are incrementing seconds by one for the first call
        # because matching records were not returned in descending order
        # due to the key structure: [2020, 2, 2, 0, 0, 0] comes after
        # [2020, 2, 2, 0, 0, 0, "AccessAudit", "system"] when descending
        # Records matching end_key will be start_key of another batch and may cause race conditions
        # We are adding 1 to end_key to avoid querying them.
        assert len(start_key) == 6, start_key
        assert len(end_key) == 6, end_key
        start_key[5] += 1
        end_key[5] += 1
        kwargs = {}
    result = db.view(
        "auditcare/all_events",
        startkey=start_key,
        endkey=end_key,
        reduce=False,
        include_docs=True,
        descending=True,
        limit=batch_size,
        **kwargs
    )
    return list(result)
Exemplo n.º 18
0
 def prime_everything(self, pool, verbose=False):
     """Spawn a priming query for the first view of every design doc in
     every unique database; failures for one db are ignored so the rest
     still get primed."""
     unique_dbs = get_unique_dbs()
     for app in unique_dbs:
         try:
             db = get_db(app)
             design_docs = db.view(DESIGN_DOC_VIEW,
                                   startkey=DESIGN_SK,
                                   endkey=DESIGN_EK,
                                   include_docs=True).all()
             for res in design_docs:
                 design_doc = res['doc']
                 design_doc_name = design_doc['_id'].split('/')[
                     -1]  # _design/app_name
                 if design_doc_name.endswith('-tmp'):
                     # it's a dangling -tmp preindex view, skip
                     continue
                 else:
                     views = design_doc.get('views', {})
                     # prime only the first view: warming one view builds
                     # the whole design doc's index
                     for view_name in views.keys():
                         pool.spawn(do_prime,
                                    app,
                                    design_doc_name,
                                    view_name,
                                    verbose=verbose)
                         break
         except Exception, ex:
             # deliberately best-effort: any failure for this db is ignored
             pass
Exemplo n.º 19
0
 def get_db(cls):
     """Make damn sure we get the correct DB for this particular app.

     If ``cls._db`` has been set by a superclass, the inherited lookup
     would grab the wrong db; always re-resolve from the app label and
     re-cache.
     """
     # plain attribute access; getattr() with no default added nothing
     db = get_db(cls._meta.app_label)
     cls._db = db
     return db
Exemplo n.º 20
0
 def recalculate_grains(self, grains, database):
     """
     Query CouchDB to get the updated value for the grains.
     """
     # hoisted out of the loop: the db lookup is loop-invariant
     db = get_db(database)
     result = []
     for grain in grains:
         # endkey grain + [{}] covers every row under the grain prefix
         result.extend(self.get_couch_rows(fluff_view, grain, grain + [{}], db=db))
     return result
Exemplo n.º 21
0
def couch_count(username):
    """Count auditcare URL-path events recorded for `username`."""
    rows = get_db("auditcare").view(
        "auditcare/urlpath_by_user_date",
        startkey=[username],
        endkey=[username, {}],
        reduce=False,
        include_docs=False,
    )
    return rows.count()
Exemplo n.º 22
0
 def get_db(cls):
     """Make damn sure we get the correct DB for this particular app.

     If ``cls._db`` has been set by a superclass, the inherited lookup
     would grab the wrong db; always re-resolve from the app label and
     re-cache.
     """
     # plain attribute access; getattr() with no default added nothing
     db = get_db(cls._meta.app_label)
     cls._db = db
     return db
Exemplo n.º 23
0
    def handle(self, *args, **options):
        """Sync each app's on-disk _design documents into its couch db."""
        for dbname in ('couchflow', 'couchauth', 'couchsessions', 'circulation'):
            db = get_db(dbname)
            design_path = settings.PROJECT_PATH + '/apps/' + dbname + '/_design/'
            FileSystemDocsLoader(design_path).sync(db)

            sys.stdout.write('Successfully updated "%s"\n' % dbname)
Exemplo n.º 24
0
    def get_db(self):
        """Return the pages database, resolving and caching it on first use.

        Raises AttributeError when settings.DB_PAGES is missing or empty.
        """
        from django.conf import settings
        if self._db is None:
            # use a default so a missing setting hits our explicit raise
            # below instead of getattr()'s own AttributeError (the original
            # check was unreachable when the attribute did not exist)
            db_pages = getattr(settings, "DB_PAGES", None)
            if not db_pages:
                raise AttributeError("DB_PAGES isn't set.")
            self._db = loading.get_db(db_pages)

        return self._db
Exemplo n.º 25
0
def _process_form(request, domain, app_id, user_id, authenticated,
                  auth_cls=AuthContext):
    """Parse and submit an incoming XForm, returning the HTTP response.

    A MultimediaBug while reading the submission is reported (with any
    form metadata that can be salvaged) and answered with a 400.  Any
    other 400 from the submission pipeline is mirrored into couchlog.
    """
    try:
        instance, attachments = couchforms.get_instance_and_attachment(request)
    except MultimediaBug as e:
        # salvage whatever metadata we can for the error report
        try:
            instance = request.FILES[MAGIC_PROPERTY].read()
            xform = convert_xform_to_json(instance)
            meta = xform.get("meta", {})
        except Exception:
            # was a bare ``except:``, which also swallowed SystemExit and
            # KeyboardInterrupt; Exception is broad enough here
            meta = {}

        details = {
            "domain": domain,
            "app_id": app_id,
            "user_id": user_id,
            "authenticated": authenticated,
            "form_meta": meta,
        }
        log_counter(MULTIMEDIA_SUBMISSION_ERROR_COUNT, details)
        notify_exception(None, "Received a submission with POST.keys()", details)
        return HttpResponseBadRequest(e.message)

    app_id, build_id = get_app_and_build_ids(domain, app_id)
    response = SubmissionPost(
        instance=instance,
        attachments=attachments,
        domain=domain,
        app_id=app_id,
        build_id=build_id,
        auth_context=auth_cls(
            domain=domain,
            user_id=user_id,
            authenticated=authenticated,
        ),
        location=couchforms.get_location(request),
        received_on=couchforms.get_received_on(request),
        date_header=couchforms.get_date_header(request),
        path=couchforms.get_path(request),
        submit_ip=couchforms.get_submit_ip(request),
        last_sync_token=couchforms.get_last_sync_token(request),
        openrosa_headers=couchforms.get_openrosa_headers(request),
    ).get_response()
    if response.status_code == 400:
        db_response = get_db('couchlog').save_doc({
            'request': unicode(request),
            'response': unicode(response),
        })
        logging.error(
            'Status code 400 for a form submission. '
            'Response is: \n{0}\n'
            'See couchlog db for more info: {1}'.format(
                unicode(response),
                db_response['id'],
            )
        )
    return response
Exemplo n.º 26
0
 def teardown(self):
     """Enable database access"""
     from django.conf import settings
     settings.DB_ENABLED = self.original_db_enabled
     # point every patched document class back at its real app db
     for doc_class in self.db_classes:
         doc_class.set_db(loading.get_db(doc_class._meta.app_label))
     couchlog.signals.got_request_exception.connect(
         couchlog.signals.log_request_exception)
     self.db_patch.stop()
Exemplo n.º 27
0
Arquivo: artist.py Projeto: lidel/mmda
def get_recent_artists():
    """
    Get recently cached artists required by mmda.artists.index

    @return: a list of dicts with artist name and mbid
    """
    rows = get_db('artists').view(
        'artists/recent_artists', limit=10, descending=True)
    return [row['value'] for row in rows.all()]
Exemplo n.º 28
0
def sync_design_docs(temp=None):
    """Push the fluff ``_design`` documents to every indicator pillow's db.

    :param temp: optional suffix forwarded to ``sync_docs`` (used to stage
        design docs under a temporary name).
    """
    # renamed from `dir`, which shadowed the builtin
    design_dir = os.path.abspath(os.path.dirname(__file__))
    for pillow in import_pillows(instantiate=False):
        # only fluff pillows (those backed by an indicator document) have dbs
        if hasattr(pillow, 'indicator_class'):
            app_label = pillow.indicator_class._meta.app_label
            db = get_db(app_label)
            sync_docs.sync_design_docs(db,
                                       os.path.join(design_dir, "_design"),
                                       FLUFF,
                                       temp=temp)
Exemplo n.º 29
0
class CirculationLog(Document):
    """One circulation event (loan / return / renew) for a library item."""
    _db = get_db("couchflow")

    # kind of circulation event
    type = schema.StringProperty(choices=['loan', 'return', 'renew'])
    # where/how the item was lent out
    loan_type = schema.StringProperty(choices=['room', 'home', 'interbiblio'])
    item_type = schema.StringProperty()
    date = schema.DateProperty(default=datetime.date.today)
    # loan length; unit is not stated here -- presumably days, TODO confirm
    length = schema.IntegerProperty()
    item_id = schema.StringProperty()
    user_id = schema.StringProperty()
    timestamp_added = schema.DateTimeProperty(default=datetime.datetime.now)
Exemplo n.º 30
0
 def _get_designs(self):
     """Collect a DesignInfo for every pillow that has an indicator class."""
     designs = []
     for pillow in get_all_pillow_classes():
         if hasattr(pillow, 'indicator_class'):
             app_label = pillow.indicator_class._meta.app_label
             designs.append(DesignInfo(
                 # NOTE(review): the label comes from self.app_label while
                 # the db uses the pillow's own app_label -- confirm this
                 # mismatch is intentional before changing.
                 app_label=self.app_label,
                 db=get_db(app_label),
                 design_path=os.path.join(self.dir, "_design")
             ))
     return designs
Exemplo n.º 31
0
def do_prime(app_label, design_doc_name, view_name, verbose=False):
    """Warm one couch view by querying it with limit=0."""
    db = get_db(app_label)
    view_path = '%s/%s' % (design_doc_name, view_name)
    try:
        # limit=0 forces the view index to build without transferring rows
        list(db.view(view_path, limit=0))
        if verbose:
            sys.stdout.write('.')
            sys.stdout.flush()
    except ResourceNotFound:
        if verbose:
            sys.stdout.write('!=>%s/%s/%s' % (app_label, design_doc_name, view_name))
            sys.stdout.flush()
Exemplo n.º 32
0
def do_prime(app_label, design_doc_name, view_name, verbose=False):
    """Prime a single couch view; print progress markers when verbose."""
    db = get_db(app_label)
    try:
        # a limit=0 query still triggers index building for the view
        list(db.view('%s/%s' % (design_doc_name, view_name), limit=0))
    except ResourceNotFound:
        if verbose:
            sys.stdout.write('!=>%s/%s/%s' % (app_label, design_doc_name, view_name))
            sys.stdout.flush()
    else:
        if verbose:
            sys.stdout.write('.')
            sys.stdout.flush()
Exemplo n.º 33
0
 def _teardown_couchdb(self):
     """Delete every couch database configured in COUCHDB_DATABASES."""
     dropped = []
     for app, url in getattr(settings, "COUCHDB_DATABASES", []):
         db = loading.get_db(app.split('.')[-1])
         if db.dbname in dropped:
             continue
         try:
             db.server.delete_db(db.dbname)
         except ResourceNotFound:
             # already gone -- nothing to do
             pass
         else:
             dropped.append(db.dbname)
Exemplo n.º 34
0
 def _get_designs(self):
     """Collect a DesignInfo for every configured fluff pillow."""
     from fluff.pillow import get_fluff_pillow_configs
     designs = []
     for config in get_fluff_pillow_configs():
         pillow = config.get_instance()
         app_label = pillow.indicator_class._meta.app_label
         designs.append(DesignInfo(
             # NOTE(review): the label comes from self.app_label while the
             # db uses the pillow's own app_label -- confirm this mismatch
             # is intentional before changing.
             app_label=self.app_label,
             db=get_db(app_label),
             design_path=os.path.join(self.dir, "_design")
         ))
     return designs
Exemplo n.º 35
0
Arquivo: artist.py Projeto: lidel/mmda
def get_artist_primary_releases(mbid):
    """
    Get a list of official, primary releases by an artist specified by MusicBrainzID.

    @param mbid:    a string containing a MusicBrainz ID of an artist

    @return: a list of dicts with basic primary release meta-data
    """
    rows = get_db('artists').view('artists/release_groups', key=mbid)
    return [row['value'] for row in rows.all()]
Exemplo n.º 36
0
def create_couch_user(username, password):
    """Return the couchauth user for `username`, creating it if absent."""
    User.set_db(get_db("couchauth"))
    existing = User.view("couchauth/username")[username].one()
    if existing:
        return existing
    user = User()
    user.username = username
    user.set_password(password)
    user.save()
    return user
Exemplo n.º 37
0
 def _get_designs(self):
     """Collect a DesignInfo for every processor of every fluff pillow."""
     from fluff.pillow import get_fluff_pillow_configs
     designs = []
     for config in get_fluff_pillow_configs():
         pillow = config.get_instance()
         for processor in pillow.processors:
             app_label = processor.indicator_class._meta.app_label
             designs.append(
                 # NOTE(review): the label uses self.app_label while the db
                 # uses the processor's app_label -- confirm this mismatch
                 # is intentional before changing.
                 DesignInfo(app_label=self.app_label,
                            db=get_db(app_label),
                            design_path=os.path.join(self.dir, "_design")))
     return designs
Exemplo n.º 38
0
Arquivo: utils.py Projeto: fk-lx/mygpo
def sync_design_docs():
    """ synchronize the design docs for all databases """
    base_dir = settings.BASE_DIR

    for part, label in settings.COUCHDB_DDOC_MAPPING.items():
        ddoc_path = os.path.join(base_dir, '..', 'couchdb', part, '_design')
        logger.info('syncing ddocs for "%s" from "%s"', label, ddoc_path)

        target_db = loading.get_db(label)
        FileSystemDocsLoader(ddoc_path).sync(target_db, verbose=True)
Exemplo n.º 39
0
def get_db_for_instance(instance):
    """
    Get the database for a model instance, even if it's not an explicit
    couch model, based on the definition in the app's settings.

    Returns None if no database is found.
    """
    app_label = ContentType.objects.get_for_model(instance).app_label
    try:
        return get_db(app_label)
    except KeyError:
        return None
Exemplo n.º 40
0
Arquivo: artist.py Projeto: lidel/mmda
def get_artist_best_pictures(mbid):
    """
    Get artist pictures required by mmda.artists.show_artist

    @param mbid:    a string containing a MusicBrainz ID of an artist

    @return: a list of dicts with artist picture meta-data
    """
    rows = get_db('pictures').view('pictures/best_pictures', key=mbid, limit=4)
    pictures = [row['value'] for row in rows.all()]

    shuffle(pictures)

    return pictures
Exemplo n.º 41
0
def get_attach(request, db_name, document_id, name):
    """
    Return a file stored as a couchdb attachment, falling back to a
    placeholder book image when the document or attachment is missing.
    """
    # TODO: move out of couchflow to somewhere more generic
    try:
        db = get_db(db_name)
        response = db.fetch_attachment(document_id, name, stream=True)
        mimetype = response.resp.headers.get('Content-Type')
    except (KeyError, ResourceNotFound):
        _path = settings.MEDIA_ROOT + '/images/book.jpg'
        # binary mode: the fallback is a JPEG, not text
        response = open(_path, 'rb')
        mimetype = "image/jpeg"
    return HttpResponse(response, mimetype=mimetype)
Exemplo n.º 42
0
    def handle(self, *args, **options):
        """Dump every configured couch database to <dbs_dir>/<uri>.json."""
        dbs = ("couchauth", "couchflow", "couchsearch", "circulation", "couchsessions", "config")
        dbs_dir = options["dbs_dir"]

        seen_uris = []
        for dbname in dbs:
            db = get_db(dbname)
            uriname = db.uri.rsplit("/", 1)[1]
            if uriname in seen_uris:
                continue  # several names may point at one physical db
            seen_uris.append(uriname)
            with open(os.path.join(dbs_dir, uriname + ".json"), "w") as fileobj:
                dump_db(db, output=fileobj)
            sys.stdout.write('Successfully dumped "%s"\n' % uriname)
Exemplo n.º 43
0
Arquivo: views.py Projeto: GaloC/gplib
def get_attach(request, db_name, document_id, name):
    """
    Return a file stored as a couchdb attachment, falling back to a
    placeholder book image when the document or attachment is missing.
    """
    # TODO: move out of couchflow to somewhere more generic
    try:
        db = get_db(db_name)
        response = db.fetch_attachment(document_id, name, stream=True)
        mimetype = response.resp.headers.get('Content-Type')
    except (KeyError, ResourceNotFound):
        _path = settings.MEDIA_ROOT + '/images/book.jpg'
        # binary mode: the fallback is a JPEG, not text
        response = open(_path, 'rb')
        mimetype = "image/jpeg"
    return HttpResponse(response, mimetype=mimetype)
Exemplo n.º 44
0
Arquivo: views.py Projeto: lidel/mmda
def show_artist_refresh(request, uri_artist, mbid):
    """
    Show reset page of an artist specified by mbid.

    If request contains POST data, perform reset.

    @param mbid:        a string containing a MusicBrainz ID of an artist
    @param uri_artist:  a string containing SEO-friendly artist name

    @return: a rendered artist page
    """
    if request.POST:
        mbid = request.POST['mbid']
        reset = request.POST.getlist('reset')

        rc_ip = request.META['REMOTE_ADDR']
        rc_token = request.POST['recaptcha_challenge_field']
        rc_input = request.POST['recaptcha_response_field']
        captcha = rc.submit(rc_token, rc_input, settings.RECAPTCHA_PRIV_KEY, rc_ip)

        if captcha.is_valid:
            delete_memcached_keys(get_basic_artist(mbid))
            for db in reset:
                try:
                    del get_db(db)[mbid]
                except Exception:
                    # best-effort purge; was a bare ``except:`` that also
                    # swallowed SystemExit/KeyboardInterrupt
                    continue
            if 'artists' in reset:
                release_groups = get_db('artists').view('artists/release_groups', key=mbid)
                for group in release_groups:
                    del get_db('artists')[group['id']]
            # TODO: remove 'lastfm' from artist cache_state if pictures were removed.

    # NOTE: the template reads these via locals(); keep the names stable
    artist = get_basic_artist(mbid)
    artist_cache_state = get_artist_cache_state(mbid)
    html_for_captcha = get_captcha_html(rc.API_SERVER)
    return render_to_response('artists/show_artist_refresh.html', locals())
Exemplo n.º 45
0
class Session(Document):
    """Django session data stored as a couch document."""
    _db = get_db("couchsessions")
    session_key = StringProperty()
    session_data = StringProperty()
    expire_date = DateTimeProperty()

    @classmethod
    def get_session(cls, session_key):
        """Look up a session by its key; return None when none exists."""
        matches = cls.view('couchsessions/sessions_by_key',
                           key=session_key,
                           include_docs=True)
        try:
            return matches.first()
        except ResourceNotFound:
            return None
Exemplo n.º 46
0
Arquivo: utils.py Projeto: fk-lx/mygpo
def view_cleanup():
    """ do a view-cleanup for all databases """

    logger.info('Doing view cleanup for all databases')
    for label in settings.COUCHDB_DDOC_MAPPING.values():
        logger.info('Doing view cleanup for database "%s"', label)
        res = loading.get_db(label).view_cleanup()

        # escalate to a warning when couch reports anything other than ok
        log = logger.info if res.get('ok', False) else logger.warn

        log('Result of view cleanup for database "%s": %s', label, res)
Exemplo n.º 47
0
class Conector(Document):
    """Graph node connecting tasks in a couchflow workflow.

    Holds the ids of the previous/next tasks and the step number of this
    connector within its workflow.
    """

    _db = get_db("couchflow")
    workflow_id = schema.StringProperty()
    conector_type = schema.BooleanProperty(default=True)
    name = schema.StringProperty(default="Conector")
    status = schema.StringProperty()
    step = schema.IntegerProperty(default=0)
    is_clone = schema.BooleanProperty(default=False)
    start = schema.BooleanProperty(default=True)
    end = schema.BooleanProperty(default=False)
    active = schema.BooleanProperty()
    previous_tasks = schema.ListProperty()
    next_tasks = schema.ListProperty()
    get_id = property(lambda self: self['_id'])
    wfitems_ids = schema.ListProperty()

    def put_step(self):
        """Assign this connector's step number and sync it to the workflow.

        The step is 1 for a starting connector (no previous tasks),
        otherwise one past the step of the first previous task.  Persists
        both this connector and the owning workflow.  Returns True.
        """
        if not self.previous_tasks:
            self.step = 1
            self.save()
            workflow = WorkFlow.get(self.workflow_id)
            workflow.steps = self.step
            # BUG FIX: this branch previously never persisted the workflow,
            # so `workflow.steps` was silently lost (the branch below does
            # call workflow.save()).
            workflow.save()
            return True
        prev_task = get_task(self.previous_tasks[0])
        self.step = prev_task.step + 1
        workflow = WorkFlow.get(self.workflow_id)
        workflow.steps = self.step
        workflow.save()
        self.save()
        return True

    def back_to_top(self):
        """Return the list of previous Task documents, or None at the start.

        NOTE(review): this method loads tasks via Task.get while go_deep
        uses get_task — presumably equivalent; confirm before unifying.
        """
        previous_tasks = []
        if self.previous_tasks:
            for prev_task_id in self.previous_tasks:
                prev_task = Task.get(prev_task_id)
                previous_tasks.append(prev_task)
            return previous_tasks
        return None

    def go_deep(self):
        """Return the list of next Task documents, or None at the end."""
        next_tasks = []
        if self.next_tasks:
            for next_task_id in self.next_tasks:
                next_task = get_task(next_task_id)
                next_tasks.append(next_task)
            return next_tasks
        return None
Exemplo n.º 48
0
 def setUp(self):
     """Create the couchexport db handle and a demo export schema fixture."""
     split_column = {
         'index': 'multi',
         'display': 'Split',
         'doc_type': 'SplitColumn',
         'options': ['a', 'b', 'c', 'd'],
     }
     export_table = {
         'index': '#',
         'display': 'Export',
         'columns': [split_column],
     }
     self.db = get_db('couchexport')
     self.custom_export = SavedExportSchema.wrap({
         'type': 'demo',
         'default_format': Format.JSON,
         'index': json.dumps(['test_custom']),
         'tables': [export_table],
     })
     self.custom_export.filter_function = SerializableFunction()
     self.schema = [{'#export_tag': [u'string'], 'tag': u'string', 'multi': u'string'}]
Exemplo n.º 49
0
    def handle(self, *args, **options):
        """Dump each configured couch database to <dbs_dir>/<uriname>.json.

        Databases that share a URI name are dumped only once.
        """
        target_dir = options["dbs_dir"]
        db_labels = ('couchauth', 'couchflow', 'couchsearch', 'circulation',
                     'couchsessions', 'config')

        dumped = []
        for label in db_labels:
            database = get_db(label)
            uriname = database.uri.rsplit("/", 1)[1]
            if uriname in dumped:
                # Several labels may point at the same physical database.
                continue
            dumped.append(uriname)
            out_path = os.path.join(target_dir, uriname + ".json")
            with open(out_path, "w") as fileobj:
                dump_db(database, output=fileobj)
            sys.stdout.write('Successfully dumped "%s"\n' % uriname)
Exemplo n.º 50
0
    def scrub_legacy_couch_events(self, username):
        """Point legacy auditcare events for *username* at self.new_username."""
        from couchdbkit.ext.django.loading import get_db
        from corehq.util.couch import DocUpdate, iter_update
        from corehq.util.log import with_progress_bar

        new_username = self.new_username

        def _rename_user(event_dict):
            # Rewrite the event's user field to the new username.
            event_dict['user'] = new_username
            return DocUpdate(doc=event_dict)

        db = get_db("auditcare")
        rows = db.view(
            'auditcare/urlpath_by_user_date',
            startkey=[username],
            endkey=[username, {}],
            reduce=False,
            include_docs=False,
        )
        doc_ids = set(row["id"] for row in rows)
        iter_update(db, _rename_user, with_progress_bar(doc_ids))
Exemplo n.º 51
0
def search(view, **params):
    ''' Ugly hack to support new couchdb-lucene search '''

    # Default to the dedicated search database unless the caller names one
    # via the 'database' keyword (popped so it is not forwarded to the view).
    dbname = 'couchsearch'
    if 'database' in params:
        dbname = params.pop('database')
    db1 = get_db(dbname)
    # Re-fetch the database through its server object so we hold a handle
    # whose resource URI can be rewritten below without touching db1.
    db = db1.server.get_or_create_db(db1.uri.rsplit("/", 1)[-1])
    uri = db.res.uri

    # NOTE(review): when the resource URI has no trailing slash this strips
    # its final path segment (the db name) and leaves "<server>/", so the
    # lucene view path below resolves relative to the server root rather
    # than the database.  Presumably intentional for couchdb-lucene's URL
    # layout — confirm against the proxy configuration before changing.
    if not uri.endswith('/'):
        db.res.uri = uri.rsplit('/', 1)[0] + '/'

    # db.uri (distinct from db.res.uri) still ends with the database name.
    db_name = db.uri.rsplit('/', 1)[1]

    # Old couchdb-lucene exposed _fti under the database path; newer
    # versions mount it at the server root under _fti/local/<db>.
    if settings.OLD_COUCH:
        view = '%s/_fti/_design/%s' % (db_name, view)
    else:
        view = '_fti/local/%s/_design/%s' % (db_name, view)
    return View(db, view)(**params)
Exemplo n.º 52
0
def _process_form(request,
                  domain,
                  app_id,
                  user_id,
                  authenticated,
                  auth_cls=AuthContext):
    """Build and submit a couchforms SubmissionPost for an incoming form.

    Extracts the XML instance and attachments from *request*, resolves the
    app/build ids for *domain*, and returns the HTTP response produced by
    ``SubmissionPost.get_response()``.  A 400 response is additionally
    persisted to the ``couchlog`` database and logged for later debugging.
    """
    instance, attachments = couchforms.get_instance_and_attachment(request)
    app_id, build_id = get_app_and_build_ids(domain, app_id)
    response = couchforms.SubmissionPost(
        instance=instance,
        attachments=attachments,
        domain=domain,
        app_id=app_id,
        build_id=build_id,
        auth_context=auth_cls(
            domain=domain,
            user_id=user_id,
            authenticated=authenticated,
        ),
        location=couchforms.get_location(request),
        received_on=couchforms.get_received_on(request),
        date_header=couchforms.get_date_header(request),
        path=couchforms.get_path(request),
        submit_ip=couchforms.get_submit_ip(request),
        last_sync_token=couchforms.get_last_sync_token(request),
        openrosa_headers=couchforms.get_openrosa_headers(request),
    ).get_response()
    if response.status_code == 400:
        # Record the bad submission in couchlog so it can be inspected later.
        # NOTE: the `unicode` builtin means this module targets Python 2.
        db_response = get_db('couchlog').save_doc({
            'request':
            unicode(request),
            'response':
            unicode(response),
        })
        logging.error('Status code 400 for a form submission. '
                      'Response is: \n{0}\n'
                      'See couchlog db for more info: {1}'.format(
                          unicode(response),
                          db_response['id'],
                      ))
    return response
Exemplo n.º 53
0
    def handle(self, *args, **options):
        """Replicate a couch database from *source* to *target* (Python 2).

        args must be exactly (source, target).  Optional flags select the
        server, a couchflow/no-clones replication filter, and whether the
        target database should be created.
        """
        if len(args) != 2:
            print "Usage: split_dbs [params] source target"
            return False

        if options['server']:
            server = Server(options['server'])
            print "Using user specified server"
        else:
            # Fall back to the server that hosts the couchflow database.
            server = get_db("couchflow").server
            print "Using default couchflow server (override with --server)"
        print "Server uri:", server.uri

        params = {}
        qparams = {}

        # Translate the CLI flags into filter query parameters.
        if options['couchflow']:
            print "Couchflow pass"
            qparams['couchflow'] = '1'
        if options['no_couchflow']:
            print "No couchflow pass"
            qparams['couchflow'] = '0'
        if options['no_clones']:
            print "Filter out clones"
            qparams['no_clones'] = '1'

        if 'couchflow' in qparams or 'no_clones' in qparams:
            print "Using filter"
            params['filter'] = 'filters/couchflow'
            params['query_params'] = qparams

        if options['create_target']:
            params['create_target'] = True

        print "Starting replication"
        print "Check %s/_utils/status.html for status" % server.uri
        server.replicate(*args, **params)
        print "Done"
Exemplo n.º 54
0
class WFItem(Document):
    """
    WFItem could be any material of the system, like a book or a movie
    """
    _db = get_db("couchflow")
    name = schema.StringProperty()
    item_type = schema.StringProperty()
    is_clone = schema.BooleanProperty(default=False)

    # Field data keyed by tag; numeric tags <= 999 are treated as MARC
    # fields by marc_record() below.
    fields_properties = schema.SchemaDictProperty(Fields)

    # circulation
    loan = schema.SchemaProperty(Loan)
    reserves = schema.SchemaListProperty(Reserve)

    # Unified Resource Name
    # We use a sum of fields to generate urn field
    urn_config = schema.SchemaListProperty(UrnConfig)

    # Not every item in the system can be loanable
    loanable = schema.BooleanProperty(default=True)

    # Order Number to track orders between workflows
    order_nbr = schema.IntegerProperty()

    # Reference Item
    # if it's a reference, it is used to complete fields from other items
    reference = schema.BooleanProperty(default=False)

    comments = schema.StringProperty()

    @property
    def urn(self):
        """
        Returns the urn based on urn_config property
        or None if it have not a valid urn
        """

        return get_urn(self)

    @property
    def inventory_nbr(self):
        """
        Returns inventory number (876_a) or None
        """

        field = self.get_field("876", "a")
        if field:
            return field[0]

    @property
    def title(self):
        """
        Returns item title, here for consistency

        Falls back to "Unknown title" when field 245_a is missing.
        """

        title = self.get_field("245", "a")
        if not title:
            title = "Unknown title"
        else:
            title = title[0]
        return title

    def get_field(self, field, subfield=None, first=True):
        """
        Helper that returns field or subfield, or None if can't find it
        """
        return get_field(self, field, subfield, first)

    def get_info(self):
        """Return [("tag - field_name", exec_value), ...] for fields
        whose first occurrence has a value."""
        fields = []
        for k in self.fields_properties:
            field_prop = self.fields_properties[k]
            # TODO: support to more fields
            first_field_prop = field_prop.first
            if first_field_prop['exec_value']:
                value = first_field_prop['exec_value']
                k = k + ' - ' + field_prop['field_name']
                field = (k, value)
                fields.append(field)

        return fields

    def show_fields_properties(self):
        """
        returns [(sequence number, Fields), ...]

        Fields are ordered by their numeric id (ids may be falsy, in which
        case the falsy value itself is used as the sort key).
        """
        return enumerate(
            sorted(self.fields_properties.values(),
                   key=lambda x: x.id and int(x.id)))

    def fields_properties_items_sorted(self):
        # (tag, Fields) pairs sorted by tag (string comparison on the keys).
        return sorted(self.fields_properties.items())

    def check_form(self, post, task=None):
        """Validate POSTed field values against each field's declared type.

        Returns a list of [field_id, error] pairs; an empty list means the
        form is valid.  When *task* is given, its item_required_fields are
        also checked for presence (subfields are posted with an "sf_"
        prefix).
        """
        errors = []

        def validate_field(number, value, field_type):
            """
            spec is the DocumentSchema for either Field or SubField
            """

            if field_type == 'int':
                if not value.isdigit():
                    errors.append([number, field_type])
            elif field_type == 'float':
                try:
                    float(value)
                except ValueError:
                    errors.append((number, field_type))

        for field_id, fields in self.fields_properties.iteritems():
            # forms don't support repeated fields yet (TODO elsewhere)
            field = fields.first
            field_name = fields.field_name
            if field_id in post:
                validate_field(field_name, post[field_id], fields.type)
                for subfield_id, subfield in field.subfields.iteritems():
                    sf_full_id = "%s_%s" % (field_id, subfield_id)
                    # TODO: check
                    if "sf_input" in post:
                        validate_field(sf_full_id, post["sf_" + sf_full_id],
                                       subfield)

        if task:
            for field in task.item_required_fields:
                if "_" in field:
                    value = post.get("sf_" + field, '')
                else:
                    value = post.get(field, '')

                if not value:
                    errors.append([field, 'required'])

        return errors

    def marc_record(self):
        """
        Returns the item as a pymarc Record
        """
        record = pymarc.Record()

        for tag, fprop in self.fields_properties.iteritems():
            # Skip non-numeric tags (internal, non-MARC fields).
            try:
                tag = int(tag)
            except Exception, error:
                continue

            # only marc fields
            if tag > 999:
                continue

            if fprop.first.subfields:
                # Flatten subfields into pymarc's alternating
                # [code, value, code, value, ...] list.
                sfields = []
                indicators = [
                    fprop.first.indicator1 or "#", fprop.first.indicator2
                    or "#"
                ]
                for sf in fprop.first.subfields.values():
                    for val in sf.exec_value:
                        sfields.append(sf.field_name)
                        sfields.append(val)
                field = pymarc.Field(tag, indicators, sfields)
                record.add_field(field)
            else:
                # Field without subfields: emit one data field per value.
                try:
                    exec_value = fprop.first.exec_value
                except Exception:
                    exec_value = []
                indicators = [
                    fprop.first.indicator1 or "#", fprop.first.indicator2
                    or "#"
                ]
                for val in exec_value:
                    record.add_field(
                        pymarc.Field(tag, indicators, data=str(val)))
        return record
Exemplo n.º 55
0
def reference_complete(request):
    """
    Returns data for reference complete

    Ajax autocomplete endpoint: searches reference items for
    request.GET["term"] and returns a JSON list of {'label': ...} entries
    built from field 700_a of each matching document.
    """

    query = request.GET.get("term")
    qfilter = sanitize_lucene(query)
    search_config = Config.get_or_create("search")
    # autoridades
    query = make_query(search_config, qfilter, True,
                       "05a721a33096563ec44d8da885fa1a30")

    show_config = {}

    result = []

    # Result Display Configuration
    for item in search_config.values.values():
        item_type = item['type']

        fields = OrderedDict()
        for field in item['fields']:
            if field['show']:
                more = field.get('more', False)
                fields[field['field']] = (field['name'], more)

        show_config[item_type] = fields

    try:
        docs = search('search/by_field',
                      q=query,
                      include_fields="022_a,020_a,urn,_id",
                      limit=133742)
        docs = list(docs)
    except RequestFailed:
        # NOTE(review): `docs` is left unbound here, so the loop below
        # raises NameError when the search request fails — confirm whether
        # this path should return an empty result instead.
        print "Fail!"
        print "QQ", query

    # group uniq docs by urn
    uniq_docs = {}

    for doc in docs:
        try:
            urn = doc['fields']['urn']
        except KeyError:
            print "Item should have urn"
            continue

        # TODO: check if should be a list
        if type(urn) is list:
            urn = urn[0]

        uniq_docs.setdefault(urn, {'count': 0, 'id': None})
        uniq_docs[urn]['id'] = doc['id']
        uniq_docs[urn]['count'] += 1

    db = get_db('couchflow')
    keys = [doc['id'] for doc in uniq_docs.values()]

    def _get_field(field, doc):
        # Return the first exec_value of field (or field_subfield) from a
        # raw couch document dict, or "" when absent.
        subfield = ""
        if '_' in field:
            field, subfield = field.split('_')

        #field_value = doc.fields_properties.get(field, None)
        try:
            field_value = doc["fields_properties"][field]
        except KeyError:
            field_value = ""

        if field_value and len(field_value['list']):
            field_value = field_value['list'][0]
            if subfield:
                field_value = field_value['subfields'].get(subfield, "")

            if field_value and 'exec_value' in field_value and field_value[
                    'exec_value']:
                exec_val = field_value['exec_value'][0]
                if not isinstance(exec_val, basestring):
                    if exec_val == None:
                        exec_val = ""
                    exec_val = str(exec_val)
                return exec_val
        return ""

    # Fetch the unique documents in bulk and build the label list.
    for doc in db.view('_all_docs', keys=keys, include_docs=True):
        #doc = WFItem.wrap(doc['doc'])
        doc = doc["doc"]

        show_item_config = show_config.get(doc['item_type'], None)
        if not show_item_config:
            print 'Unknown', doc['item_type']
            continue

        field = _get_field('700_a', doc)
        result.append({'label': field})
        #data = ''
        #for field, (name, more) in show_item_config.iteritems():
        #    row_value = '%s: %s' % (name, _get_field(field, doc))

        #    row_value = row_value.replace('/', '').replace(',', '')
        #    data += '<div>%s</div>' %  row_value

        #doc_urn = get_urn(doc)
        #if not doc_urn:
        #    print "Invalid Item, need a urn", doc["_id"]
        #    continue

        #result.append(data)

    return HttpResponse(simplejson.dumps(result),\
                    mimetype='application/javascript')
Exemplo n.º 56
0
def data(request):
    """
    Returns data for datatables

    Builds the search query from the POSTed filter (raw lucene query,
    'filter', or 'filtered'), runs it through couchdb-lucene, groups the
    hits by URN, and assembles one datatables row per unique document
    (thumbnail, display fields, availability count and existences).
    """
    user = request.user

    secho = int(request.POST.get('sEcho', 0)) + 1
    return_dict = {
        "aaData": [],
        "iTotalRecords": 0,
        "iTotalDisplayRecords": 0,
        "sEcho": secho
    }
    query = None
    sort = None
    sort_reverse = False
    qfilter = ''
    basic_greenstone = True
    search_config = Config.get_or_create("search")

    if 'raw_query' in request.POST:
        # Pre-built lucene query: "<query>||<sort>||<greenstone query>".
        query, sort_raw, query_greenstone = \
            request.POST['raw_query'].split("||", 2)
        if sort_raw:
            # First char encodes direction: '\' means reverse.
            sort = sort_raw[1:]
            sort_reverse = (sort_raw[0] == '\\')

        # make_advanced_query() doesn't handle greenstone filtering.
        # Instead, it leaves a placeholder that is replaced here.
        # This is to leave all the querying to the second request
        # (this one), and only lucene query building to the first.

        query = query.replace(
            GREENSTONE_NEWSEARCH_PLACEHOLDER, "(%s)" % (' OR '.join([
                "urn:%s" % x['nodeID']
                for x in greenstone_query("", "", query_greenstone)
            ])))
        basic_greenstone = False

    elif 'filter' in request.POST:
        # NOTE(review): the sanitized value is immediately overwritten by
        # the raw POST value on the next line — confirm which is intended.
        qfilter = sanitize_lucene(request.POST['filter'])
        qfilter = request.POST['filter']
        reference = request.POST.get('reference', 0)
        filter_item_type = request.POST.get('item_type', None)

        query = make_query(search_config, qfilter, reference, filter_item_type)
    elif 'filtered' in request.POST:
        qfilter = sanitize_lucene(request.POST['filtered'])
        reference = request.POST.get('reference', 0)
        filter_item_type = request.POST.get('item_type', None)

        query = make_query(search_config, qfilter, reference, filter_item_type)

    if not query:
        print "search failed: query = %s" % query
        return HttpResponse(simplejson.dumps(return_dict),\
                    mimetype='application/javascript')

    show_config = {}

    # Result Display Configuration
    for item in search_config.values.values():
        item_type = item['type']

        fields = OrderedDict()
        for field in item['fields']:
            if field['show']:
                more = field.get('more', False)
                existence = field.get('exist', False)
                fields[field['field']] = (field['name'], more, existence)

        show_config[item_type] = fields

    try:
        docs = search('search/by_field',
                      q=query,
                      include_fields="022_a,020_a,urn,_id,existence",
                      limit=133742)
        docs = list(docs)
    except RequestFailed:
        print "search failed: request failed"
        return HttpResponse(simplejson.dumps(return_dict),\
                    mimetype='application/javascript')

    db = get_db('couchflow')

    # group uniq docs by urn
    uniq_docs = {}

    if basic_greenstone:
        # Merge greenstone full-text hits in by URN before the lucene hits.
        greenstone_urns = [
            x['nodeID'] for x in greenstone_query("", "", qfilter)
        ]
        greenstone_docs = db.view("couchflow/by_urn", keys=greenstone_urns)

        for doc in greenstone_docs:
            urn = doc['key']
            uniq_docs.setdefault(urn, {
                'count': 0,
                'id': None,
                "existences": []
            })
            uniq_docs[urn]['id'] = doc['id']
            #uniq_docs[urn]['count'] += 1
            uniq_docs[urn]['greenstone'] = True

    for doc in docs:
        try:
            urn = doc['fields']['urn']
        except KeyError:
            urn = None

        if urn is None or urn == 'undefined':
            print "Item should have urn", doc['id']
            continue

        # TODO: check if should be a list
        if type(urn) is list:
            urn = urn[0]

        # Existences (physical copies) are counted; the non-existence hit
        # becomes the representative document for the URN.
        uniq_docs.setdefault(urn, {'count': 0, 'id': None, "existences": []})
        if doc['fields']['existence'] != "false":
            uniq_docs[urn]['existences'].append(doc['id'])
            uniq_docs[urn]['count'] += 1
        else:
            uniq_docs[urn]['id'] = doc['id']

    columns = []

    # Datatables pagination window.
    start = int(request.POST['iDisplayStart'])
    length = int(request.POST['iDisplayLength'])

    #sort_col = int(request.POST['iSortCol_0'])
    #sort_dir = request.POST['sSortDir_0']

    count = len([u for u in uniq_docs.values() if u["id"]])

    sorted_uniq_docs = uniq_docs.values()
    if basic_greenstone:
        # Greenstone hits first.
        sorted_uniq_docs.sort(key=lambda x: 'greenstone' not in x)

    keys = [doc['id'] for doc in sorted_uniq_docs[start:start + length]]
    keys_exist = [doc['existences']\
            for doc in sorted_uniq_docs[start:start+length]]
    keys_exist = [item for sublist in keys_exist for item in sublist]

    def _get_field(field, doc):
        # Return the first exec_value of field (or field_subfield) from a
        # raw couch document dict, or "" when absent.
        subfield = ""
        if '_' in field:
            field, subfield = field.split('_')

        #field_value = doc.fields_properties.get(field, None)
        try:
            field_value = doc["fields_properties"][field]
        except KeyError:
            field_value = ""

        if field_value and len(field_value['list']):
            field_value = field_value['list'][0]
            if subfield:
                field_value = field_value['subfields'].get(subfield, "")

            if field_value and 'exec_value' in field_value and field_value[
                    'exec_value']:
                exec_val = field_value['exec_value'][0]
                if not isinstance(exec_val, basestring):
                    if exec_val == None:
                        exec_val = ""
                    exec_val = str(exec_val)
                return exec_val
        return ""

    # get existences
    existences = {}
    for doc in db.view('_all_docs', keys=keys_exist, include_docs=True):
        existences[doc["doc"]["_id"]] = doc["doc"]

    for doc in db.view('_all_docs', keys=keys, include_docs=True):
        #doc = WFItem.wrap(doc['doc'])
        if not "doc" in doc:
            continue
        doc = doc["doc"]

        show_item_config = show_config.get(doc['item_type'], None)
        if not show_item_config:
            print 'Search config missing for', doc['item_type']
            continue

        # Thumbnail is stored as attachment name in field 5000.
        try:
            img_name = doc['fields_properties']['5000']['list'][0][
                'exec_value'][0]
        except Exception, error:
            print 'Image not found', error
            img_name = 'none.png'

        img_path = "/couchflow/get_attach/couchflow/%s/%s" % (doc['_id'],
                                                              img_name)
        row = [doc['_id'], '<img style="width:80px" src="%s"/>' % img_path]

        # Build the HTML cell with the configured display fields.
        data = ''
        for field, (name, more, existence) in show_item_config.iteritems():
            if existence: continue
            field_value = _get_field(field, doc)
            if not field_value:
                continue
            row_value = '%s: %s' % (name, field_value)

            row_value = row_value.replace('/', '').replace(',', '')
            more_class = ' class="search_more"' if more else ''
            data += '<div%s>%s</div>' % (more_class, row_value)

        doc_urn = get_urn(doc)
        if not doc_urn:
            print "Invalid Item, need a urn", doc["_id"]
            continue

        if not doc['reference']:
            data += 'Disponibles: %s<br>' % uniq_docs[doc_urn]['count']

        if uniq_docs[doc_urn]["existences"]:
            data += "<br><h3 class='search_more'>Ejemplares</h3>"
        # Add Existences
        for e in uniq_docs[doc_urn]["existences"]:
            if existences.get(e, False):
                data += "<div id='%s' class='existence search_more'>" % e
                for field, (name, more, exist_conf) in\
                        show_item_config.iteritems():
                    if exist_conf:
                        field_value = _get_field(field, existences[e])
                        if not field_value:
                            field_value = ""

                        row_value = '%s: %s' % (name, field_value)
                        row_value = row_value.replace('/', '').replace(',', '')
                        more_class = ' class="search_more"' if more else ''
                        data += '<div%s>%s</div>' % (more_class, row_value)
                data += "</div>"

        row.append(data)
        row.append('')

        sort_value = None
        if sort:
            sort_value = _get_field(sort, doc)

        columns.append((sort_value, row))
    # NOTE(review): the function ends here without sorting `columns` or
    # returning a response — this snippet appears truncated; confirm the
    # tail (sort by sort_value / sort_reverse, fill return_dict) upstream.
Exemplo n.º 57
0
class WorkFlow(Document):
    """A workflow definition: a graph of connectors and tasks over items."""

    _db = get_db("couchflow")
    name = schema.StringProperty()
    workflow_type = schema.StringProperty()
    item_type = schema.StringProperty()
    user_id = schema.StringProperty()
    conectors = schema.DictProperty()
    nro_pedido = schema.IntegerProperty(default=0)
    tasks = schema.DictProperty()
    merge_conectors = schema.DictProperty()
    original_task_ids = schema.DictProperty()
    enabled = schema.BooleanProperty(default=False)
    steps = schema.IntegerProperty(default=0)
    is_clone = schema.BooleanProperty(default=False)
    get_id = property(lambda self: self['_id'])
    # text shown in the task
    description = schema.StringProperty()
    path = schema.ListProperty(schema.StringProperty())
    # keep track of starting point of workflow
    # usefull to get all workflows that starts
    # with X task type, for example FilterItems
    first_task_type = schema.StringProperty()
    first_task_id = schema.StringProperty()
    visible = schema.BooleanProperty(default=True)

    def get_item(self):
        """Return the WFItem matching this workflow's item_type, or False."""
        if not self.item_type:
            return False
        items = WFItem.view("couchflow/item_names", limit=1, include_docs=True)
        item = items[self.item_type]
        item = item.one()
        return item

    def get_items(self):
        """Return [(item_type, name, is_selected), ...] for all item types."""
        item_query = WFItem.view("couchflow/items", include_docs=True)
        items = {}
        for item in item_query.all():
            items[item.item_type] = (item.name, False)

        if self.item_type in items:
            items[self.item_type] = (items[self.item_type][0], True)

        items = [(key, items[key][0], items[key][1]) for key in items]
        return items

    def remove_relations(self):
        """Delete every connector and task belonging to this workflow."""
        conectors = WorkFlow.view("couchflow/flowconector", include_docs=True)
        conectors = conectors[self._id]
        # TODO: bulk delete
        for conector in conectors:
            conector.delete()
        tasks = Task.view("couchflow/flowtask", include_docs=True)
        tasks = tasks[self._id]
        for task in tasks:
            task.delete()
        return True

    def set_all_inactive(self):
        """Deactivate every connector and finish every task of this workflow."""
        conectors = WorkFlow.view("couchflow/flowconector", include_docs=True)
        conectors = conectors[self._id]
        # TODO: bulk save
        for conector in conectors:
            conector.active = False
            conector.save()
        tasks = Task.view("couchflow/flowtask", include_docs=True)
        tasks = tasks[self._id]
        for task in tasks:
            task.finish_task(None)
            task.save()
        return True

    def get_docs(self):
        """Return this workflow's documents, newest first."""
        documents = WorkFlow.view("couchflow/flowdocs", include_docs=True)
        documents = documents[self._id]
        documents = documents.all()
        documents.reverse()
        return documents

    def set_enabled(self):
        """Mark every task of this workflow as enabled.  Returns True."""
        tasks = Task.view("couchflow/flowtask", include_docs=True)
        tasks = tasks[self._id]
        flag = True
        for task in tasks:
            task.enabled = flag
            task.save()
        return True

    def set_disabled(self):
        """Mark every task of this workflow as disabled.  Returns True."""
        tasks = Task.view("couchflow/flowtask", include_docs=True)
        tasks = tasks[self._id]
        # BUG FIX: this was `flag = True`, which made set_disabled behave
        # identically to set_enabled (copy-paste error).
        flag = False
        for task in tasks:
            task.enabled = flag
            task.save()
        return True

    def get_first_conector(self):
        """Return the workflow's starting connector."""
        conectors = WorkFlow.view("couchflow/firstconector", include_docs=True)
        conectors = conectors[self._id]
        return conectors.one()

    def get_active_tasks(self):
        """Return the view result of this workflow's active tasks."""
        tasks = Task.view("couchflow/activetask", include_docs=True)
        tasks = tasks[self._id]
        return tasks

    def conector_tasks(self, conector):
        """Return the Task docs following *conector*, or False if none.

        An UntilConector only advances to its first next task.
        """
        if len(conector.next_tasks) > 0:
            tasks = []
            # TODO: use bulk api to get tasks
            if not conector.doc_type == "UntilConector":
                for task_id in conector.next_tasks:
                    task = Task.get(task_id)
                    tasks.append(task)
            else:
                task = Task.get(conector.next_tasks[0])
                tasks.append(task)
            return tasks
        return False

    def tasks_conectors(self, tasks):
        """Return the connectors hanging off *tasks*, or False if none."""
        conectors = []
        for task in tasks:
            if task.conector_id:
                conector = get_conector(task.conector_id)
                #if conector.doc_type == "SequenceConector" and
                #conector.active:
                #    sequence_conector = SequenceConector.get(conector._id)
                conectors.append(conector)
        if len(conectors) > 0:
            return conectors
        return False

    def tasks_tree(self):
        """Return the workflow as alternating levels of connectors/tasks.

        Walks from the first connector, appending each level of tasks and
        connectors; returns False when the tree has a single level.
        """
        first_conector = self.get_first_conector()
        tasks_tree = [
            [first_conector],
        ]

        tasks = self.conector_tasks(first_conector)

        while tasks:
            tasks_tree.append(tasks)
            conectors = self.tasks_conectors(tasks)
            if conectors:
                tasks_tree.append(conectors)
                tasks = []
                for conector in conectors:
                    try:
                        tasks += self.conector_tasks(conector)
                    except Exception:
                        # FIX: narrowed from a bare `except:`.  Swallows the
                        # TypeError raised when conector_tasks returns False
                        # (no next tasks) without masking SystemExit etc.
                        pass
            else:
                tasks = False
        if len(tasks_tree) > 1:
            return tasks_tree
        return False

    def add_conector(self, conector, active=False):
        """Register *conector* in this workflow's connector map and save."""
        self.conectors[conector._id] = active
        self.save()
        return True

    def add_task(self, task, active=False):
        """Register *task* in this workflow's task map and save."""
        self.tasks[task._id] = active
        self.save()
        return True

    def remove_tree(self, task):
        # Not implemented; kept for interface compatibility.
        return True
Exemplo n.º 58
0
def delete_couch_docs(couch_ids):
    """Delete each document in *couch_ids* from the auditcare database."""
    auditcare_db = get_db("auditcare")
    for couch_id in couch_ids:
        auditcare_db.delete_doc(couch_id)