def add_revfbk_event(self, cur_user, the_thread, the_post, notifier_dat):

        user_qb = Item_Query_Builder(self.qb.db, cur_user, self.qb.branch_hier,
                                     self.qb.revision)
        # NO?: Query_Overlord.finalize_query(user_qb)
        threads = thread.Many()
        threads.search_by_stack_id(the_thread.stack_id, user_qb)
        if not threads:
            log.error('mk_evts_revfbk_tds: user cannot see thread: %s / %s' % (
                cur_user,
                the_thread,
            ))
        else:
            g.assurt(len(threads) == 1)
            g.assurt(threads[0].system_id == the_thread.system_id)
            # NOTE: For simplicity, we're not checking enable_watchers_email.
            #       Since this is an immediate event, cron is about to run
            #       and it'll check that it's okay to email the user then.
            insert_rev_sql = (
                """
            INSERT INTO item_event_alert
               (  username
                , latest_rid
                , branch_id
                , item_id
                , item_stack_id
                , watcher_stack_id
                , msg_type_id
                , service_delay
                , notifier_dat)
            VALUES
               (  '%s'  -- username
                ,  %d   -- latest_rid
                ,  %d   -- branch_id
                ,  %d   -- item_id
                ,  %d   -- item_stack_id
                ,  %d   -- watcher_stack_id
                ,  %d   -- msg_type_id
                ,  %d   -- service_delay
                , '%s') -- notifier_dat
            """ % (
                    cur_user,
                    # Store the post values, not the thread's.
                    the_post.valid_start_rid,
                    the_post.branch_id,  # ==? self.qb.branch_hier[0][0]
                    the_post.system_id,
                    the_post.stack_id,
                    0,  # No link_value watcher, since revision.alert_on_activity.
                    Watcher_Parts_Base.MSG_TYPE_REV_FEEDBACK,
                    Watcher_Frequency.immediately,
                    notifier_dat,
                ))

            rows = self.qb.db.sql(insert_rev_sql)

            log.debug('make_events_revfbk_threads: rowcount: %s' %
                      (self.qb.db.curs.rowcount, ))
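
The INSERT above splices values, including the username, straight into the SQL string. Below is a hedged alternative sketch using placeholder parameters; it assumes the db wrapper forwards a parameter tuple to the underlying psycopg2 cursor, which the example itself does not show.

            # Sketch only: assumes self.qb.db.sql(sql, parms) hands parms to
            # cursor.execute; verify against db_glue before relying on this.
            insert_rev_sql = (
                """
            INSERT INTO item_event_alert
               (username, latest_rid, branch_id, item_id, item_stack_id,
                watcher_stack_id, msg_type_id, service_delay, notifier_dat)
            VALUES
               (%s, %s, %s, %s, %s, %s, %s, %s, %s)
            """)
            parms = (cur_user,
                     the_post.valid_start_rid,
                     the_post.branch_id,
                     the_post.system_id,
                     the_post.stack_id,
                     0,  # No link_value watcher.
                     Watcher_Parts_Base.MSG_TYPE_REV_FEEDBACK,
                     Watcher_Frequency.immediately,
                     notifier_dat,)
            rows = self.qb.db.sql(insert_rev_sql, parms)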
Example #2
   def get_qb_for_branch(self, branch_id):

      try:

         branch_qb, the_branch = self.branch_qbs[branch_id]
         log.debug('get_qb_for_branch: existing qb: user: %s / branch_id: %s'
                   % (self.username, branch_id,))

      except KeyError:

         rev = revision.Current()
         branch_hier = branch.Many.branch_hier_build(self.db, branch_id, rev)
         branch_qb = Item_Query_Builder(self.db, self.username, branch_hier,
                                        rev)
         g.assurt(branch_id == branch_hier[0][0])

         access_ok = True
         the_branch = None
         branches = branch.Many()
         branches.search_by_stack_id(branch_id, branch_qb)
         if branches:
            g.assurt(len(branches) == 1)
            the_branch = branches[0]
            if not the_branch.can_view():
               log.warning(
                  'send_email_: user cannot access leafy branch: %s / %s'
                  % (self.username, branch_id,))
               access_ok = False
         parent_branches = branch_hier[1:]
         for parent in parent_branches:
            branches = branch.Many()
            branches.search_by_stack_id(parent[0], branch_qb)
            if branches:
               g.assurt(len(branches) == 1)
               if not branches[0].can_view():
                  log.warning(
                     'send_email_: user cannot access parent branch: %s / %s'
                     % (self.username, branches[0].stack_id,))
                  access_ok = False

         if (the_branch is None) or (not access_ok):
            branch_qb = None
         self.branch_qbs[branch_id] = (branch_qb, the_branch,)

         log.debug('get_qb_for_branch: new qb: user: %s / branch_id: %s / %s'
                   % (self.username, branch_id, branch_qb,))

      return (branch_qb, the_branch,)
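
A short usage sketch for the cached (branch_qb, the_branch) pair returned above; the call site and the region fetch are illustrative only.

      # Hypothetical consumer: skip branches this user cannot see.
      (branch_qb, the_branch) = self.get_qb_for_branch(branch_id)
      if branch_qb is None:
         log.debug('skipping branch %s: no access for user %s'
                   % (branch_id, self.username,))
      else:
         # Assumes region.Many offers the same search_get_items API that
         # branch.Many uses in a later example.
         regions = region.Many()
         regions.search_get_items(branch_qb)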
Example #3
 def branch_hier_enforce_revision(self, branch_tup, rev, min_access=None):
     #branch.Many.branch_enforce_permissions(self.req.db,
     #                                       self.req.client.username,
     #                                       branch_id,
     #                                       rev,
     #                                       min_access)
     branch_hier = [
         (
             branch_tup[0],
             rev,
             branch_tup[2],
         ),
     ]
     qb = Item_Query_Builder(self.req.db,
                             self.req.client.username,
                             branch_hier,
                             rev,
                             viewport=None,
                             filters=None,
                             user_id=None)
     Query_Overlord.finalize_query(qb)
     branch.Many.branch_enforce_permissions(qb, min_access)
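
A usage sketch for the enforcement helper above; branch_stack_id, branch_name, and revision_id are hypothetical placeholders, and Access_Level.viewer is just one plausible minimum.

     # Hypothetical call site: require viewer access on a branch at an
     # older revision before doing any work against it.
     branch_tup = (branch_stack_id, None, branch_name,)  # (stack_id, rev, name)
     rev = revision.Historic(revision_id, allow_deleted=False)
     self.branch_hier_enforce_revision(branch_tup, rev,
                                       min_access=Access_Level.viewer)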
Example #4
    def output_cfg_for_branch(self, branch_):

        username = conf.anonymous_username
        rev = revision.Current()
        branch_hier = branch.Many.branch_hier_build(self.qb.db,
                                                    branch_.stack_id, rev)
        qb = Item_Query_Builder(self.qb.db, username, branch_hier, rev)

        # See byway.Many.branch_coverage_area_update and item_mgr.revision_save:
        # Computing the rubber-band polygon -- the so-called coverage area --
        # takes a number of seconds to compute. So best not to block a user's
        # GWIS call but to update the coverage area separately.
        # MAYBE: Move this to Mr. Do!
        # FIXME: revision.Revision.geosummary_update should move here, too?
        #        Or maybe to new Mr. Do! job?

        # See if we should bother doing this.

        needs_update = self.check_branch_last_rid_changed(qb, branch_)
        if self.cli_opts.force:
            needs_update = True

        if needs_update:
            self.branch_coverage_area_update(qb, branch_)

        # Now that we've got the branch bbox, generate the layers for each
        # branch-skin combo.

        skin_names = branch_.get_skin_names()
        for skin_name in skin_names:
            log.debug('Processing branch named "%s" (%d) / skin "%s"' % (
                branch_.name,
                branch_.stack_id,
                skin_name,
            ))
            self.out_cfg_for_brskin(qb, branch_, skin_name)

        if needs_update:
            self.update_branch_last_rid(qb, branch_)
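
The per-branch config writer above pairs naturally with the branch iterator shown in a later example (branch_iterate); whether the two methods live on the same class is an assumption.

    # Hypothetical driver: regenerate output config for every branch.
    def output_cfg_all_branches(self):
        self.branch_iterate(qb=self.qb,
                            branch_id=None,  # None: walk all branches.
                            branch_callback=self.output_cfg_for_branch)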
Example #5
   def setup_qbs(self):

      username = self.wtem.created_by

      db = db_glue.new()

      if self.wtem.revision_id is None:
         rev = revision.Current()
      else:
         rev = revision.Historic(self.wtem.revision_id, allow_deleted=False)

      (branch_id, branch_hier) = branch.Many.branch_id_resolve(db, 
                           self.wtem.branch_id, branch_hier_rev=rev)
      if not branch_hier:
         raise Exception(
            'Branch with stack_id %d not found in database at %s.'
            % (self.wtem.branch_id, rev.short_name(),))

      self.qb = Item_Query_Builder(db, username, branch_hier, rev)

      self.qb.item_mgr = Item_Manager()

      Query_Overlord.finalize_query(self.qb)
Example #6
    def setup_qb_cur(self, all_errs, min_acl=Access_Level.viewer):

        # For both import and export, qb_src is used to retrieve items from the
        # database, and qb_cur is used to check the user's group accesses and
        # maybe to search for regions if a restrictive bbox is being imposed.
        # But qb_cur is also used during import to save changes to the database;
        # qb_cur is not used during export to save anything to the database.
        #
        # NOTE: On import, we row-lock on the grac tables, group_membership
        # and new_item_policy. We also row-lock the destination branch.
        # So other operations might block while this code runs.
        # CONFIRM: We don't lock anything on export, right?

        qb_cur = None

        username = self.mjob.wtem.created_by

        db = db_glue.new()

        rev = revision.Current(allow_deleted=False)
        (branch_id,
         branch_hier) = branch.Many.branch_id_resolve(db,
                                                      self.mjob.wtem.branch_id,
                                                      branch_hier_rev=rev)

        if branch_id is None:
            # EXPLAIN: How come we don't raise here, like we do in the else?
            #          Or, why doesn't the else block use all_errs?
            #          See: raise_error_on_branch.
            #          And if you look at export_cyclop.substage_initialize,
            #          you'll see that it assurts not all_errs, so I guess
            #          it expects us to raise.
            all_errs.append('setup_qb_cur: not a branch: %s at %s' % (
                self.mjob.wtem.branch_id,
                str(rev),
            ))
        else:

            g.assurt(branch_hier)
            g.assurt(branch_id == branch_hier[0][0])

            raise_error_on_branch = False

            if not self.spf_conf.branch_name:
                # This happens on export, since export_cyclop.substage_initialize
                # only sets branch_id when setting up the qbs. This is because it
                # uses the merge_job's branch_id, and since merge_job is just an
                # item_versioned item, all it has is its branch_id, as items do
                # not also store the branch name.
                self.spf_conf.branch_name = branch_hier[0][2]
            elif self.spf_conf.branch_name != branch_hier[0][2]:
                # The branch name in the shapefile should match.
                log.error('setup_qb_cur: branch_name mismatch: %s / %s' % (
                    self.spf_conf.branch_name,
                    branch_hier[0][2],
                ))
                raise_error_on_branch = True
            # else, the branch_name in the conf matches the one we loaded by ID.
            #
            if self.spf_conf.branch_id != branch_id:
                # But the branch ID we can tolerate being wrong.
                log.warning('setup_qb_cur: unexpected spf_conf.branch_id: %s' %
                            (self.spf_conf.branch_id, ))
                # For the Metc Bikeways shapefile, this just means [lb] hasn't
                # updated the branch ID attribute in the shapefile...
                g.assurt(self.spf_conf.branch_name)
                (try_branch_id,
                 try_branch_hier) = branch.Many.branch_id_resolve(
                     db, self.spf_conf.branch_name, branch_hier_rev=rev)
                if try_branch_id == branch_id:
                    log.warning('setup_qb_cur: ok: overriding branch_id: %s' %
                                (branch_id, ))
                    self.spf_conf.branch_id = branch_id
                else:
                    log.error(
                        'setup_qb_cur: try_branch_id != branch_id: %s != %s' %
                        (
                            try_branch_id,
                            branch_id,
                        ))
                    raise_error_on_branch = True

            if raise_error_on_branch:
                if conf.break_on_assurt:
                    import pdb
                    pdb.set_trace()
                raise GWIS_Error(
                    'Shapefile branch ID and name do not match job details: '
                    'work_item: %s/%s | shapefile: %s/%s' % (
                        branch_hier[0][2],
                        branch_hier[0][0],
                        self.spf_conf.branch_name,
                        self.spf_conf.branch_id,
                    ))

            qb_cur = Item_Query_Builder(db, username, branch_hier, rev)

            # Load both the raw geometry and the WKT geometry; we need to be
            # flexible.
            qb_cur.filters.skip_geometry_raw = False
            qb_cur.filters.skip_geometry_svg = True
            qb_cur.filters.skip_geometry_wkt = False

            # To save things, we need to set the group ID explicitly.
            self.user_group_id = User.private_group_id(qb_cur.db, username)
            qb_cur.user_group_id = self.user_group_id

            qb_cur.item_mgr = Item_Manager()
            # Load the attachment cache now. On import, if we create new
            # attributes (see metc_bikeways_defs.py), we'll keep it updated.
            qb_cur.item_mgr.load_cache_attachments(qb_cur)

            Query_Overlord.finalize_query(qb_cur)

            # FIXME: This comment. I like it. But it's not true... yet.
            #  Getting row lock in branches_prepare. So don't table lock.
            #
            # Start the transaction, since the grac_mgr does some row locking.
            # We'll keep the rows locked until we've verified permissions.
            # FIXME: Verify you rollback and start a new 'revision' lock...
            #        or maybe just start a new 'revision' lock? or can you
            #        write to a Shapefile first and zip through the Shapefile
            #        to save quickly and not hold the lock so long?
            # BUG nnnn: Investigate using a row-level branch lock; for now,
            #           just lock rev.
            qb_cur.db.transaction_begin_rw()

            qb_cur.grac_mgr = Grac_Manager()
            load_grp_mmbrshps = True
            qb_cur.grac_mgr.prepare_mgr('user', qb_cur, load_grp_mmbrshps)

            # FIXME: Does qb_src need grac_mgr?
            #self.qb_src.grac_mgr = qb_cur.grac_mgr

            # Check user's minimum access level.
            target_branch = self.verify_branch_access(qb_cur, min_acl,
                                                      all_errs)
            g.assurt(target_branch.stack_id == self.spf_conf.branch_id)
            if (self.spf_conf.branch_name and
                (self.spf_conf.branch_name != qb_cur.branch_hier[0][2])):
                log.warning('Unexpected spf_conf.branch_name: %s' %
                            (self.spf_conf.branch_name, ))
            self.spf_conf.branch_name = qb_cur.branch_hier[0][2]

        self.qb_cur = qb_cur

        log.debug('setup_qb_cur: spf_conf: %s' % (str(self.spf_conf), ))
Example #7
    def setup_qb_src(self, all_errs):

        qb_src = None

        username = self.mjob.wtem.created_by

        # The source qb is just for reading...
        db = db_glue.new()
        # ... but we'll be making temporary tables of stack IDs, so start a
        # transaction.
        db.transaction_begin_rw()

        # The byways in the conflated shapefile were not marked deleted when
        # they were exported for conflation, so we don't need to look for deleted.
        # NOTE: The original MetC import script based rev off
        #       self.target_branch.last_merge_rid rather than what's in the
        #       config file.
        g.assurt(self.spf_conf.revision_id)
        revision_id = self.spf_conf.revision_id
        rev = revision.Historic(revision_id, allow_deleted=False)

        # Make the branch_hier.
        (branch_id,
         branch_hier) = branch.Many.branch_id_resolve(db,
                                                      self.mjob.wtem.branch_id,
                                                      branch_hier_rev=rev)

        # Put it all together.
        if branch_id is None:
            all_errs.append('setup_qb_src: not a branch: %s at %s' % (
                self.mjob.wtem.branch_id,
                str(rev),
            ))
            # Don't forget to close. Not too big a deal, but oddly, if we don't,
            # the next attempt by this thread to get the db will result in the
            # same DB() object being created and the same self.conn returned,
            # and then db_glue complains that it's got that self and conn in
            # conn_lookup.
            db.close()
            db = None
        else:
            g.assurt(branch_hier)
            qb_src = Item_Query_Builder(db, username, branch_hier, rev)

            # It's nice to have both the raw, opaque hexadecimal geometry as well
            # as the WKT geometry, since not all APIs are that flexible, and also
            # because it's easier to work with WKT in Python and OSGeo (and also
            # because [lb] hasn't seen an OGR fcn. to convert raw PostGIS geom,
            # but he's probably not looking hard enough).
            qb_src.filters.skip_geometry_raw = False
            qb_src.filters.skip_geometry_svg = True
            qb_src.filters.skip_geometry_wkt = False

            qb_src.item_mgr = Item_Manager()
            # FIXME: Is this right? What about tgraph?
            qb_src.item_mgr.load_cache_attachments(qb_src)

            Query_Overlord.finalize_query(qb_src)

            # Check that user has viewer access on the source branch.
            source_branch = self.verify_branch_access(qb_src,
                                                      Access_Level.viewer,
                                                      all_errs)
            # NOTE: The job itself is already access-controlled, so generally the
            # user has arbiter access to the branch at the Current revision.

        self.qb_src = qb_src
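
Taken together, setup_qbs, setup_qb_src, and setup_qb_cur suggest a job lifecycle roughly like the sketch below. The method name and the error handling are assumptions; whether all three setup fcns. belong to one class is not shown by the examples.

    # Hypothetical lifecycle for an import/export work item.
    def initialize_qbs(self):
        all_errs = []
        self.setup_qbs()             # Job-level qb at the work item's revision.
        self.setup_qb_src(all_errs)  # Read-only qb at the conf's Historic rev.
        self.setup_qb_cur(all_errs)  # Read-write qb at the Current revision.
        if all_errs:
            # Assumption: the real caller assurts or raises; here, just log.
            log.error('initialize_qbs: %s' % (' / '.join(all_errs),))
            return False
        return True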
Example #8
    def setup_links(self):

        # First count some table rows and double-check the upgrade so far. We
        # want to be confident we're getting all the CcpV1 records and making
        # appropriate CcpV2 records.
        try:
            self.setup_links_sanity_check()
        except:
            log.warning('setup_links: old CcpV1 already dropped; moving on...')

        # Now get the unique set of usernames. We're going to create items owned
        # by certain users, and we'll need to setup resources for each user, like
        # the query_builder and the grac_mgr.

        usernames_sql = ("""
         SELECT DISTINCT (username)
         FROM item_watcher_bug_nnnn
         ORDER BY username
         """)

        # NOTE: We're not bothering with dont_fetchall.
        #       There are only a few hundred rows...

        rows = self.qb.db.sql(usernames_sql)

        log.debug('setup_links: found %d unique users with watchers' %
                  (len(rows), ))

        if not rows:
            log.error('setup_links: nothing found')
            g.assurt(False)

        for row in rows:

            username = row['username']

            # Hmm. There's no user.One() class to load a user. It's all custom.
            user_rows = self.qb.db.sql(
                "SELECT login_permitted FROM user_ WHERE username = %s" %
                (self.qb.db.quoted(username), ))
            g.assurt(len(user_rows) == 1)
            if not user_rows[0]['login_permitted']:
                log.debug('setup_links: skipping: !user_.login_permitted: %s' %
                          (username, ))
                continue

            log.verbose2('setup_links: processing username: %s' % (username, ))

            g.assurt(isinstance(self.qb.revision, revision.Current))
            rev_cur = revision.Current()

            user_qb = Item_Query_Builder(self.qb.db, username,
                                         self.qb.branch_hier, rev_cur)
            user_qb.grac_mgr = Grac_Manager()
            user_qb.grac_mgr.prepare_mgr('user', user_qb)
            #
            g.assurt(user_qb.username
                     and (user_qb.username != conf.anonymous_username))
            user_qb.user_group_id = User.private_group_id(
                user_qb.db, user_qb.username)
            #
            # Use the same item_mgr so we pull client stack IDs from the same
            # pool.
            user_qb.item_mgr = self.qb.item_mgr

            # Finalize the query. This sets revision.gids so it'll include the
            # user's private group (and the anonymous and All Users groups).
            Query_Overlord.finalize_query(user_qb)

            # We can still get deleted regions and add links for them.
            user_qb.revision.allow_deleted = True

            # Finally, update the database. Oi, there's a lot of setup!
            self.setup_links_for_user(user_qb)

            # The way Item_Query_Builder works, it usually wires the branch_hier
            # revision to the qb's own revision object.
            g.assurt(self.qb.branch_hier[0][1] == rev_cur)
            # We'll reuse the branch_hier so clear this user's gids.
            self.qb.branch_hier[0][1].gids = None
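
The per-user setup inside the loop above distills into a small helper; this sketch repeats the same steps from the example and is not part of the original script.

    # Sketch: build a finalized per-user qb that shares the script's item_mgr.
    def make_user_qb(self, username, rev_cur):
        user_qb = Item_Query_Builder(self.qb.db, username,
                                     self.qb.branch_hier, rev_cur)
        user_qb.grac_mgr = Grac_Manager()
        user_qb.grac_mgr.prepare_mgr('user', user_qb)
        user_qb.user_group_id = User.private_group_id(user_qb.db, username)
        user_qb.item_mgr = self.qb.item_mgr
        Query_Overlord.finalize_query(user_qb)
        return user_qb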
Example #9
    def branch_iterate(self, qb, branch_id, branch_callback, debug_limit=None):

        log.debug('branch_iterate: getting tmp db')
        # Get a new qb, and rather than clone the db, get a new connection, lest
        # we cannot commit ("Cannot commit when multiple cursors open").
        db = db_glue.new()
        username = ''  # Using gia_userless, so not really needed.
        branch_hier = copy.copy(qb.branch_hier)
        qb_branches = Item_Query_Builder(db, username, branch_hier,
                                         qb.revision)

        if branch_id:
            # Find just the one.
            qb_branches.branch_hier_limit = 1

        # Indicate our non-pyserverness so that gia_userless works.
        qb_branches.request_is_local = True
        qb_branches.request_is_script = True

        # Get all active branches, regardless of user rights.
        qb_branches.filters.gia_userless = True

        # If debugging, just grab a handful of results.
        if debug_limit:
            qb_branches.use_limit_and_offset = True
            qb_branches.filters.pagin_count = int(debug_limit)
        g.assurt(qb_branches.sql_clauses is None)

        # Use a generator so that, in the future, when there are millions of
        # branches, this script runs peacefully.
        g.assurt(not qb_branches.db.dont_fetchall)
        qb_branches.db.dont_fetchall = True

        # Leaving as client: qb_branches.filters.min_access_level

        qb_branches.sql_clauses = branch.Many.sql_clauses_cols_all.clone()

        Query_Overlord.finalize_query(qb_branches)

        branches = branch.Many()
        branches.search_get_items(qb_branches)

        log.info('branch_iterate: found %d branches.' %
                 (qb_branches.db.curs.rowcount, ))

        # Skipping:
        # prog_log = Debug_Progress_Logger(copy_this=debug_prog_log)
        # prog_log.log_freq = 1
        # prog_log.loop_max = qb_branches.db.curs.rowcount

        generator = branches.results_get_iter(qb_branches)

        for branch_ in generator:

            # NOTE: We don't correct self.qb, so callers should be sure not to use
            #       its branch_hier thinking it represents this branch_.

            branch_callback(branch_)

            # Skipping:
            # if prog_log.loops_inc():
            #    break

        # Skipping prog_log.loops_fin()

        generator.close()

        log.debug('branch_iterate: closing tmp db')
        qb_branches.db.close()
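
A usage sketch for branch_iterate; the reporting callback and the debug_limit value are illustrative.

    # Hypothetical wrapper: log every branch the iterator yields.
    def report_branches(self):
        def log_branch(branch_):
            log.debug('visiting branch: "%s" (%d)' % (
                branch_.name,
                branch_.stack_id,
            ))
        self.branch_iterate(qb=self.qb, branch_id=None,
                            branch_callback=log_branch, debug_limit=10)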
Example #10
   def setup_qb_cur(self, all_errs, min_acl=Access_Level.viewer):

      # For both import and export, qb_src is used to retrieve items from the
      # database, and qb_cur is used to check the user's group accesses and
      # maybe to search for regions if a restrictive bbox is being imposed.
      # But qb_cur is also used during import to save changes to the database;
      # qb_cur is not used during export to save anything to the database.
      #
      # NOTE: On import, we row-lock on the grac tables, group_membership 
      # and new_item_policy. We also row-lock the destination branch.
      # So other operations might block while this code runs.
      # CONFIRM: We don't lock anything on export, right?

      qb_cur = None

      username = self.mjob.wtem.created_by

      db = db_glue.new()

      rev = revision.Current(allow_deleted=False)
      (branch_id, branch_hier) = branch.Many.branch_id_resolve(db, 
                     self.mjob.wtem.branch_id, branch_hier_rev=rev)

      if branch_id is None:
         # EXPLAIN: How come we don't raise here, like we do in the else?
         #          Or, why doesn't the else block use all_errs?
         #          See: raise_error_on_branch.
         #          And if you look at export_cyclop.substage_initialize,
         #          you'll see that it assurts not all_errs, so I guess
         #          it expects us to raise.
         all_errs.append(
            'setup_qb_cur: not a branch: %s at %s' 
            % (self.mjob.wtem.branch_id, str(rev),))
      else:

         g.assurt(branch_hier)
         g.assurt(branch_id == branch_hier[0][0])

         raise_error_on_branch = False

         if not self.spf_conf.branch_name:
            # This happens on export, since export_cyclop.substage_initialize
            # only sets branch_id when setting up the qbs. This is because it
            # uses the merge_job's branch_id, and since merge_job is just an
            # item_versioned item, all it has is its branch_id, as items do
            # not also store the branch name.
            self.spf_conf.branch_name = branch_hier[0][2]
         elif self.spf_conf.branch_name != branch_hier[0][2]:
            # The branch name in the shapefile should match.
            log.error('setup_qb_cur: branch_name mismatch: %s / %s'
                      % (self.spf_conf.branch_name, branch_hier[0][2],))
            raise_error_on_branch = True
         # else, the branch_name in the conf matches the one we loaded by ID.
         #
         if self.spf_conf.branch_id != branch_id:
            # But the branch ID we can tolerate being wrong.
            log.warning('setup_qb_cur: unexpected spf_conf.branch_id: %s'
                        % (self.spf_conf.branch_id,))
            # For the Metc Bikeways shapefile, this just means [lb] hasn't
            # updated the branch ID attribute in the shapefile...
            g.assurt(self.spf_conf.branch_name)
            (try_branch_id, try_branch_hier) = branch.Many.branch_id_resolve(
                           db, self.spf_conf.branch_name, branch_hier_rev=rev)
            if try_branch_id == branch_id:
               log.warning('setup_qb_cur: ok: overriding branch_id: %s'
                           % (branch_id,))
               self.spf_conf.branch_id = branch_id
            else:
               log.error('setup_qb_cur: try_branch_id != branch_id: %s != %s'
                         % (try_branch_id, branch_id,))
               raise_error_on_branch = True

         if raise_error_on_branch:
            if conf.break_on_assurt:
               import pdb;pdb.set_trace()
            raise GWIS_Error(
               'Shapefile branch ID and name do not match job details: '
               'work_item: %s/%s | shapefile: %s/%s'
               % (branch_hier[0][2],
                  branch_hier[0][0],
                  self.spf_conf.branch_name,
                  self.spf_conf.branch_id,))

         qb_cur = Item_Query_Builder(db, username, branch_hier, rev)

         # Load both the raw geometry and the WKT geometry; we need to be
         # flexible.
         qb_cur.filters.skip_geometry_raw = False
         qb_cur.filters.skip_geometry_svg = True
         qb_cur.filters.skip_geometry_wkt = False

         # To save things, we need to set the group ID explicitly.
         self.user_group_id = User.private_group_id(qb_cur.db, username)
         qb_cur.user_group_id = self.user_group_id

         qb_cur.item_mgr = Item_Manager()
         # Load the attachment cache now. On import, if we create new
         # attributes (see metc_bikeways_defs.py), we'll keep it updated.
         qb_cur.item_mgr.load_cache_attachments(qb_cur)

         Query_Overlord.finalize_query(qb_cur)

         # FIXME: This comment. I like it. But it's not true... yet.
         #  Getting row lock in branches_prepare. So don't table lock.
         #
         # Start the transaction, since the grac_mgr does some row locking.
         # We'll keep the rows locked until we've verified permissions.
         # FIXME: Verify you rollback and start a new 'revision' lock...
         #        or maybe just start a new 'revision' lock? or can you
         #        write to a Shapefile first and zip through the Shapefile
         #        to save quickly and not hold the lock so long?
         # BUG nnnn: Investigate using a row-level branch lock; for now, 
         #           just lock rev.
         qb_cur.db.transaction_begin_rw()

         qb_cur.grac_mgr = Grac_Manager()
         load_grp_mmbrshps = True
         qb_cur.grac_mgr.prepare_mgr('user', qb_cur, load_grp_mmbrshps)

         # FIXME: Does qb_src need grac_mgr?
         #self.qb_src.grac_mgr = qb_cur.grac_mgr

         # Check user's minimum access level.
         target_branch = self.verify_branch_access(qb_cur, min_acl, all_errs)
         g.assurt(target_branch.stack_id == self.spf_conf.branch_id)
         if (self.spf_conf.branch_name
             and (self.spf_conf.branch_name != qb_cur.branch_hier[0][2])):
            log.warning('Unexpected spf_conf.branch_name: %s'
                        % (self.spf_conf.branch_name,))
         self.spf_conf.branch_name = qb_cur.branch_hier[0][2]

      self.qb_cur = qb_cur

      log.debug('setup_qb_cur: spf_conf: %s' % (str(self.spf_conf),))
Example #11
   def load_make_qb_new(self, rid_latest):

      g.assurt(rid_latest > 0)

      rid_min = self.rid_max
      self.rid_max = rid_latest
      if isinstance(self.revision, revision.Current):
         if rid_min > 0:
            update_only = True
      else:
         g.assurt(isinstance(self.revision, revision.Historic))
         self.rid_max = self.revision.rid

      rev = None
      branch_hier = None
      if rid_min == self.rid_max:
         # The caller should already have checked that we have work to do.
         log.error('load_make_qb_new: rid_min == self.rid_max')
         rev_hist = None
      else:
         # We always need a historic revision, since we always update the attr
         # and tag cache.
         rev_hist = revision.Historic(self.rid_max)
         # If rid_min is already set, do an Update.
         if rid_min > 0:
            log.debug('load_make_qb_new: fr. %d to %d'
                      % (rid_min, self.rid_max,))
            g.assurt(isinstance(self.revision, revision.Current))
            # If we've already loaded byways, we're updating the map,
            # and we want to fetch changed byways, including deleted or
            # restricted-access byways, so we can remove those edges from the
            # transportation graph.
            rev_fetch = revision.Updated(rid_min, self.rid_max)
         else:
            # We're loading the map for the first time.
            rev_fetch = rev_hist

      qb_fetch = None
      if rev_hist is not None:
         branch_hier = branch.Many.branch_hier_build(self.update_db,
                                    self.branch_hier[0][0], rev_hist)
         qb_fetch = Item_Query_Builder(self.update_db, self.username,
                                       branch_hier, rev_fetch)

         # The Item_Manager class will make a table of all changed items by
         # stack_id, and it'll join that against a normal Historic query, so
         # we need to keep the username for the Historic query.
         # NO:
         #     if isinstance(rev_fetch, revision.Updated):
         #        qb_fetch.username = None
         #        qb_fetch.filters.gia_userless = True

         # Because we're using revision.Updated, we need to tell search_get_sql
         # not to worry.
         qb_fetch.request_is_local = True
         qb_fetch.request_is_script = False # True if user running it.
         # This populates the user gids and sets up geometry queries. Neither
         # of which should be necessary.
         Query_Overlord.finalize_query(qb_fetch)
         if rev_fetch != rev_hist:
            qb_hist = Item_Query_Builder(self.update_db, self.username,
                                         branch_hier, rev_hist)
            Query_Overlord.finalize_query(qb_hist)
         else:
            qb_hist = qb_fetch
         # Load the link_value caches for the byways, since we need tags and
         # attributes for the cost function.
         qb_fetch.item_mgr = Item_Manager()
         # NOTE: Whether rev is Historic or Updated, we'll load attrs and tags
         # for a specific revision ID. For Historic, we'll load them for the
         # historic rev ID, and for Updated, we'll load 'em for rid_max.
         # BUG nnnn: With too many tags... we'll want to have a service
         #   running to handle web requests (so they can always be resident)?
         #   bahh...
         qb_fetch.item_mgr.load_cache_attachments(qb_hist)

      return qb_fetch
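
A sketch of how the returned fetch qb might be consumed; byway.Many() and its fetch call are assumptions modeled on the Many() usage elsewhere in these examples.

      # Hypothetical consumer of the fetch qb.
      qb_fetch = self.load_make_qb_new(rid_latest)
      if qb_fetch is not None:
         # With revision.Updated, deleted/restricted byways come back too,
         # so stale edges can be dropped; with Historic, the full map loads.
         byways = byway.Many()
         byways.search_get_items(qb_fetch)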
Example #12
 def query_builderer(self, *args, **kwargs):
     g.assurt(not (args and kwargs))
     try:
         qb = kwargs['qb']
     except KeyError:
         argn = len(args)
         if argn == 1:
             qb = args[0]
             g.assurt(isinstance(qb, Item_Query_Builder))
         else:
             g.assurt((argn >= 4) and (argn <= 6))
             db = args[0]
             g.assurt(isinstance(db, db_glue.DB))
             username = args[1]
             g.assurt(isinstance(username, str))
             branch_hier = args[2]
             if isinstance(branch_hier, int):
                 # NOTE: Using args[3], which is rev.
                 # DEPRECATE: This is so weird not using branch_hier_build...
                 #        I really think the qb should be built outside the item
                 #        classes, so replace all calls to, e.g.,
                 #    search_by_stack_id(db, username, branch... _id/_hier, rev)
                 # There shouldn't be any callers left using this.... ....right?
                 log.warning('Deprecated: reducing branch_hier to leafiest')
                 branch_hier = [(
                     branch_hier,
                     args[3],
                     '',
                 )]
             elif isinstance(branch_hier, tuple):
                 g.assurt(isinstance(branch_hier[0], int))
                 g.assurt(isinstance(branch_hier[1],
                                     revision.Revision_Base))
                 g.assurt(isinstance(branch_hier[2], basestring))
                 branch_hier = [
                     branch_hier,
                 ]
                 log.debug(
                     'query_builderer: making single-tuple branch_hier: %s'
                     % (branch_hier, ))
             else:
                 log.debug('query_builderer: leaving branch_hier: %s' %
                           (branch_hier, ))
             g.assurt(
                 isinstance(branch_hier, list)
                 and isinstance(branch_hier[0], tuple))
             rev = args[3]
             # For Diff or Updated, make the qb and call finalize on it.
             g.assurt(
                 isinstance(rev, revision.Current)
                 or isinstance(rev, revision.Historic))
             try:
                 viewport = args[4]
                 #g.assurt(isinstance(viewport, Query_Viewport))
             except IndexError:
                 viewport = None
             try:
                 filters = args[5]
                 #g.assurt(isinstance(filters, Query_Filters))
             except IndexError:
                 filters = None
             if (viewport is None) and (filters is None):
                 # A little hack since this fcn. predates the Query_Overlord, so
                 # we don't have to refactor old code.
                 finalized = True
             else:
                 # Who uses this still?
                 g.assurt(False)
                 finalized = False
             qb = Item_Query_Builder(db, username, branch_hier, rev,
                                     viewport, filters)
             qb.finalize()
             #qb.finalized = finalized
             qb.finalized = True
             # NOTE: I think we don't have to worry about calling
             #       Query_Overlord.finalize_query(qb) because the multi
             #       geometry should already have been computed and
             #       stored as part of qb.filters.
     return qb
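
The two calling conventions query_builderer accepts follow from its argument parsing; both call sites below are hypothetical.

     # 1. Hand over a qb that already exists; it is returned unchanged.
     qb = self.query_builderer(qb=existing_qb)

     # 2. Hand over the pieces; query_builderer assembles and finalizes the qb.
     qb = self.query_builderer(db, username, branch_hier, revision.Current())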
Example #13
    def load_make_qb_new(self, rid_latest):

        g.assurt(rid_latest > 0)

        rid_min = self.rid_max
        self.rid_max = rid_latest
        if isinstance(self.revision, revision.Current):
            if rid_min > 0:
                update_only = True
        else:
            g.assurt(isinstance(self.revision, revision.Historic))
            self.rid_max = self.revision.rid

        rev = None
        branch_hier = None
        if rid_min == self.rid_max:
            # The caller should already have checked that we have work to do.
            log.error('load_make_qb_new: rid_min == self.rid_max')
            rev_hist = None
        else:
            # We always need a historic revision, since we always update the attr
            # and tag cache.
            rev_hist = revision.Historic(self.rid_max)
            # If rid_min is already set, do an Update.
            if rid_min > 0:
                log.debug('load_make_qb_new: fr. %d to %d' % (
                    rid_min,
                    self.rid_max,
                ))
                g.assurt(isinstance(self.revision, revision.Current))
                # If we've already loaded byways, we're updating the map,
                # and we want to fetch changed byways, including deleted or
                # restricted-access byways, so we can remove those edges from the
                # transportation graph.
                rev_fetch = revision.Updated(rid_min, self.rid_max)
            else:
                # We're loading the map for the first time.
                rev_fetch = rev_hist

        qb_fetch = None
        if rev_hist is not None:
            branch_hier = branch.Many.branch_hier_build(
                self.update_db, self.branch_hier[0][0], rev_hist)
            qb_fetch = Item_Query_Builder(self.update_db, self.username,
                                          branch_hier, rev_fetch)

            # The Item_Manager class will make a table of all changed items by
            # stack_id, and it'll join that against a normal Historic query, so
            # we need to keep the username for the Historic query.
            # NO:
            #     if isinstance(rev_fetch, revision.Updated):
            #        qb_fetch.username = None
            #        qb_fetch.filters.gia_userless = True

            # Because we're using revision.Updated, we need to tell search_get_sql
            # not to worry.
            qb_fetch.request_is_local = True
            qb_fetch.request_is_script = False  # True if user running it.
            # This populates the user gids and sets up geometry queries. Neither
            # of which should be necessary.
            Query_Overlord.finalize_query(qb_fetch)
            if rev_fetch != rev_hist:
                qb_hist = Item_Query_Builder(self.update_db, self.username,
                                             branch_hier, rev_hist)
                Query_Overlord.finalize_query(qb_hist)
            else:
                qb_hist = qb_fetch
            # Load the link_value caches for the byways, since we need tags and
            # attributes for the cost function.
            qb_fetch.item_mgr = Item_Manager()
            # NOTE: Whether rev is Historic or Updated, we'll load attrs and tags
            # for a specific revision ID. For Historic, we'll load them for the
            # historic rev ID, and for Updated, we'll load 'em for rid_max.
            # BUG nnnn: With too many tags... we'll want to have a service
            #   running to handle web requests (so they can always be resident)?
            #   bahh...
            qb_fetch.item_mgr.load_cache_attachments(qb_hist)

        return qb_fetch
Example #14
   def setup_links(self):

      # First count some table rows and double-check the upgrade so far. We
      # want to be confident we're getting all the CcpV1 records and making
      # appropriate CcpV2 records.
      try:
         self.setup_links_sanity_check()
      except:
         log.warning('setup_links: old CcpV1 already dropped; moving on...')

      # Now get the unique set of usernames. We're going to create items owned
      # by certain users, and we'll need to setup resources for each user, like
      # the query_builder and the grac_mgr.

      usernames_sql = (
         """
         SELECT DISTINCT (username)
         FROM item_watcher_bug_nnnn
         ORDER BY username
         """)

      # NOTE: We're not bothering with dont_fetchall.
      #       There are only a few hundred rows...

      rows = self.qb.db.sql(usernames_sql)

      log.debug('setup_links: found %d unique users with watchers'
                % (len(rows),))

      if not rows:
         log.error('setup_links: nothing found')
         g.assurt(False)

      for row in rows:

         username = row['username']

         # Hmm. There's no user.One() class to load a user. It's all custom.
         user_rows = self.qb.db.sql(
            "SELECT login_permitted FROM user_ WHERE username = %s"
            % (self.qb.db.quoted(username),))
         g.assurt(len(user_rows) == 1)
         if not user_rows[0]['login_permitted']:
            log.debug('setup_links: skipping: !user_.login_permitted: %s'
                      % (username,))
            continue

         log.verbose2('setup_links: processing username: %s' % (username,))

         g.assurt(isinstance(self.qb.revision, revision.Current))
         rev_cur = revision.Current()

         user_qb = Item_Query_Builder(
            self.qb.db, username, self.qb.branch_hier, rev_cur)
         user_qb.grac_mgr = Grac_Manager()
         user_qb.grac_mgr.prepare_mgr('user', user_qb)
         #
         g.assurt(
            user_qb.username and (user_qb.username != conf.anonymous_username))
         user_qb.user_group_id = User.private_group_id(user_qb.db, 
                                                       user_qb.username)
         #
         # Use the same item_mgr so we pull client stack IDs from the same
         # pool.
         user_qb.item_mgr = self.qb.item_mgr

         # Finalize the query. This sets revision.gids so it'll include the
         # user's private group (and the anonymous and All Users groups).
         Query_Overlord.finalize_query(user_qb)

         # We can still get deleted regions and add links for them.
         user_qb.revision.allow_deleted = True

         # Finally, update the database. Oi, there's a lot of setup!
         self.setup_links_for_user(user_qb)

         # The way Item_Query_Builder works, it usually wires the branch_hier
         # revision to the qb's own revision object.
         g.assurt(self.qb.branch_hier[0][1] == rev_cur)
         # We'll reuse the branch_hier so clear this user's gids.
         self.qb.branch_hier[0][1].gids = None
Example #15
   def regions_coalesce_geometry(req=None, from_qb=None):

      # Calculate the region-filter geometry. Do it now so we can cache the
      # value and use an explicit value in the SQL where clause. (This is 
      # bug nnnn, don't use SQL fcns. in WHERE clauses).
      #
      # Note that the region-filter is either the geometry of one or more
      # regions the client specifically indicated, or the geometry is the
      # geometry of regions that the user is watching.
      #
      # Note also that we use the Current revision of the regions and ignore
      # whatever revision the client might really be requesting. This is by
      # design, so users can make new regions and use them on historical
      # queries. It also makes working with the autocomplete edit box in
      # the client easier, since it only has to get the list of current
      # regions.

      g.assurt((req is not None) ^ (from_qb is not None))

      if from_qb is None:
         g.assurt(False) # Deprecated.
         db = req.db
         username = req.client.username
         branch_hier = req.branch.branch_hier
         the_rev = req.revision.rev
         filters = req.filters
         # NOTE: Cannot call as_iqb(addons=False), because that fcn. calls
         #       finalize_query(). So we make our own qb, below.
      else:
         db = from_qb.db
         username = from_qb.username
         branch_hier = from_qb.branch_hier
         the_rev = from_qb.revision
         filters = from_qb.filters

      if not isinstance(the_rev, revision.Current):
         the_rev = revision.Current()
         branch_hier = branch.Many.branch_hier_build(
                        db, branch_hier[0][0], the_rev)
         g.assurt(id(the_rev) == id(branch_hier[0][1]))
      else:
         # [lb] is not sure this is always true. Just curious...
         g.assurt(id(the_rev) == id(branch_hier[0][1]))
         g.assurt(not isinstance(the_rev, revision.Comprehensive))

      qb = Item_Query_Builder(db, username, branch_hier, the_rev)

      g.assurt(qb.revision == the_rev)

      # Use from_qb.item_mgr?
      # Cannot import Item_Manager:
      # Nope: qb.item_mgr = Item_Manager()
      g.assurt(from_qb is not None)
      g.assurt(from_qb.item_mgr is not None)
      qb.item_mgr = from_qb.item_mgr

      # Create a Many() object to perform the search
      # 2013.03.29: MAYBE: We're really fetching regions...
      region_geoms = region.Many()
      # Tell the sql builder not to double-check the multi geom
      region_geoms.search_for_geom(qb, filters.filter_by_regions,
                                       filters.filter_by_watch_geom)

      g.assurt(len(region_geoms) == 1) # Because of the aggregate, ST_Union.
      # Normally geometry_svg gets set but only for an outer select.
      # Our select had nothing nested, so geometry is raw only.
      geom = region_geoms[0].geometry
      if geom is None:
         # If there's no geometry, we won't find anything, so short-circuit.
         # MAYBE: Alert user that region name was not found, or no watched
         #        regions were found? Except we're called from a filter...
         #        so the lack of results should be clue enough.
         log.debug('fetch_n_save: GWIS_Nothing_Found: nothing found')
         raise GWIS_Nothing_Found()
      log.debug('fetch_n_save: len(geom): %s' % (len(geom),))

      # Set the geometry we calculated in the source qb.
      filters.only_in_multi_geometry = geom
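
A usage sketch for regions_coalesce_geometry; the example does not show which module or class owns the fcn., so the bare call, the byway fetch, and the GWIS_Nothing_Found handling are illustrative.

      # Hypothetical caller: narrow a later fetch to the user's watched regions.
      from_qb.filters.filter_by_watch_geom = True
      try:
         # from_qb must already carry an item_mgr (asserted by the fcn. above).
         regions_coalesce_geometry(from_qb=from_qb)
      except GWIS_Nothing_Found:
         log.debug('no watched-region geometry; nothing to search within')
      else:
         # only_in_multi_geometry now limits whatever is fetched via from_qb.
         byways = byway.Many()
         byways.search_get_items(from_qb)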
Example #16
   def setup_qb_src(self, all_errs):

      qb_src = None

      username = self.mjob.wtem.created_by

      # The source qb is just for reading...
      db = db_glue.new()
      # ... but we'll be making temporary tables of stack IDs, so start a
      # transaction.
      db.transaction_begin_rw()

      # The byways in the conflated shapefile were not marked deleted when
      # they were exported for conflation, so we don't need to look for deleted.
      # NOTE: The original MetC import script based rev off
      #       self.target_branch.last_merge_rid rather than what's in the
      #       config file.
      g.assurt(self.spf_conf.revision_id)
      revision_id = self.spf_conf.revision_id
      rev = revision.Historic(revision_id, allow_deleted=False)

      # Make the branch_hier.
      (branch_id, branch_hier) = branch.Many.branch_id_resolve(db, 
                           self.mjob.wtem.branch_id, branch_hier_rev=rev)

      # Put it all together.
      if branch_id is None:
         all_errs.append(
            'setup_qb_src: not a branch: %s at %s' 
            % (self.mjob.wtem.branch_id, str(rev),))
         # Don't forget to close. Not too big a deal, but oddly, if we don't,
         # the next attempt by this thread to get the db will result in the
         # same DB() object being created and the same self.conn returned, 
         # and then db_glue complains that it's got that self and conn in
         # conn_lookup.
         db.close()
         db = None
      else:
         g.assurt(branch_hier)
         qb_src = Item_Query_Builder(db, username, branch_hier, rev)

         # It's nice to have both the raw, opaque hexadecimal geometry as well
         # as the WKT geometry, since not all APIs are that flexible, and also 
         # because it's easier to work with WKT in Python and OSGeo (and also
         # because [lb] hasn't seen an OGR fcn. to convert raw PostGIS geom, 
         # but he's probably not looking hard enough).
         qb_src.filters.skip_geometry_raw = False
         qb_src.filters.skip_geometry_svg = True
         qb_src.filters.skip_geometry_wkt = False

         qb_src.item_mgr = Item_Manager()
         # FIXME: Is this right? What about tgraph?
         qb_src.item_mgr.load_cache_attachments(qb_src)

         Query_Overlord.finalize_query(qb_src)

         # Check that user has viewer access on the source branch.
         source_branch = self.verify_branch_access(qb_src, 
                              Access_Level.viewer, all_errs)
         # NOTE: The job itself is already access-controlled, so generally the 
         # user has arbiter access to the branch at the Current revision.

      self.qb_src = qb_src
Example #17
 def query_builderer(self, *args, **kwargs):
    g.assurt(not (args and kwargs))
    try:
       qb = kwargs['qb']
    except KeyError:
       argn = len(args)
       if argn == 1:
          qb = args[0]
          g.assurt(isinstance(qb, Item_Query_Builder))
       else:
          g.assurt((argn >= 4) and (argn <= 6))
          db = args[0]
          g.assurt(isinstance(db, db_glue.DB))
          username = args[1]
          g.assurt(isinstance(username, str))
          branch_hier = args[2]
          if isinstance(branch_hier, int):
             # NOTE: Using args[3], which is rev.
             # DEPRECATE: This is so weird not using branch_hier_build...
             #        I really think the qb should be built outside the item
             #        classes, so replace all calls to, e.g., 
             #    search_by_stack_id(db, username, branch... _id/_hier, rev)
             # There shouldn't be any callers left using this.... ....right?
             log.warning('Deprecated: reducing branch_hier to leafiest')
             branch_hier = [(branch_hier, args[3], '',)]
          elif isinstance(branch_hier, tuple):
             g.assurt(isinstance(branch_hier[0], int))
             g.assurt(isinstance(branch_hier[1], revision.Revision_Base))
             g.assurt(isinstance(branch_hier[2], basestring))
             branch_hier = [branch_hier,]
             log.debug('query_builderer: making single-tuple branch_hier: %s'
                       % (branch_hier,))
          else:
             log.debug('query_builderer: leaving branch_hier: %s' 
                       % (branch_hier,))
          g.assurt(isinstance(branch_hier, list) 
                   and isinstance(branch_hier[0], tuple))
          rev = args[3]
          # For Diff or Updated, make the qb and call finalize on it.
          g.assurt(isinstance(rev, revision.Current)
                   or isinstance(rev, revision.Historic))
          try:
             viewport = args[4]
             #g.assurt(isinstance(viewport, Query_Viewport))
          except IndexError:
             viewport = None
          try:
             filters = args[5]
             #g.assurt(isinstance(filters, Query_Filters))
          except IndexError:
             filters = None
          if (viewport is None) and (filters is None):
             # A little hack since this fcn. predates the Query_Overlord, so
             # we don't have to refactor old code.
             finalized = True
          else:
             # Who uses this still?
             g.assurt(False)
             finalized = False
          qb = Item_Query_Builder(db, username, branch_hier, rev,
                                  viewport, filters)
          qb.finalize()
          #qb.finalized = finalized
          qb.finalized = True
          # NOTE: I think we don't have to worry about calling 
          #       Query_Overlord.finalize_query(qb) because the multi 
          #       geometry should already have been computed and 
          #       stored as part of qb.filters.
    return qb
Example #18
   def branch_iterate(self, qb, branch_id, branch_callback, debug_limit=None):

      log.debug('branch_iterate: getting tmp db')
      # Get a new qb, and rather than clone the db, get a new connection, lest
      # we cannot commit ("Cannot commit when multiple cursors open").
      db = db_glue.new()
      username = '' # Using gia_userless, so not really needed.
      branch_hier = copy.copy(qb.branch_hier)
      qb_branches = Item_Query_Builder(db, username, branch_hier, qb.revision)

      if branch_id:
         # Find just the one.
         qb_branches.branch_hier_limit = 1

      # Indicate our non-pyserverness so that gia_userless works.
      qb_branches.request_is_local = True
      qb_branches.request_is_script = True

      # Get all active branches, regardless of user rights.
      qb_branches.filters.gia_userless = True

      # If debugging, just grab a handful of results.
      if debug_limit:
         qb_branches.use_limit_and_offset = True
         qb_branches.filters.pagin_count = int(debug_limit)
      g.assurt(qb_branches.sql_clauses is None)

      # Use a generator so that, in the future, when there are millions of
      # branches, this script runs peacefully.
      g.assurt(not qb_branches.db.dont_fetchall)
      qb_branches.db.dont_fetchall = True

      # Leaving as client: qb_branches.filters.min_access_level

      qb_branches.sql_clauses = branch.Many.sql_clauses_cols_all.clone()

      Query_Overlord.finalize_query(qb_branches)

      branches = branch.Many()
      branches.search_get_items(qb_branches)

      log.info('branch_iterate: found %d branches.'
               % (qb_branches.db.curs.rowcount,))

      # Skipping:
      # prog_log = Debug_Progress_Logger(copy_this=debug_prog_log)
      # prog_log.log_freq = 1
      # prog_log.loop_max = qb_branches.db.curs.rowcount

      generator = branches.results_get_iter(qb_branches)

      for branch_ in generator:

         # NOTE: We don't correct self.qb, so callers should be sure not to use
         #       its branch_hier thinking it represents this branch_.

         branch_callback(branch_)

         # Skipping:
         # if prog_log.loops_inc():
         #    break

      # Skipping prog_log.loops_fin()

      generator.close()

      log.debug('branch_iterate: closing tmp db')
      qb_branches.db.close()