Example #1
    def add_revfbk_user(self, alert_users, alert_ok, cur_user):

        if not alert_ok:
            # This means the user did not ask for revision.alert_on_activity.
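            # (Note: the_thread and the_post are not defined in this function;
            # they look like leftovers from mk_evts_revfbk_tds, so this debug
            # call would raise NameError if this branch were reached.)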
            log.debug(
                'add_revfbk_user: alert_on_activity says no: u:%s / t:%s / p:%s'
                % (
                    cur_user,
                    the_thread,
                    the_post,
                ))
        elif cur_user == conf.anonymous_username:
            # Ignore revisions created by ACs (anonymous cowards) since no email.
            log.debug('mk_evts_revfbk_tds: rev. from anon coward.')
            alert_ok = False
        elif User.user_is_script(cur_user):
            # 2012.08.17: Check that the revision wasn't made by a robot.
            log.debug('mk_evts_revfbk_tds: rev. from silly robot.')
            alert_ok = False
        # One might not think that we should email the feedbacker, but we do!
        #  elif cur_user == self.qb.username:
        #     log.debug(
        #       'mk_evts_revfbk_tds: skipping: poster == revver: %s / rid: %d'
        #        % (self.qb.username, row['revision_id'],))
        elif cur_user == self.qb.username:
            log.debug('mk_evts_revfbk_tds: user is reverter: %s' %
                      (cur_user, ))
            alert_ok = True
        else:
            log.debug('mk_evts_revfbk_tds: user is revertee: %s' %
                      (cur_user, ))
            alert_ok = True

        alert_users[cur_user] = alert_ok
Example #2
   def add_members_to_group(self, basemap_qb, common_row,
                                  group_sid, usernames):

      log.debug('add_members_to_group: group_sid: %d.' % (group_sid,))

      grp_mmbs = group_membership.Many()
      grp_mmbs.search_by_group_id(basemap_qb, group_sid)

      group_uids = {}
      for gm in grp_mmbs:
         group_uids[gm.user_id] = gm

      for uname in usernames:
         try:
            user_id = User.user_id_from_username(basemap_qb.db, uname)
         except GWIS_Warning, e:
            user_id = None
            log.warning('add_members_to_group: no such user: %s' % (uname,))
         if user_id:
            if not (user_id in group_uids):
               common_row.update({
                  'stack_id'  : basemap_qb.item_mgr.get_next_client_id(),
                  'group_id'  : group_sid,
                  'user_id'   : user_id,
                  'username'  : uname,
                  })
               new_mmbrship = group_membership.One(qb=basemap_qb,
                                                   row=common_row)
               self.add_members_save_mmbrship(basemap_qb, new_mmbrship)
            else:
               existing_gm = group_uids[user_id]
               g.assurt(existing_gm.access_level_id == Access_Level.editor)
               log.info('add_members: user already member: %s in %s'
                        % (existing_gm.username, existing_gm.group_name,))
Example #3
    def user_validate(self, variant=None):
        '''
        Check the username and password/token included in the GWIS request.

           - If not provided          set self.username to anonymous username
           - If provided and valid    set self.username to validated username
           - If provided and invalid  raise GWIS_Error.
        '''

        log.verbose1('user_validate: variant: %s' % (str(variant), ))
        user = None
        if self.req.doc_in is not None:
            user = self.req.doc_in.find('metadata/user')
        if user is None:
            # No auth data; set username to the anonymous user
            log.info('user_validate: anon: %s / %s' % (
                conf.anonymous_username,
                self.str_remote_ip_and_host_and_local_host(),
            ))
            self.username = conf.anonymous_username
            self.user_group_id = User.private_group_id(self.req.db,
                                                       conf.anonymous_username)
            g.assurt(self.user_group_id > 0)
        else:
            # Parse and validate the username and credentials; raises on error.
            self.user_validate_parse(user, variant)
        if self.username is not None:
            # Check user's access to branch. Raises GWIS_Error if access denied.
            self.req.branch.branch_hier_enforce()
Example #4
 def __init__(self,
              db,
              username,
              branch_hier,
              rev,
              viewport=None,
              filters=None,
              user_id=None):
     g.assurt(db)
     self.db = db
     g.assurt(isinstance(username, basestring))
     self.username = username
     #
     if user_id:
         self.user_id = user_id
     else:
         if self.username:
             try:
                 self.user_id = User.user_id_from_username(db, username)
             except Exception, e:
                 log.debug('User ID not found for user %s: %s' % (
                     username,
                     str(e),
                 ))
                 raise GWIS_Error('User ID not found for user "%s".' %
                                  (username, ))
         else:
Example #5
   def go_main(self):

      do_commit = False

      try:

         #g.assurt(len(self.cli_args.group_ids) == 1) # See: groups_expect_one
         #log.debug('Creating NIPs for group_name %s [%d] / branch %s'
         #          % (self.cli_args.group_names[0],
         #             self.cli_args.group_ids[0],
         #             self.cli_args.branch_id,))

         log.debug('Creating NIPs for branch: %s / policy_profile: %s'
                   % (self.cli_args.branch_id,
                      self.cli_opts.policy_profile,))

         # MAYBE: In most other scripts, we get the revision lock in
         # query_builder_prepare, but this seems better, i.e.,
         # get it when you need it, not just willy-nilly on script startup.
         log.debug('go_main: getting exclusive revision lock...')
         revision.Revision.revision_lock_dance(
            self.qb.db, caller='new_item_policy_init.py')
         log.debug('go_main: database is locked.')

         # MAYBE: There seems to be an awful lot of boilerplate code here.
         self.qb.grac_mgr = Grac_Manager()
         self.qb.grac_mgr.prepare_mgr('user', self.qb)
         #
         # NOTE: I'm not sure we need user_group_id... but it's part of the
         #       boilerplate code... maybe put all this in script_base.py.
         g.assurt(self.qb.username
                  and (self.qb.username != conf.anonymous_username))
         self.qb.user_group_id = User.private_group_id(self.qb.db,
                                                       self.qb.username)

         # Get a new revision ID. Using revision_peek rather than
         # revision_create (we used to use revision_create because our SQL
         # called CURRVAL, but we've fixed our SQL since).
         self.qb.item_mgr.start_new_revision(self.qb.db)
         log.debug('Got rid_new: %d' % (self.qb.item_mgr.rid_new,))

         # Create the new new-item policies.
         self.install_nips(self.qb, self.cli_opts.policy_profile)

         # Save the new revision and finalize the sequence numbers.
         group_names_or_ids = ['Public',]
         #group_names_or_ids = [self.cli_args.group_ids[0],]
         self.finish_script_save_revision(group_names_or_ids)

         log.debug('Committing transaction')

         if debug_skip_commit:
            raise Exception('DEBUG: Skipping commit: Debugging')
         do_commit = True

      except Exception, e:

         # FIXME: g.assurt()s that are caught here have empty msgs?
         log.error('Exception!: "%s" / %s' % (str(e), traceback.format_exc(),))
Example #6
   def get_user_ids_by_where(self, db, extra_where=''):

      (user_ids, invalid_ids, not_okay, user_infos, info_lookup,
         ) = User.spam_get_user_info(db,
                                     extra_where,
                                     self.cli_opts.sort_mode,
                                     make_lookup=False,
                                     ignore_flags=self.cli_opts.ignore_flags)

      self.user_ids += user_ids
      self.invalid_ids += invalid_ids
      self.not_okay += not_okay
      self.user_infos += user_infos
Example #7
   def compose_email_revision(self, rev_rid, rev_row):

#      conf.break_here('ccpv3')

      if rev_row is not None:

         # 2014.07.02: FIXME: test changes to what_username:
         # FIXME: Should use 'host' and 'addr' instead of 'raw_username'.
         rev_username = User.what_username([rev_row['username'],
                                            rev_row['raw_username'],])

         self.msg_text += (
'''
Revision %d by %s at %s
Change note: %s
''' % (rev_row['revision_id'],
       rev_username,
       rev_row['timestamp'],
       rev_row['comment'],))

         self.msg_html += (
'''
<p>
Revision %d by %s at %s
<br/>
Change note: %s
</p>
''' % (rev_row['revision_id'],
       rev_username,
       rev_row['timestamp'],
       rev_row['comment'],))

      else:

         # rev_row is None if the user does not have at least one
         # group_revision record for the revision.

         self.msg_text += (
'''
Revision %d (hidden)
''' % (rev_rid,))

         self.msg_html += (
'''
<p>
Revision %d (hidden)
</p>
''' % (rev_rid,))
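
The composer above keeps parallel plain-text (msg_text) and HTML (msg_html) bodies. These examples never show Cyclopath's actual sending path, but as a rough illustration, parallel bodies like these are conventionally packaged as a multipart/alternative message with the standard library's email package (the names below are hypothetical):

    # Hypothetical sketch; not the project's Watcher_Composer send code.
    from email.mime.multipart import MIMEMultipart
    from email.mime.text import MIMEText

    def build_alternative_message(msg_text, msg_html, subject, from_, to):
        # multipart/alternative lets the mail client pick the richest
        # part it can render; plain text goes first, HTML last.
        msg = MIMEMultipart('alternative')
        msg['Subject'] = subject
        msg['From'] = from_
        msg['To'] = to
        msg.attach(MIMEText(msg_text, 'plain'))
        msg.attach(MIMEText(msg_html, 'html'))
        return msg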
Example #8
 def from_gml(self, qb, elem):
    groupy_base.One.from_gml(self, qb, elem)
    # Resolve the user_id
    if self.user_id and self.username:
       raise GWIS_Error(
          'Attr. confusions: Please specify just "user_id" or "username".')
    elif (not self.user_id) and (not self.username):
       raise GWIS_Error('Missing mandatory attr: "user_id" or "username".')
    elif not self.user_id:
       # FIXME: Should we have a qb passed in, e.g., qb.db and qb.username?
       self.user_id = User.user_id_from_username(self.req.db, 
                                                 self.username)
       log.debug('from_gml: resolved user_id %d from username "%s".' 
                 % (self.user_id, self.username,))
    # Resolve the group_id
    self.from_gml_group_id(qb)
Example #9
    def setup_qb_cur(self, all_errs, min_acl=Access_Level.viewer):

        # For both import and export, qb_src is used to retrieve items from the
        # database, and qb_cur is used to check the user's group accesses and
        # maybe to search for regions if a restrictive bbox is being imposed.
        # But qb_cur is also used during import to save changes to the database;
        # qb_cur is not used during export to save anything to the database.
        #
        # NOTE: On import, we row-lock on the grac tables, group_membership
        # and new_item_policy. We also row-lock the destination branch.
        # So other operations might block while this code runs.
        # CONFIRM: We don't lock anything on export, right?

        qb_cur = None

        username = self.mjob.wtem.created_by

        db = db_glue.new()

        rev = revision.Current(allow_deleted=False)
        (branch_id,
         branch_hier) = branch.Many.branch_id_resolve(db,
                                                      self.mjob.wtem.branch_id,
                                                      branch_hier_rev=rev)

        if branch_id is None:
            # EXPLAIN: How come we don't raise here, like we do in the else?
            #          Or, why doesn't the else block use all_errs?
            #          See: raise_error_on_branch.
            #          And if you look at export_cyclop.substage_initialize,
            #          you'll see that it assurts not all_errs, so I guess
            #          it expects us to raise.
            all_errs.append('setup_qb_cur: not a branch: %s at %s' % (
                self.mjob.wtem.branch_id,
                str(rev),
            ))
        else:

            g.assurt(branch_hier)
            g.assurt(branch_id == branch_hier[0][0])

            raise_error_on_branch = False

            if not self.spf_conf.branch_name:
                # This happens on export, since export_cyclop.substage_initialize
                # only sets branch_id when setting up the qbs. This is because it
                # uses the merge_job's branch_id, and since merge_job is just an
                # item_versioned item, all it has is its branch_id, as items do
                # not also store the branch name.
                self.spf_conf.branch_name = branch_hier[0][2]
            elif self.spf_conf.branch_name != branch_hier[0][2]:
                # The branch name in the shapefile should match.
                log.error('setup_qb_cur: branch_name mismatch: %s / %s' % (
                    self.spf_conf.branch_name,
                    branch_hier[0][2],
                ))
                raise_error_on_branch = True
            # else, the branch_name in the conf matches the one we loaded by ID.
            #
            if self.spf_conf.branch_id != branch_id:
                # But the branch ID we can tolerate being wrong.
                log.warning('setup_qb_cur: unexpected spf_conf.branch_id: %s' %
                            (self.spf_conf.branch_id, ))
                # For the Metc Bikeways shapefile, this just means [lb] hasn't
                # updated the branch ID attribute in the shapefile...
                g.assurt(self.spf_conf.branch_name)
                (try_branch_id,
                 try_branch_hier) = branch.Many.branch_id_resolve(
                     db, self.spf_conf.branch_name, branch_hier_rev=rev)
                if try_branch_id == branch_id:
                    log.warning('setup_qb_cur: ok: overriding branch_id: %s' %
                                (branch_id, ))
                    self.spf_conf.branch_id = branch_id
                else:
                    log.error(
                        'setup_qb_cur: try_branch_id != branch_id: %s != %s' %
                        (
                            try_branch_id,
                            branch_id,
                        ))
                    raise_error_on_branch = True

            if raise_error_on_branch:
                if conf.break_on_assurt:
                    import pdb
                    pdb.set_trace()
                raise GWIS_Error(
                    'Shapefile branch ID and name do not match job details: '
                    'work_item: %s/%s | shapefile: %s/%s' % (
                        branch_hier[0][2],
                        branch_hier[0][0],
                        self.spf_conf.branch_name,
                        self.spf_conf.branch_id,
                    ))

            qb_cur = Item_Query_Builder(db, username, branch_hier, rev)

            # Load both the raw geometry and the WKT geometry; we need to be
            # flexible.
            qb_cur.filters.skip_geometry_raw = False
            qb_cur.filters.skip_geometry_svg = True
            qb_cur.filters.skip_geometry_wkt = False

            # To save things, we need to set the group ID explicitly.
            self.user_group_id = User.private_group_id(qb_cur.db, username)
            qb_cur.user_group_id = self.user_group_id

            qb_cur.item_mgr = Item_Manager()
            # Load the attachment cache now. On import, if we create new
            # attributes (see metc_bikeways_defs.py), we'll keep it updated.
            qb_cur.item_mgr.load_cache_attachments(qb_cur)

            Query_Overlord.finalize_query(qb_cur)

            # FIXME: This comment. I like it. But it's not true... yet.
            #  Getting row lock in branches_prepare. So don't table lock.
            #
            # Start the transaction, since the grac_mgr does some row locking.
            # We'll keep the rows locked until we've verified permissions.
            # FIXME: Verify you rollback and start a new 'revision' lock...
            #        or maybe just start a new 'revision' lock? or can you
            #        write to a Shapefile first and zip through the Shapefile
            #        to save quickly and not hold the lock so long?
            # BUG nnnn: Investigate using a row-level branch lock; for now,
            #           just lock rev.
            qb_cur.db.transaction_begin_rw()

            qb_cur.grac_mgr = Grac_Manager()
            load_grp_mmbrshps = True
            qb_cur.grac_mgr.prepare_mgr('user', qb_cur, load_grp_mmbrshps)

            # FIXME: Does qb_src need grac_mgr?
            #self.qb_src.grac_mgr = qb_cur.grac_mgr

            # Check user's minimum access level.
            target_branch = self.verify_branch_access(qb_cur, min_acl,
                                                      all_errs)
            g.assurt(target_branch.stack_id == self.spf_conf.branch_id)
            if (self.spf_conf.branch_name and
                (self.spf_conf.branch_name != qb_cur.branch_hier[0][2])):
                log.warning('Unexpected spf_conf.branch_name: %s' %
                            (self.spf_conf.branch_name, ))
            self.spf_conf.branch_name = qb_cur.branch_hier[0][2]

        self.qb_cur = qb_cur

        log.debug('setup_qb_cur: spf_conf: %s' % (str(self.spf_conf), ))
Example #10
    def setup_links(self):

        # First count some table rows and double-check the upgrade so far. We
        # want to be confident we're getting all the CcpV1 records and making
        # appropriate CcpV2 records.
        try:
            self.setup_links_sanity_check()
        except:
            log.warning('setup_links: old CcpV1 already dropped; moving on...')

        # Now get the unique set of usernames. We're going to create items owned
        # by certain users, and we'll need to set up resources for each user,
        # like the query_builder and the grac_mgr.

        usernames_sql = ("""
         SELECT DISTINCT (username)
         FROM item_watcher_bug_nnnn
         ORDER BY username
         """)

        # NOTE: We're not bothering with dont_fetchall.
        #       There are only a few hundred rows...

        rows = self.qb.db.sql(usernames_sql)

        log.debug('setup_links: found %d unique users with watchers' %
                  (len(rows), ))

        if not rows:
            log.error('setup_links: nothing found')
            g.assurt(False)

        for row in rows:

            username = row['username']

            # Hmm. There's no user.One() class to load a user. It's all custom.
            user_rows = self.qb.db.sql(
                "SELECT login_permitted FROM user_ WHERE username = %s" %
                (self.qb.db.quoted(username), ))
            g.assurt(len(user_rows) == 1)
            if not user_rows[0]['login_permitted']:
                log.debug('setup_links: skipping: !user_.login_permitted: %s' %
                          (username, ))
                continue

            log.verbose2('setup_links: processing username: %s' % (username, ))

            g.assurt(isinstance(self.qb.revision, revision.Current))
            rev_cur = revision.Current()

            user_qb = Item_Query_Builder(self.qb.db, username,
                                         self.qb.branch_hier, rev_cur)
            user_qb.grac_mgr = Grac_Manager()
            user_qb.grac_mgr.prepare_mgr('user', user_qb)
            #
            g.assurt(user_qb.username
                     and (user_qb.username != conf.anonymous_username))
            user_qb.user_group_id = User.private_group_id(
                user_qb.db, user_qb.username)
            #
            # Use the same item_mgr so we pull client stack IDs from the same
            # pool.
            user_qb.item_mgr = self.qb.item_mgr

            # Finalize the query. This sets revision.gids so it'll include the
            # user's private group (and the anonymous and All Users groups).
            Query_Overlord.finalize_query(user_qb)

            # We can still get deleted regions and add links for them.
            user_qb.revision.allow_deleted = True

            # Finally, update the database. Oi, there's a lot of setup!
            self.setup_links_for_user(user_qb)

            # The way Item_Query_Builder works, it usually wires the branch_hier
            # revision to the revision revision.
            g.assurt(self.qb.branch_hier[0][1] == rev_cur)
            # We'll reuse the branch_hier so clear this user's gids.
            self.qb.branch_hier[0][1].gids = None
Example #11
   def send_alerts(self, service_delay):

      #log.debug('send_alerts: service_delay: %s (%s)'
      #   % (service_delay,
      #      Watcher_Frequency.get_watcher_frequency_name(service_delay),))

      # Select item alerts in an order that makes it easy to compose emails:
      # 1. We email one user at a time, so order by user first.
      # 2. Events for some message types can be coalesced but
      #    for other message types, we send one alert per email.

      self.qb.db.dont_fetchall = True

      fetch_events_sql = (
         """
         SELECT
              iea.messaging_id
            , iea.username
            , iea.branch_id
            , iea.msg_type_id
            , iea.latest_rid
            , iea.item_stack_id
            , iea.service_delay
            , iea.watcher_stack_id
            , iea.notifier_dat
            , u.email
            , u.enable_watchers_email
            , u.unsubscribe_proof
         FROM item_event_alert AS iea
         JOIN user_ AS u
            ON (iea.username = u.username)
         WHERE
                (date_alerted IS NULL)
            AND (service_delay = %d)
            AND ((ripens_at IS NULL)
                 OR (ripens_at <= NOW()))
         ORDER BY
              iea.username ASC
            , iea.msg_type_id DESC
            , iea.branch_id ASC
            , iea.latest_rid DESC
            , iea.item_stack_id DESC
            , iea.item_id DESC
         """ % (service_delay,))

      rows = self.qb.db.sql(fetch_events_sql)
      g.assurt(rows is None)

      if not self.qb.db.curs.rowcount:
         log.debug('send_alerts: zero events for service_delay: %s'
            % (Watcher_Frequency.get_watcher_frequency_name(service_delay),))
      else:
         log.debug('send_alerts: found %d events (%s)'
            % (self.qb.db.curs.rowcount,
               Watcher_Frequency.get_watcher_frequency_name(service_delay),))

      # Get a cursor for the processing fcns, since we're iterating over ours.
      event_db = self.qb.db.clone()
      event_db.dont_fetchall = False

      db = self.qb.db.clone()
      db.dont_fetchall = False
      #log.debug('send_alerts: self.qb.db.curs: %s / db.curs: %s'
      #          % (id(self.qb.db.curs), id(db.curs),))

      cur_email = None

      all_msgng_ids = []

      generator = self.qb.db.get_row_iter()
      for row in generator:

         messaging_id = row['messaging_id']
         username = row['username']
         branch_id = row['branch_id']
         msg_type_id = row['msg_type_id']
         latest_rid = row['latest_rid']
         item_stack_id = row['item_stack_id']
         service_delay = row['service_delay']
         watcher_stack_id = row['watcher_stack_id']
         notifier_dat = row['notifier_dat']
         email_addy = row['email']
         # FIXME: Delete column user_.enable_watchers_digest
         enable_watchers_email = row['enable_watchers_email']
         unsubscribe_proof = row['unsubscribe_proof']

         all_msgng_ids.append(messaging_id)

         g.assurt(username)
         if (cur_email is None) or (cur_email.username != username):

            if cur_email is not None:
               cur_email.emails_send_coalesced_events()
               cur_email = None

            users_group_id = User.private_group_id(event_db, username)

            cur_email = Watcher_Composer(
               event_db,
               username,
               users_group_id,
               email_addy,
               service_delay,
               unsubscribe_proof)

         do_add_event = False
         if watcher_stack_id:
            still_active_sql = (
               """
               SELECT lv.value_integer
                 FROM group_item_access AS gia
                 JOIN link_value AS lv
                   ON (gia.item_id = lv.system_id)
                WHERE
                      gia.stack_id = %d
                  AND gia.group_id = %d
                  AND gia.valid_until_rid = %d
                ORDER BY
                      gia.version DESC,
                      gia.acl_grouping DESC
                LIMIT 1
               """ % (watcher_stack_id,
                      users_group_id,
                      conf.rid_inf,))

            rows = event_db.sql(still_active_sql)
            if rows:
               watcher_freq = rows[0]['value_integer']
               log.debug(
                  'send_alerts: watcher_stack_id: %s / usr: %s / freq: %s'
                  % (watcher_stack_id,
                     username,
                     Watcher_Frequency.get_watcher_frequency_name(
                                                      watcher_freq),))
               g.assurt(len(rows) == 1)
               if watcher_freq != Watcher_Frequency.never:
                  do_add_event = True
            else:
               log.debug('send_alerts: watcher inactive: %s / usr: %s'
                         % (watcher_stack_id, username,))
         # end: if watcher_stack_id
         else:
            # No watcher_stack_id, meaning this is revision.alert_on_activity.
            g.assurt(msg_type_id in (
               Watcher_Parts_Base.MSG_TYPE_REV_FEEDBACK,
               Watcher_Parts_Base.MSG_TYPE_REV_REVERT,
               Watcher_Parts_Base.MSG_TYPE_RTE_REACTION,))
            do_add_event = True

         if do_add_event:
            if enable_watchers_email:
               # For watched items and watch regions, we'll coalesce events
               # into one email and we'll send the email via compose_and_send;
               # for the other msg_types, we'll send the email here.
               cur_email.process_event(messaging_id, msg_type_id, branch_id,
                                       latest_rid, item_stack_id, notifier_dat)
            else:
               log.debug('send_alerts: watchers disabled; skip evt: id: %d'
                         % (messaging_id,))
         else:
            log.debug('send_alerts: watcher is never; skip evt: id: %d'
                      % (messaging_id,))

      # end: for row in generator

      if cur_email is not None:
         # On the last user, we'll exit the loop before emailing, so,
         # special case, email the last user.
         cur_email.emails_send_coalesced_events()
         cur_email = None

      # The Watcher_Composer handled finalizing messaging IDs, so
      # this call should say 0 rows updated.
      if all_msgng_ids:
         row_count = Watcher_Composer.finalize_alerts(all_msgng_ids, None)
         if row_count:
            log.warning(
               'send_alerts: unexpected finalize_alerts row_count: %s'
               % (row_count,))

      # Cleanup in aisle db!
      #log.debug('send_alerts: cleanup: db.curs: %s' % (id(db.curs),))
      db.close()
      event_db.close()

      log.debug('send_alerts: emailed usernames: %s'
                % (Watcher_Composer.emailed_usernames,))
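
The loop above depends on the ORDER BY iea.username clause: each user's events arrive consecutively, so a change of username triggers emails_send_coalesced_events() for the previous user. A minimal sketch of the same coalesce-by-user pattern, assuming rows is any iterable of dicts already sorted by 'username' (send_digest is a hypothetical stand-in):

    # Illustrative only; groupby merges adjacent rows, so the input must
    # already be sorted by username -- exactly what the SQL ORDER BY does.
    from itertools import groupby
    from operator import itemgetter

    def coalesce_by_user(rows, send_digest):
        for username, events in groupby(rows, key=itemgetter('username')):
            send_digest(username, list(events))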
Example #12
   def sql_apply_query_filters(self, qb, where_clause="", conjunction=""):

      g.assurt((not conjunction) or (conjunction == "AND"))

      # We can only call sql_where_filter_linked once per query, so we can't
      # combine about_stack_ids, filter_by_watch_feat, filter_by_nearby_edits,
      # or a viewport query.
      sql_where_filter_linked_cnt = 0
      if qb.filters.about_stack_ids:
         sql_where_filter_linked_cnt += 1
      if qb.filters.filter_by_watch_feat:
         sql_where_filter_linked_cnt += 1
      if ((qb.viewport is not None) and (qb.viewport.include)
          or qb.filters.only_in_multi_geometry):
         sql_where_filter_linked_cnt += 1
      if qb.filters.filter_by_nearby_edits:
         sql_where_filter_linked_cnt += 1
      if sql_where_filter_linked_cnt > 1:
         raise GWIS_Error('Please choose just one: about_stack_ids, '
                          'filter_by_watch_feat, filter_by_nearby_edits, '
                          'or viewport.')

      if qb.filters.about_stack_ids:
         linked_items_where = self.sql_where_filter_about(qb)
         g.assurt(linked_items_where)
         where_clause += " %s %s " % (conjunction, linked_items_where,)
         conjunction = "AND"

      if qb.filters.filter_by_watch_feat:
         # FIXME: Debug, then combine handlers for filter_by_watch_feat
         #                                     and only_in_multi_geometry.
         feat_qb = qb.clone(skip_clauses=True, skip_filtport=True)
         feat_qb.filters = Query_Filters(req=None)
         qfs = feat_qb.filters
         # Set filter_by_watch_item=True and search for geofeatures
         # that the user is watching.
         qfs.filter_by_watch_item = qb.filters.filter_by_watch_feat
         g.assurt(not qb.filters.only_in_multi_geometry)
         g.assurt((qb.viewport is None) or (qb.viewport.include is None))
         feat_qb.finalize_query()
         feat_qb.sql_clauses = geofeature.Many.sql_clauses_cols_all.clone()
         feats = geofeature.Many()
         feats_sql = feats.search_get_sql(feat_qb)
         feat_stack_id_table_ref = 'temp_stack_id__watch_feat'
         thurrito_sql = (
            """
            SELECT
               stack_id
            INTO TEMPORARY TABLE
               %s
            FROM
               (%s) AS foo_feat_sid_1
            """ % (feat_stack_id_table_ref,
                   feats_sql,))
         rows = qb.db.sql(thurrito_sql)
         #
         join_on_to_self = self.sql_where_filter_linked_join_on_to_self(qb)
         where_on_other = ""
         join_on_temp = (
            """
            JOIN %s
               ON (flv.rhs_stack_id = %s.stack_id)
            """ % (feat_stack_id_table_ref,
                   feat_stack_id_table_ref,))
         linked_items_where = self.sql_where_filter_linked(qb, join_on_to_self,
                                                               where_on_other,
                                                               join_on_temp)
         #
         where_clause += " %s %s " % (conjunction, linked_items_where,)
         conjunction = "AND"

      # 2013.04.02: Freshly implemented in CcpV2. Not the quickest fcn., but it
      #             works.
      #             MAYBE: Disable this until we can find a better solution?
      #             MEH: [lb] got it under 15 seconds, I think. Good enough.
      if qb.filters.filter_by_nearby_edits:

         '''
         g.assurt(False) # FIXME: This code is broke!

         join = ' JOIN post_geo pg ON (p.id = pg.id)'
         where_clause += (
            """
            %s 
            -- FIXME: Instead of ST_Intersects/ST_Buffer, try: ST_DWithin
               (ST_Intersects(
                  pg.geometry,
                  (SELECT ST_Buffer(collect(rr.geometry), 0)
                   FROM revision rr
                   WHERE
                     rr.username = %s
                     AND NOT is_social_rev(rr.id)))
            """ % (conjunction,
                   qb.db.quoted(qb.username),))
         conjunction = "AND"
         '''

         # FIXME: This was the older SQL snippet used for this filter. It
         #        was waaaaaayyyyy too slow. The one I used instead is also
         #        slow, but it doesn't time out, at least.
         #        [lb] notes that his database is missing geometry indices;
         #        adding them didn't quite halve the time, from 52 secs.
         #        to 29 secs. We need to run db_load_add_constraints.sql on
         #        the db.
         #        
         # sql_or_sids = (
         #    """
         #    SELECT
         #       stack_id
         #    FROM
         #       geofeature AS gf_near
         #    WHERE
         #       ST_Intersects(
         #          gf_near.geometry,
         #          (
         #             SELECT
         #                ST_Buffer(collect(rr.geometry), 0)
         #             FROM
         #                revision rr
         #             WHERE
         #                rr.username = %s
         #                -- AND NOT is_social_rev(rr.id)
         #          )
         #       )
         #    """
         #    ) % (qb.db.quoted(qb.username),)

         # FIXME: Very slow query: ~ 42 sec.
         '''
         sql_or_sids = (
            """
            SELECT
               stack_id
            FROM
               geofeature AS gf_near
               JOIN revision AS rv_near
                  ON ST_Intersects(gf_near.geometry, rv_near.geometry)
            WHERE
               rv_near.username = %s
               -- AND NOT is_social_rev(rv_near.id)
            """
            ) % (qb.db.quoted(qb.username),)
         '''

         # MAYBE: Why isn't setting user_group_id part of finalize_query?
         #g.assurt(not qb.user_group_id)
         if not qb.user_group_id:
            qb.user_group_id = User.private_group_id(qb.db, qb.username)
            g.assurt(qb.user_group_id)

         geometry_table_ref = 'temp_geometry__edited_items'
         geometry_sql = (
            """
            SELECT
               ST_Buffer(ST_Collect(grev.geometry), 0) AS geometry
            INTO TEMPORARY TABLE
               %s
            FROM
               group_revision AS grev
            WHERE
               grev.group_id = %d
            """ % (geometry_table_ref,
                   qb.user_group_id,))
         # 2013.04.02: On [lb]: Time: 405.073 ms
         rows = qb.db.sql(geometry_sql)

         # NOTE: This is a broad query: if a revision contains edits far apart,
         #       we'll find all the geofeatures in between. E.g., for [lb], it
         #       finds hundreds of thousands of byways; not very useful.
         item_stack_id_table_ref = 'temp_stack_id__edited_items'
         about_stack_ids_sql = (
            """
            SELECT
               DISTINCT(stack_id)
            INTO TEMPORARY TABLE
               %s
            FROM
               geofeature AS feat
            JOIN
               %s AS grev
               ON ST_Intersects(feat.geometry, grev.geometry)
            """ % (item_stack_id_table_ref,
                   geometry_table_ref,
                   ))
         # 2013.04.02: On [lb]: Time: 13106.527 ms
         rows = qb.db.sql(about_stack_ids_sql)
         #
         join_on_to_self = self.sql_where_filter_linked_join_on_to_self(qb)
         where_on_other = ""
         join_on_temp = (
            """
            JOIN %s
               ON (flv.rhs_stack_id = %s.stack_id)
            """ % (item_stack_id_table_ref,
                   item_stack_id_table_ref,))
         linked_items_where = self.sql_where_filter_linked(qb, join_on_to_self,
                                                               where_on_other,
                                                               join_on_temp)
         #
         where_clause += " %s %s " % (conjunction, linked_items_where,)
         conjunction = "AND"

      # Only select posts whose name matches the user's search query.
      where_clause = item_user_watching.Many.sql_apply_query_filters(
                                 self, qb, where_clause, conjunction)

      return where_clause
Example #13
   def go_main(self):

      # Skipping: Ccp_Script_Base.go_main(self)

      do_commit = False

      try:

         log.debug('go_main: getting exclusive revision lock...')
         revision.Revision.revision_lock_dance(
            self.qb.db, caller='bike_facility_populate__go_main')
         log.debug('go_main: database is locked.')

         # MAYBE: There seems to be an awful lot of boilerplate code here.
         self.qb.grac_mgr = Grac_Manager()
         self.qb.grac_mgr.prepare_mgr('user', self.qb)
         # A developer is running this script.
         g.assurt(self.qb.username
                  and (self.qb.username != conf.anonymous_username))
         self.qb.user_group_id = User.private_group_id(self.qb.db,
                                                       self.qb.username)

         # Get a new revision ID.
         self.qb.item_mgr.start_new_revision(self.qb.db)
         log.debug('Got rid_new: %d' % (self.qb.item_mgr.rid_new,))

         # Get the Bike Facility attribute.
         internal_name = '/byway/cycle_facil'
         self.attr_cycle_facil = attribute.Many.get_system_attr(
                                          self.qb, internal_name)
         g.assurt(self.attr_cycle_facil is not None)

         # Get the Controlled Access attribute.
         internal_name = '/byway/no_access'
         self.attr_no_access = attribute.Many.get_system_attr(
                                          self.qb, internal_name)
         g.assurt(self.attr_no_access is not None)

         # BUG nnnn: New Script: Populate '/byway/cycle_route' by...
         #                       stack IDs? byway names? maybe easier
         #                       just to do in flashclient....

         self.byways_suss_out_facilities()

         # Save the new revision and finalize the sequence numbers.
         log.debug('go_main: saving rev # %d' % (self.qb.item_mgr.rid_new,))

         # NOTE: We're cheating here: We know only the public group needs
         #       group_revision records, since all new items were only public.
         # Either of these should be acceptable:
         # group_names_or_ids = ['Public',]
         group_names_or_ids = [group.Many.public_group_id(self.qb),]
         # MAYBE: Give credit to user who runs this script, or _script?
         #        I.e., where is the accountability if user like/dislike
         #        application (calculation) of this new attribute?
         #
         #complain_to_this_user = '******'
         #complain_to_this_user = self.qb.username
         complain_to_this_user = '******'
         #
         changenote = ('Populated new bike facility attr. using existing '
                       + 'tags and attrs. (i.e., guessing!).')
         #
         self.finish_script_save_revision(group_names_or_ids,
                                          username=complain_to_this_user,
                                          changenote=changenote)

         self.print_stats()

         if debug_skip_commit:
            raise Exception('DEBUG: Skipping commit: Debugging')
         do_commit = True

      except Exception, e:

         log.error('Exception!: "%s" / %s' % (str(e), traceback.format_exc(),))
Example #14
   def user_validate_parse(self, user, variant):
      '''
      Called if the GWIS client includes a username. Here we verify the
      username and password or token. The fcn. raises GWIS_Error if the
      user cannot be authenticated.
      '''

      valid = False

      username = user.get('name').lower()
      password = user.get('pass')
      sdsecret = user.get('ssec')
      token = user.get('token')
      g.assurt((password is None) or (token is None))

      # NOTE: In CcpV1, route reactions adds save_anon. E.g.,
      #    # If save_anon is set, then perform the save as an anonymous user.
      #    save_anon = bool(int(self.decode_key('save_anon', False)))
      #    if save_anon:
      #       self.req.username = None
      # but this is hopefully unnecessary. And it seems... wrong, like, we
      # should set a bool that the user wants to save anonymously, otherwise
      # none of the code will know there's a real user here (e.g., for tracing
      # or storing stats).

      # *** MAGIC AUTH

      if ((conf.magic_localhost_auth)
          and (self.ip_addr == conf.magic_auth_remoteip)):

         # HACK: localhost can be whomever they like
         # This is similar to the ccp script's --no-password, but this is used
         # for spoofing another user via flashclient.
         # Logs an Apache warning.
         self.req.p_warning('user %s from localhost: magic auth' % (username,))
         # Redundantly log a pyserver warning.
         # FIXME: Do we really need to log Apache warnings at all?
         log.warning('SPOOFING USER: %s from %s' % (username, self.ip_addr,))

      # *** PASSWORD AUTH

      elif variant == 'password':

         if password:
            r = self.req.db.sql("SELECT login_ok(%s, %s)",
                                (username, password,))
            g.assurt(len(r) == 1)
            if not r[0]['login_ok']:
               self.auth_failure(username, 'password')
               raise GWIS_Warning(
                  'Incorrect username and/or password.',
                  tag=None, logger=log.info)
         elif not sdsecret:
            raise GWIS_Warning(
               'Please specify a password with that username.',
               tag=None, logger=log.info)

         log.info('user_validate: pwdd: %s / %s'
                  % (username,
                     self.str_remote_ip_and_host_and_local_host(),))

      # FIXME: Statewide UI: Cleanup Session IDs records on login/logout.
      # FIXME: Use cron job to mark date_expired where last_modified is
      #        some number of minutes old? flashclient log should keep
      #        token active, right?
      #        See the unimplemented: gwis/command_/user_goodbye.py
      #        2014.09.09: The date_expired field in really old tokens
      #                    is still NULL...

      # *** TOKEN AUTH

      elif variant == 'token':

         log.verbose1('user_validate_parse: token: %s / username: %s'
                      % (token, username,))

         if token is None:
            log.warning('user_validate_parse: EXPLAIN: Why is the token None?')
            raise GWIS_Warning('Token not found! Please login again.',
                               'badtoken')

         # Avoid transaction_retryable, at least so long as it expects a
         # specific TransactionRollbackError, but transaction_lock_row_logic
         # simply raises Exception.
         #   success = self.req.db.transaction_retryable(
         #      self.user_token_verify, self.req, token, username)
         # 2013.09.25: Using SELECT... FOR UPDATE NOWAIT seemed to work okay
         # until [lb] started running runic's daily cron job and also a
         # shapefile import script -- then all lock-rows came back failed.
         # But the scripts aren't even touching that database or the
         # user__token row! What gives?! I searched and couldn't find any
         # indication that NOWAIT and FOR UPDATE do anything other than on
         # the row on which they're supposed to behave... so this is truly
         # strange. So now db_glue uses STATEMENT_TIMEOUT instead of NOWAIT.
         found_row = self.user_token_verify(token, username)

         if found_row is None:
            log.info(
               'user_validate_parse: timeout on token verify: username: %s'
               % (username,))
            raise GWIS_Warning(
               'Please try your request again (server very busy).',
               'sadtoken',
               logger=log.info)
         elif found_row is False:
            log.warning(
               'user_validate_parse: not found_row: token: %s / username: %s'
               % (token, username,))
            raise GWIS_Warning(
               'Please log off and log back on (incorrect token).',
               'badtoken')
         # else, found_row is True

         # EXPLAIN: Does p_notice write to Apache log? We're fine, because
         #          GWIS_Warning writes to the pyserver log... right?
         #self.req.p_notice('tokens: %s %s' % (token, token_valid,))

         if not self.token_ok:
            # [lb] guesses this is unreachable; would've raised an exception by now.
            log.debug('user_validate_parse: token not ok: %s' % (token,))
            self.auth_failure(username, 'token')
            raise GWIS_Warning(
               'Please log off and log back on (incorrect token).',
               'madtoken')

      # *** MISSING AUTH

      else:
         # No match for variant.
         log.warning('user_validate_parse: unknown variant: %s' % (variant,))
         raise GWIS_Error('Unknown variant.', 'badvariant')

      # *** SHARED SECRET

      if sdsecret:
         log.debug('user_validate_parse: using shared_secret to login')
         if (   ('' == conf.gwis_shared_secret)
             or (sdsecret != conf.gwis_shared_secret)
             or (not self.request_is_local)):
            log.error('Expected: %s / Got: %s / Local: %s'
               % (conf.gwis_shared_secret, sdsecret, self.request_is_local,))
            raise GWIS_Error('Whatchutalkinboutwillis?', 'badssec')
         self.request_is_secret = True

      # *** And The Rest.

      # If we got and verified a token, the username was checked against what's
      # in the db, so it should be clean. But if the username contains a quote
      # in it, we want to make sure it's delimited properly.
      # This is the simplest form of SQL injection: add a single quote and
      # a true result and then terminate the statement, e.g., say the
      # username is:
      #     ' or 1=1;--
      # E.g., SELECT * FROM user_ WHERE username='joe' AND password='xxxx';
      #   could be turned into, with, e.g., "fake_username' OR 1=1; --"
      #       SELECT * FROM user_
      #        WHERE username='fake_username' OR 1=1; -- AND password='xxxx';
      # Of course, this is just a trivial example.

      self.username = urllib.quote(username).strip("'")
      if self.username != username:
         raise GWIS_Warning('Bad username mismatch problem.',
                            'badquoteusername')

      if self.req.areq is not None:
         # Update Apache request_rec struct so username is recorded in logs.
         self.req.areq.user = username

      # Get the user ID
      self.user_id = User.user_id_from_username(self.req.db, username)
      g.assurt(self.user_id > 0)

      # Get the user's private group ID
      self.user_group_id = User.private_group_id(self.req.db, self.username)
      g.assurt(self.user_group_id > 0)

      # FIXME: We checked 'metadata/device' above, and now 'metadata/user' --
      #        which one is it?
      # BUG nnnn: Don't rely on this value, since the client can spoof it.
      if not self.request_is_mobile:
         self.request_is_mobile = user.get('is_mobile', False)
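
A quick illustration of the quote round-trip check above, as a minimal
sketch (urllib is the Python 2 standard library; the sample username is
hypothetical): urllib.quote percent-encodes the single quote, so any
username carrying one fails the equality test and the request is refused.

import urllib

username = "fake_username' OR 1=1; --"
cleaned = urllib.quote(username).strip("'")
# quote() percent-encodes the quote (%27) and the spaces (%20), so the
# round-trip no longer matches and user_validate_parse would raise:
assert cleaned != username
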
Example n. 22
   def compose_email_item_list(self, qb, msg_type_id, items_fetched):

      for item in items_fetched:

         g.assurt(item.real_item_type_id == Item_Type.post)

         # The item is the post. Get the post and the thread.
         #qb.filters.context_stack_id = {thread_stack_id}
         posts = post.Many()
         posts.search_by_stack_id(item.stack_id, qb)
         if len(posts) > 0:
            g.assurt(len(posts) == 1)
            the_post = posts[0]
         else:
            the_post = None
            log.warning('_compose_item_list: cannot see post: %s / %s'
                        % (qb.username, item.stack_id,))

         if the_post is not None:

            threads = thread.Many()
            threads.search_by_stack_id(the_post.thread_stack_id, qb)

            if len(threads) > 0:
               g.assurt(len(threads) == 1)
               the_thread = threads[0]

               # 2014.07.02: FIXME: test changes to what_username:
               post_username = User.what_username([the_post.edited_user,
                                                   the_post.edited_host,
                                                   the_post.edited_addr,])

               # A CcpV1 link:
               deeplink_text_v1 = (
                  'http://%s/#discussion?thread_id=%d&post_id=%d'
                  % (conf.server_name,
                     the_thread.stack_id,
                     the_post.stack_id,))
               # MAYBE: A CcpV2 link so the user is asked to log on:
               deeplink_text_v2 = (
                  'http://%s/#private?type=post&link=%d'
                  % (conf.server_name,
                     the_post.stack_id,))
               log.debug('MAYBE: deeplink_text_v2: %s' % (deeplink_text_v2,))

               self.msg_text += (
'''Discussion: %s
Posted by:  %s
View post:  %s (Flash required)
+-----
%s
+-----
'''               % (the_thread.name,
                     post_username,
                     deeplink_text_v1,
                     the_post.body,))
               self.msg_html += (
'''<table>
<tr><td>Discussion:</td> <td>%s</td></tr>
<tr><td>Posted by:</td> <td>%s</td></tr>
<tr><td>View post:</td> <td><a href="%s">%s</a> (Flash required)</td></tr>
</table><br/>
+-----<br/>
%s<br/>
+-----<br/>
'''               % (the_thread.name,
                     post_username,
                     deeplink_text_v1, 
                     deeplink_text_v1, 
                     the_post.body,))

            else: # len(threads) == 0
               log.warning(
                  '_compose_item_list: cannot see thread: %s / %s'
                  % (qb.username, the_post.thread_stack_id,))
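
For reference, the CcpV1 deep link above is plain string interpolation; a
minimal sketch with hypothetical values (the server name and stack IDs are
made up):

server_name = 'cycloplan.cyclopath.org'  # hypothetical
thread_sid = 123456                      # hypothetical stack IDs
post_sid = 123457
deeplink = ('http://%s/#discussion?thread_id=%d&post_id=%d'
            % (server_name, thread_sid, post_sid,))
# -> http://cycloplan.cyclopath.org/#discussion?thread_id=123456&post_id=123457

Example n. 23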
    def go_main(self):

        do_commit = False

        try:

            #g.assurt(len(self.cli_args.group_ids) == 1) # See: groups_expect_one
            #log.debug('Creating NIPs for group_name %s [%d] / branch %s'
            #          % (self.cli_args.group_names[0],
            #             self.cli_args.group_ids[0],
            #             self.cli_args.branch_id,))

            log.debug('Creating NIPs for branch: %s / policy_profile: %s' % (
                self.cli_args.branch_id,
                self.cli_opts.policy_profile,
            ))

            # MAYBE: In most other scripts, we get the revision lock in
            # query_builder_prepare, but this seems better, i.e., get it
            # when you need it, not just willy-nilly on script startup.
            log.debug('go_main: getting exclusive revision lock...')
            revision.Revision.revision_lock_dance(
                self.qb.db, caller='new_item_policy_init.py')
            log.debug('go_main: database is locked.')

            # MAYBE: There seems to be an awful lot of boilerplate code here.
            self.qb.grac_mgr = Grac_Manager()
            self.qb.grac_mgr.prepare_mgr('user', self.qb)
            #
            # NOTE: I'm not sure we need user_group_id... but it's part of the
            #       boilerplate code... maybe put all this in script_base.py.
            g.assurt(self.qb.username
                     and (self.qb.username != conf.anonymous_username))
            self.qb.user_group_id = User.private_group_id(
                self.qb.db, self.qb.username)

            # Get a new revision ID. Using revision_peek rather than
            # revision_create (we used to use revision_create because our
            # SQL called CURRVAL, but we've fixed our SQL since).
            self.qb.item_mgr.start_new_revision(self.qb.db)
            log.debug('Got rid_new: %d' % (self.qb.item_mgr.rid_new, ))

            # Create the new new item policies.
            self.install_nips(self.qb, self.cli_opts.policy_profile)

            # Save the new revision and finalize the sequence numbers.
            group_names_or_ids = [
                'Public',
            ]
            #group_names_or_ids = [self.cli_args.group_ids[0],]
            self.finish_script_save_revision(group_names_or_ids)

            log.debug('Committing transaction')

            if debug_skip_commit:
                raise Exception('DEBUG: Skipping commit: Debugging')
            do_commit = True

        except Exception, e:

            # FIXME: g.assurt()s that are caught here have empty msgs?
            log.error('Exception!: "%s" / %s' % (
                str(e),
                traceback.format_exc(),
            ))
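
The MAYBE comments above suggest hoisting this boilerplate into
script_base.py; a minimal sketch of such a helper, using only the calls
that appear in this listing (whether script_base.py grew an equivalent is
an assumption):

def prepare_script_revision(qb, caller):
    # Bundle the per-script boilerplate: revision lock, grac manager,
    # sanity checks, private group ID, and a fresh revision ID.
    revision.Revision.revision_lock_dance(qb.db, caller=caller)
    qb.grac_mgr = Grac_Manager()
    qb.grac_mgr.prepare_mgr('user', qb)
    g.assurt(qb.username and (qb.username != conf.anonymous_username))
    qb.user_group_id = User.private_group_id(qb.db, qb.username)
    qb.item_mgr.start_new_revision(qb.db)
    return qb.item_mgr.rid_new
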
Example n. 24
    def user_validate_parse(self, user, variant):
        '''
      Called if the GWIS client includes a username. Here we verify the
      username and password or token. The fcn. raises GWIS_Error if the
      user cannot be authenticated.
      '''

        valid = False

        username = user.get('name').lower()
        password = user.get('pass')
        sdsecret = user.get('ssec')
        token = user.get('token')
        g.assurt((password is None) or (token is None))

        # NOTE: In CcpV1, route reactions adds save_anon. E.g.,
        #    # If save_anon is set, then perform the save as an anonymous user.
        #    save_anon = bool(int(self.decode_key('save_anon', False)))
        #    if save_anon:
        #       self.req.username = None
        # but this is hopefully unnecessary. And it seems... wrong, like, we
        # should set a bool that the user wants to save anonymously, otherwise
        # none of the code will know there's a real user here (e.g., for tracing
        # or storing stats).

        # *** MAGIC AUTH

        if ((conf.magic_localhost_auth)
                and (self.ip_addr == conf.magic_auth_remoteip)):

            # HACK: localhost can be whomever they like
            # This is similar to the ccp script's --no-password, but this is used
            # for spoofing another user via flashclient.
            # Logs an Apache warning.
            self.req.p_warning('user %s from localhost: magic auth' %
                               (username, ))
            # Redundantly log a pyserver warning.
            # FIXME: Do we really need to log Apache warnings at all?
            log.warning('SPOOFING USER: %s from %s' % (
                username,
                self.ip_addr,
            ))

        # *** PASSWORD AUTH

        elif variant == 'password':

            if password:
                r = self.req.db.sql("SELECT login_ok(%s, %s)", (
                    username,
                    password,
                ))
                g.assurt(len(r) == 1)
                if not r[0]['login_ok']:
                    self.auth_failure(username, 'password')
                    raise GWIS_Warning('Incorrect username and/or password.',
                                       tag=None,
                                       logger=log.info)
            elif not sdsecret:
                raise GWIS_Warning(
                    'Please specify a password with that username.',
                    tag=None,
                    logger=log.info)

            log.info('user_validate: pwdd: %s / %s' % (
                username,
                self.str_remote_ip_and_host_and_local_host(),
            ))

            # FIXME: Statewide UI: Cleanup Session IDs records on login/logout.
            # FIXME: Use cron job to mark date_expired where last_modified is
            #        some number of minutes old? flashclient log should keep
            #        token active, right?
            #        See the unimplemented: gwis/command_/user_goodbye.py
            #        2014.09.09: The date_expired field in really old tokens
            #                    is still NULL...

        # *** TOKEN AUTH

        elif variant == 'token':

            log.verbose1('user_validate_parse: token: %s / username: %s' % (
                token,
                username,
            ))

            if token is None:
                log.warning(
                    'user_validate_parse: EXPLAIN: Why is the token None?')
                raise GWIS_Warning('Token not found! Please login again.',
                                   'badtoken')

            # Avoid transaction_retryable, at least so long as it expects a
            # specific TransactionRollbackError, but transaction_lock_row_logic
            # simply raises Exception.
            #   success = self.req.db.transaction_retryable(
            #      self.user_token_verify, self.req, token, username)
            # 2013.09.25: Using SELECT... FOR UPDATE NOWAIT seemed to work okay
            # until [lb] started running runic's daily cron job and also a
            # shapefile import script -- then all lock-rows came back failed.
            # But the scripts aren't even touching that database or the
            # user__token row! What gives?! I searched and couldn't find any
            # indication that NOWAIT and FOR UPDATE do anything other than on
            # the row on which they're supposed to behave... so this is truly
            # strange. So now db_glue uses STATEMENT_TIMEOUT instead of NOWAIT.
            found_row = self.user_token_verify(token, username)

            if found_row is None:
                log.info(
                    'user_validate_parse: timeout on token verify: username: %s'
                    % (username, ))
                raise GWIS_Warning(
                    'Please try your request again (server very busy).',
                    'sadtoken',
                    logger=log.info)
            elif found_row is False:
                log.warning(
                    'user_validate_parse: not found_row: token: %s / username: %s'
                    % (
                        token,
                        username,
                    ))
                raise GWIS_Warning(
                    'Please log off and log back on (incorrect token).',
                    'badtoken')
            # else, found_row is True

            # EXPLAIN: Does p_notice write to Apache log? We're fine, because
            #          GWIS_Warning writes to the pyserver log... right?
            #self.req.p_notice('tokens: %s %s' % (token, token_valid,))

            if not self.token_ok:
                # [lb] guesses this is unreachable; would've raised an exception by now.
                log.debug('user_validate_parse: token not ok: %s' % (token, ))
                self.auth_failure(username, 'token')
                raise GWIS_Warning(
                    'Please log off and log back on (incorrect token).',
                    'madtoken')

        # *** MISSING AUTH

        else:
            # No match for variant.
            log.warning('user_validate_parse: unknown variant: %s' %
                        (variant, ))
            raise GWIS_Error('Unknown variant.', 'badvariant')

        # *** SHARED SECRET

        if sdsecret:
            log.debug('user_validate_parse: using shared_secret to login')
            if (('' == conf.gwis_shared_secret)
                    or (sdsecret != conf.gwis_shared_secret)
                    or (not self.request_is_local)):
                log.error('Expected: %s / Got: %s / Local: %s' % (
                    conf.gwis_shared_secret,
                    sdsecret,
                    self.request_is_local,
                ))
                raise GWIS_Error('Whatchutalkinboutwillis?', 'badssec')
            self.request_is_secret = True

        # *** And The Rest.

        # If we got and verified a token, the username was checked against what's
        # in the db, so it should be clean. But if the username contains a quote
        # in it, we want to make sure it's delimited properly.
        # This is the simplest form of SQL injection: add a single quote and
        # a true result and then terminate the statement, e.g., say the
        # username is:
        #     ' or 1=1;--
        # E.g., SELECT * FROM user_ WHERE username='joe' AND password='xxxx';
        #   could be turned into, with, e.g., "fake_username' OR 1=1; --"
        #       SELECT * FROM user_
        #        WHERE username='fake_username' OR 1=1; -- AND password='xxxx';
        # Of course, this is just a trivial example.

        self.username = urllib.quote(username).strip("'")
        if self.username != username:
            raise GWIS_Warning('Bad username mismatch problem.',
                               'badquoteusername')

        if self.req.areq is not None:
            # Update Apache request_rec struct so username is recorded in logs.
            self.req.areq.user = username

        # Get the user ID
        self.user_id = User.user_id_from_username(self.req.db, username)
        g.assurt(self.user_id > 0)

        # Get the user's private group ID
        self.user_group_id = User.private_group_id(self.req.db, self.username)
        g.assurt(self.user_group_id > 0)

        # FIXME: We checked 'metadata/device' above, and now 'metadata/user' --
        #        which one is it?
        # BUG nnnn: Don't rely on this value, since the client can spoof it.
        if not self.request_is_mobile:
            self.request_is_mobile = user.get('is_mobile', False)
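
user_token_verify encodes a tri-state result: None for a lock timeout,
False for no matching token row, True for success. A minimal sketch of that
mapping, mirroring the raises above (the helper name is hypothetical):

def interpret_token_result(found_row):
    # None: the row-lock statement timed out; the client should retry.
    if found_row is None:
        return ('sadtoken',
                'Please try your request again (server very busy).')
    # False: no such token for this user; the client must log on again.
    elif found_row is False:
        return ('badtoken',
                'Please log off and log back on (incorrect token).')
    # True: the token checked out.
    return (None, None)
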
Example n. 25
    def sql_apply_query_filters(self, qb, where_clause="", conjunction=""):

        g.assurt((not conjunction) or (conjunction == "AND"))

        # We can only call sql_where_filter_linked once per query, so we
        # cannot combine about_stack_ids, filter_by_watch_feat,
        # filter_by_nearby_edits, and a viewport query.
        sql_where_filter_linked_cnt = 0
        if qb.filters.about_stack_ids:
            sql_where_filter_linked_cnt += 1
        if qb.filters.filter_by_watch_feat:
            sql_where_filter_linked_cnt += 1
        if (((qb.viewport is not None) and qb.viewport.include)
                or qb.filters.only_in_multi_geometry):
            sql_where_filter_linked_cnt += 1
        if qb.filters.filter_by_nearby_edits:
            sql_where_filter_linked_cnt += 1
        if sql_where_filter_linked_cnt > 1:
            raise GWIS_Error(
                'Please choose just one: about_stack_ids, '
                'filter_by_watch_feat, filter_by_nearby_edits, or viewport.')

        if qb.filters.about_stack_ids:
            linked_items_where = self.sql_where_filter_about(qb)
            g.assurt(linked_items_where)
            where_clause += " %s %s " % (
                conjunction,
                linked_items_where,
            )
            conjunction = "AND"

        if qb.filters.filter_by_watch_feat:
            # FIXME: Debug, then combine handlers for filter_by_watch_feat
            #                                     and only_in_multi_geometry.
            feat_qb = qb.clone(skip_clauses=True, skip_filtport=True)
            feat_qb.filters = Query_Filters(req=None)
            qfs = feat_qb.filters
            # Set filter_by_watch_item=True and search for geofeatures
            # that the user is watching.
            qfs.filter_by_watch_item = qb.filters.filter_by_watch_feat
            g.assurt(not qb.filters.only_in_multi_geometry)
            g.assurt((qb.viewport is None) or (qb.viewport.include is None))
            feat_qb.finalize_query()
            feat_qb.sql_clauses = geofeature.Many.sql_clauses_cols_all.clone()
            feats = geofeature.Many()
            feats_sql = feats.search_get_sql(feat_qb)
            feat_stack_id_table_ref = 'temp_stack_id__watch_feat'
            thurrito_sql = ("""
            SELECT
               stack_id
            INTO TEMPORARY TABLE
               %s
            FROM
               (%s) AS foo_feat_sid_1
            """ % (
                feat_stack_id_table_ref,
                feats_sql,
            ))
            rows = qb.db.sql(thurrito_sql)
            #
            join_on_to_self = self.sql_where_filter_linked_join_on_to_self(qb)
            where_on_other = ""
            join_on_temp = ("""
            JOIN %s
               ON (flv.rhs_stack_id = %s.stack_id)
            """ % (
                feat_stack_id_table_ref,
                feat_stack_id_table_ref,
            ))
            linked_items_where = self.sql_where_filter_linked(
                qb, join_on_to_self, where_on_other, join_on_temp)
            #
            where_clause += " %s %s " % (
                conjunction,
                linked_items_where,
            )
            conjunction = "AND"

        # 2013.04.02: Freshly implemented in CcpV2. Not the quickest fcn., but it
        #             works.
        #             MAYBE: Disable this until we can find a better solution?
        #             MEH: [lb] got it under 15 seconds, I think. Good enough.
        if qb.filters.filter_by_nearby_edits:
            '''
         g.assurt(False) # FIXME: This code is broke!

         join = ' JOIN post_geo pg ON (p.id = pg.id)'
         where_clause += (
            """
            %s 
            -- FIXME: Instead of ST_Intersects/ST_Buffer, try: ST_DWithin
               (ST_Intersects(
                  pg.geometry,
                  (SELECT ST_Buffer(collect(rr.geometry), 0)
                   FROM revision rr
                   WHERE
                     rr.username = %s
                     AND NOT is_social_rev(rr.id)))
            """ % (conjunction,
                   qb.db.quoted(qb.username),))
         conjunction = "AND"
         '''

            # FIXME: This was the older SQL snippet used for this filter. It
            #        was waaaaaayyyyy too slow. The one I used instead is also
            #        slow, but it doesn't time out, at least.
            #        [lb] notes that his database is missing geometry indices,
            #        but this didn't quite halve my experience, from 52 secs.
            #        to 29 secs. We need to run db_load_add_constraints.sql on
            #        the db.
            #
            # sql_or_sids = (
            #    """
            #    SELECT
            #       stack_id
            #    FROM
            #       geofeature AS gf_near
            #    WHERE
            #       ST_Intersects(
            #          gf_near.geometry,
            #          (
            #             SELECT
            #                ST_Buffer(collect(rr.geometry), 0)
            #             FROM
            #                revision rr
            #             WHERE
            #                rr.username = %s
            #                -- AND NOT is_social_rev(rr.id)
            #          )
            #       )
            #    """
            #    ) % (qb.db.quoted(qb.username),)

            # FIXME: Very slow query: ~ 42 sec.
            '''
         sql_or_sids = (
            """
            SELECT
               stack_id
            FROM
               geofeature AS gf_near
               JOIN revision AS rv_near
                  ON ST_Intersects(gf_near.geometry, rv_near.geometry)
            WHERE
               rv_near.username = %s
               -- AND NOT is_social_rev(rv_near.id)
            """
            ) % (qb.db.quoted(qb.username),)
         '''

            # MAYBE: Why isn't setting user_group_id part of finalize_query?
            #g.assurt(not qb.user_group_id)
            if not qb.user_group_id:
                qb.user_group_id = User.private_group_id(qb.db, qb.username)
                g.assurt(qb.user_group_id)

            geometry_table_ref = 'temp_geometry__edited_items'
            geometry_sql = ("""
            SELECT
               ST_Buffer(ST_Collect(grev.geometry), 0) AS geometry
            INTO TEMPORARY TABLE
               %s
            FROM
               group_revision AS grev
            WHERE
               grev.group_id = %d
            """ % (
                geometry_table_ref,
                qb.user_group_id,
            ))
            # 2013.04.02: On [lb]: Time: 405.073 ms
            rows = qb.db.sql(geometry_sql)

            # NOTE: This is a broad query: if a revision contains edits far apart,
            #       we'll find all the geofeatures in between. E.g., for [lb], it
            #       finds hundreds of thousands of byways; not very useful.
            item_stack_id_table_ref = 'temp_stack_id__edited_items'
            about_stack_ids_sql = ("""
            SELECT
               DISTINCT(stack_id)
            INTO TEMPORARY TABLE
               %s
            FROM
               geofeature AS feat
            JOIN
               %s AS grev
               ON ST_Intersects(feat.geometry, grev.geometry)
            """ % (
                item_stack_id_table_ref,
                geometry_table_ref,
            ))
            # 2013.04.02: On [lb]: Time: 13106.527 ms
            rows = qb.db.sql(about_stack_ids_sql)
            #
            join_on_to_self = self.sql_where_filter_linked_join_on_to_self(qb)
            where_on_other = ""
            join_on_temp = ("""
            JOIN %s
               ON (flv.rhs_stack_id = %s.stack_id)
            """ % (
                item_stack_id_table_ref,
                item_stack_id_table_ref,
            ))
            linked_items_where = self.sql_where_filter_linked(
                qb, join_on_to_self, where_on_other, join_on_temp)
            #
            where_clause += " %s %s " % (
                conjunction,
                linked_items_where,
            )
            conjunction = "AND"

        # Only select posts whose name matches the user's search query.
        where_clause = item_user_watching.Many.sql_apply_query_filters(
            self, qb, where_clause, conjunction)

        return where_clause
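
The where-clause chaining convention above repeats the same two lines per
filter; a minimal sketch of the pattern in isolation (the helper name and
the filter strings are hypothetical):

def append_filter(where_clause, conjunction, new_filter):
    # The first filter joins with the caller's conjunction (possibly '');
    # every later filter joins with "AND".
    where_clause += " %s %s " % (conjunction, new_filter,)
    return where_clause, "AND"

wc, conj = append_filter("", "", "item.deleted IS FALSE")
wc, conj = append_filter(wc, conj, "item.stack_id > 0")
# wc: "  item.deleted IS FALSE  AND item.stack_id > 0 "
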
Example n. 26
   def setup_qb_cur(self, all_errs, min_acl=Access_Level.viewer):

      # For both import and export, qb_src is used to retrieve items from the
      # database, and qb_cur is used to check the user's group accesses and
      # maybe to search for regions if a restrictive bbox is being imposed.
      # But qb_cur is also used during import to save changes to the database;
      # qb_cur is not used during export to save anything to the database.
      #
      # NOTE: On import, we row-lock on the grac tables, group_membership 
      # and new_item_policy. We also row-lock the destination branch.
      # So other operations might block while this code runs.
      # CONFIRM: We don't lock anything on export, right?

      qb_cur = None

      username = self.mjob.wtem.created_by

      db = db_glue.new()

      rev = revision.Current(allow_deleted=False)
      (branch_id, branch_hier) = branch.Many.branch_id_resolve(db, 
                     self.mjob.wtem.branch_id, branch_hier_rev=rev)

      if branch_id is None:
         # EXPLAIN: How come we don't raise here, like we do in the else?
         #          Or, why doesn't the else block use all_errs?
         #          See: raise_error_on_branch.
         #          And if you look at export_cyclop.substage_initialize,
         #          you'll see that it assurts not all_errs, so I guess
         #          it expects us to raise.
         all_errs.append(
            'setup_qb_cur: not a branch: %s at %s' 
            % (self.mjob.wtem.branch_id, str(rev),))
      else:

         g.assurt(branch_hier)
         g.assurt(branch_id == branch_hier[0][0])

         raise_error_on_branch = False

         if not self.spf_conf.branch_name:
            # This happens on export, since export_cyclop.substage_initialize
            # only sets branch_id when setting up the qbs. This is because it
            # uses the merge_job's branch_id, and since merge_job is just an
            # item_versioned item, all it has is its branch_id, as items do
            # not also store the branch name.
            self.spf_conf.branch_name = branch_hier[0][2]
         elif self.spf_conf.branch_name != branch_hier[0][2]:
            # The branch name in the shapefile should match.
            log.error('setup_qb_cur: branch_name mismatch: %s / %s'
                      % (self.spf_conf.branch_name, branch_hier[0][2],))
            raise_error_on_branch = True
         # else, the branch_name in the conf matches the one we loaded by ID.
         #
         if self.spf_conf.branch_id != branch_id:
            # But the branch ID we can tolerate being wrong.
            log.warning('setup_qb_cur: unexpected spf_conf.branch_id: %s'
                        % (self.spf_conf.branch_id,))
            # For the Metc Bikeways shapefile, this just means [lb] hasn't
            # updated the branch ID attribute in the shapefile...
            g.assurt(self.spf_conf.branch_name)
            (try_branch_id, try_branch_hier) = branch.Many.branch_id_resolve(
                           db, self.spf_conf.branch_name, branch_hier_rev=rev)
            if try_branch_id == branch_id:
               log.warning('setup_qb_cur: ok: overriding branch_id: %s'
                           % (branch_id,))
               self.spf_conf.branch_id = branch_id
            else:
               log.error('setup_qb_cur: try_branch_id != branch_id: %s != %s'
                         % (try_branch_id, branch_id,))
               raise_error_on_branch = True

         if raise_error_on_branch:
            if conf.break_on_assurt:
               import pdb;pdb.set_trace()
            raise GWIS_Error(
               'Shapefile branch ID and name do not match job details: '
               'work_item: %s/%s | shapefile: %s/%s'
               % (branch_hier[0][2],
                  branch_hier[0][0],
                  self.spf_conf.branch_name,
                  self.spf_conf.branch_id,))

         qb_cur = Item_Query_Builder(db, username, branch_hier, rev)

         # Load both the raw geometry and the WKT geometry; we need to be
         # flexible.
         qb_cur.filters.skip_geometry_raw = False
         qb_cur.filters.skip_geometry_svg = True
         qb_cur.filters.skip_geometry_wkt = False

         # To save things, we need to set the group ID explicitly.
         self.user_group_id = User.private_group_id(qb_cur.db, username)
         qb_cur.user_group_id = self.user_group_id

         qb_cur.item_mgr = Item_Manager()
         # Load the attachment cache now. On import, if we create new
         # attributes (see metc_bikeways_defs.py), we'll keep it updated.
         qb_cur.item_mgr.load_cache_attachments(qb_cur)

         Query_Overlord.finalize_query(qb_cur)

         # FIXME: This comment. I like it. But it's not true... yet.
         #  Getting row lock in branches_prepare. So don't table lock.
         #
         # Start the transaction, since the grac_mgr does some row locking.
         # We'll keep the rows locked until we've verified permissions.
         # FIXME: Verify you rollback and start a new 'revision' lock...
         #        or maybe just start a new 'revision' lock? Or can you
         #        write to a Shapefile first and zip through the Shapefile
         #        to save quickly and not hold the lock so long?
         # BUG nnnn: Investigate using a row-level branch lock; for now, 
         #           just lock rev.
         qb_cur.db.transaction_begin_rw()

         qb_cur.grac_mgr = Grac_Manager()
         load_grp_mmbrshps = True
         qb_cur.grac_mgr.prepare_mgr('user', qb_cur, load_grp_mmbrshps)

         # FIXME: Does qb_src need grac_mgr?
         #self.qb_src.grac_mgr = qb_cur.grac_mgr

         # Check user's minimum access level.
         target_branch = self.verify_branch_access(qb_cur, min_acl, all_errs)
         g.assurt(target_branch.stack_id == self.spf_conf.branch_id)
         if (self.spf_conf.branch_name
             and (self.spf_conf.branch_name != qb_cur.branch_hier[0][2])):
            log.warning('Unexpected spf_conf.branch_name: %s'
                        % (self.spf_conf.branch_name,))
         self.spf_conf.branch_name = qb_cur.branch_hier[0][2]

      self.qb_cur = qb_cur

      log.debug('setup_qb_cur: spf_conf: %s' % (str(self.spf_conf),))
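
branch_id_resolve accepts either a branch stack ID or a branch name and
returns the ID plus the branch hierarchy, or None when nothing matches; a
minimal sketch of the calling convention (the branch name is hypothetical):

rev = revision.Current(allow_deleted=False)
(branch_id, branch_hier) = branch.Many.branch_id_resolve(
   db, 'Metc Bikeways 2012', branch_hier_rev=rev)
if branch_id is None:
   # Mirror setup_qb_cur: collect the error rather than raising right away.
   all_errs.append('not a branch: %s at %s' % ('Metc Bikeways 2012', str(rev),))
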
Example n. 27
    def go_main(self):

        # Get the content templates.

        content_plain_f = open(self.cli_opts.content_plain)
        content_plain = content_plain_f.read()
        content_plain_f.close()

        content_html_f = open(self.cli_opts.content_html)
        content_html = content_html_f.read()
        content_html_f.close()

        # Assemble the recipients.

        # The file should be of the form
        #
        #   username\temail_address
        #
        # PERFORMANCE: Cyclopath circa 2012 doesn't have that many users (~5,000)
        # so we can load all the emails into memory. If we end up with lots more
        # users, this operation might take a sizeable bite of memory.

        recipients = []
        user_ids = []

        recipients_f = open(self.cli_opts.recipient_file)
        try:
            deprecation_warned = False
            for line in recipients_f:
                line = line.strip()
                # NOTE: Skip comment lines.
                if line and (not line.startswith('#')):
                    try:
                        fake_uid = 0
                        username, email = line.split('\t')
                        # NOTE: unsubscribe_proof is unknown since we don't
                        #       select from db, which is why this path is deprecated.
                        unsubscribe_proof = ''
                        recipients.append((
                            fake_uid,
                            username,
                            email,
                            unsubscribe_proof,
                        ))
                        if not deprecation_warned:
                            log.warning(
                                'Using username/email file is deprecated.')
                            deprecation_warned = True
                    except ValueError:
                        user_id = int(line)
                        user_ids.append(user_id)
        except ValueError:
            log.error(
                'The format of the recipient file is unexpected / line: %s' %
                (line, ))
            raise
        finally:
            recipients_f.close()

        if recipients and user_ids:
            log.error(
                'Please specify only "username, email" or "user IDs" but not both'
            )
            sys.exit(0)

        db = db_glue.new()

        if user_ids:
            extra_where = ("id IN (%s)" % (",".join([str(x)
                                                     for x in user_ids]), ))
            (valid_ids, invalid_ids, not_okay, user_infos,
             info_lookup) = (User.spam_get_user_info(
                 db,
                 extra_where,
                 sort_mode='id ASC',
                 make_lookup=True,
                 ignore_flags=self.cli_opts.ignore_flags))
            if invalid_ids or not_okay:
                log.error('%s%s' % ('Please recheck the user ID list: ',
                                    '%d okay / %d invalid / %d not_okay' % (
                                        len(valid_ids),
                                        len(invalid_ids),
                                        len(not_okay),
                                    )))
                log.error('not_okay: %s' % (not_okay, ))
                sys.exit(0)
            g.assurt(len(set(valid_ids)) == len(set(user_infos)))
            g.assurt(len(set(valid_ids)) == len(set(user_ids)))
            # Resort according to the input.
            for uid in user_ids:
                # NOTE: info_tuple is formatted: (user_id, username, email,)
                recipients.append(info_lookup[uid])

        all_okay = True
        for info_tuple in recipients:
            if not User.email_valid(info_tuple[2]):
                log.error('Invalid email for user %s: %s' % (
                    info_tuple[1],
                    info_tuple[2],
                ))
                all_okay = False
        if not all_okay:
            sys.exit(0)

        log.debug('Found %d recipients.' % (len(recipients), ))
        if not recipients:
            log.info('No one to email. Bye!')
            sys.exit(0)

        # Always send a copy to us, too.
        g.assurt(conf.internal_email_addr)
        unsubscribe_proof = ''
        recipients.append((
            0,
            'Cyclopath Team',
            conf.internal_email_addr,
            unsubscribe_proof,
        ))

        # Combine recipients if bcc'ing.

        if self.cli_opts.bcc_size:
            addr_lists = []
            addrs_processed = 0
            while addrs_processed < len(recipients):
                last_index = addrs_processed + self.cli_opts.bcc_size
                bcc_list = recipients[addrs_processed:last_index]
                g.assurt(bcc_list)
                addrs_processed += self.cli_opts.bcc_size
                addr_lists.append(bcc_list)
            recipients = addr_lists
            # 2012.11.12: Using bcc is not cool. Don't do it.
            log.error('BCC is too impersonal. Please consider not using it.')
            g.assurt(False)

        # Process the recipients one or many at a time.

        prompted_once = False

        prog_log = Debug_Progress_Logger(loop_max=len(recipients))
        # MAYBE: Don't log for every email?
        #prog_log.log_freq = prog_log.loop_max / 100.0

        for recipient_or_list in recipients:

            email_unames = []

            # Make the To and Bcc headers.
            if self.cli_opts.bcc_size:
                g.assurt(False)  # DEVs: Reconsider using BCC.
                # Otherwise you cannot personalize messages, i.e.,
                # with usernames of private UUID links.
                # Use a generic user name, since there are multiple recipients.
                msg_username = 'Cyclopath User'  # Placeholder; any generic name.
                # Send the email to ourselves...
                recipient_email = self.cli_opts.mail_from
                recipient_addr = ('"Cyclopath.org" <%s>' %
                                  (self.cli_opts.mail_from, ))
                # ...and Bcc everyone else.
                email_addrs = []
                for recipient in recipient_or_list:
                    # C.f. emailer.check_email, but using Bcc is deprecated, so
                    # don't worry about it.
                    msg_username = recipient[1]
                    recipient_email = recipient[2]
                    really_send = False
                    if ((len(conf.mail_ok_addrs) == 1)
                            and ('ALL_OKAY' in conf.mail_ok_addrs)):
                        log.debug('go_main: conf says ALL_OKAY: %s' %
                                  (recipient_addr, ))
                        really_send = True
                    elif recipient_email in conf.mail_ok_addrs:
                        log.debug('go_main: email in mail_ok_addrs: %s' %
                                  (recipient_addr, ))
                        really_send = True
                    elif not conf.mail_ok_addrs:
                        log.error('go_main: mail_ok_addrs is not set: %s' %
                                  (recipient_addr, ))
                    else:
                        # This is a dev. machine and we don't want to email users.
                        log.debug('go_main: skipping non-dev email: %s' %
                                  (recipient_addr, ))
                    if really_send:
                        log.debug('Emailing user at: %s' % (recipient_addr, ))
                        email_addr = ('"%s" <%s>' % (
                            msg_username,
                            recipient_email,
                        ))
                        email_addrs.append(email_addr)
                        email_unames.append(msg_username)
                addrs_str = ','.join(email_addrs)
                addr_bcc = 'Bcc: %s\n' % (addrs_str, )
                unsubscribe_proof = ''
                unsubscribe_link = ''
            else:
                # This is just a normal, send-directly-to-one-user email.
                msg_username = recipient_or_list[1]
                recipient_email = recipient_or_list[2]
                recipient_addr = ('"%s" <%s>' % (
                    msg_username,
                    recipient_email,
                ))
                email_unames.append(recipient_email)
                addr_bcc = ''
                unsubscribe_proof = recipient_or_list[3]
                unsubscribe_link = Emailer.make_unsubscribe_link(
                    'user_unsubscribe', recipient_email, unsubscribe_proof)

                # To test the unsubscribe feature, try a link like this:
# http://ccpv3/gwis?request=user_unsubscribe&[email protected]&proof=asdasdasd

            db.close()

            the_msg = Emailer.compose_email(
                self.cli_opts.mail_from, msg_username, recipient_addr,
                unsubscribe_proof, unsubscribe_link,
                self.cli_opts.email_subject, content_plain, content_html,
                addr_bcc)

            if not prompted_once:
                do_send = self.ask_permission(the_msg)
                if not do_send:
                    log.warning('Canceled by user. Bye!')
                    sys.exit(0)
                prompted_once = True

            # NOTE: Emailer.send_email will check conf.mail_ok_addrs.
            # ALSO: This is the only place/caller/script that uses do_not_email.
            #       It's really just for testing, and this is the last stop.
            if not self.cli_opts.do_not_email:
                Emailer.send_email(email_unames, the_msg, prog_log,
                                   self.cli_opts.delay_time,
                                   self.cli_opts.dont_shake)

        # end: for recipient_or_list in recipients.

        prog_log.loops_fin()
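
The --bcc-size batching above is plain list slicing; as a standalone sketch
(the helper name is hypothetical, the logic identical):

def chunk_recipients(recipients, bcc_size):
    # Slice the flat recipient list into lists of at most bcc_size each.
    addr_lists = []
    addrs_processed = 0
    while addrs_processed < len(recipients):
        addr_lists.append(
            recipients[addrs_processed:addrs_processed + bcc_size])
        addrs_processed += bcc_size
    return addr_lists

# E.g., chunk_recipients(range(5), 2) -> [[0, 1], [2, 3], [4]]

Example n. 28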
    def compose_email_item_list(self, qb, msg_type_id, items_fetched):

        for item in items_fetched:

            g.assurt(item.real_item_type_id == Item_Type.post)

            # The item is the post. Get the post and the thread.
            #qb.filters.context_stack_id = {thread_stack_id}
            posts = post.Many()
            posts.search_by_stack_id(item.stack_id, qb)
            if len(posts) > 0:
                g.assurt(len(posts) == 1)
                the_post = posts[0]
            else:
                the_post = None
                log.warning('_compose_item_list: cannot see post: %s / %s' % (
                    qb.username,
                    item.stack_id,
                ))

            if the_post is not None:

                threads = thread.Many()
                threads.search_by_stack_id(the_post.thread_stack_id, qb)

                if len(threads) > 0:
                    g.assurt(len(threads) == 1)
                    the_thread = threads[0]

                    # 2014.07.02: FIXME: test changes to what_username:
                    post_username = User.what_username([
                        the_post.edited_user,
                        the_post.edited_host,
                        the_post.edited_addr,
                    ])

                    # A CcpV1 link:
                    deeplink_text_v1 = (
                        'http://%s/#discussion?thread_id=%d&post_id=%d' % (
                            conf.server_name,
                            the_thread.stack_id,
                            the_post.stack_id,
                        ))
                    # MAYBE: A CcpV2 link so the user is asked to log on:
                    deeplink_text_v2 = (
                        'http://%s/#private?type=post&link=%d' % (
                            conf.server_name,
                            the_post.stack_id,
                        ))
                    log.debug('MAYBE: deeplink_text_v2: %s' %
                              (deeplink_text_v2, ))

                    self.msg_text += ('''Discussion: %s
Posted by:  %s
View post:  %s (Flash required)
+-----
%s
+-----
''' % (
                        the_thread.name,
                        post_username,
                        deeplink_text_v1,
                        the_post.body,
                    ))
                    self.msg_html += ('''<table>
<tr><td>Discussion:</td> <td>%s</td></tr>
<tr><td>Posted by:</td> <td>%s</td></tr>
<tr><td>View post:</td> <td><a href="%s">%s</a> (Flash required)</td></tr>
</table><br/>
+-----<br/>
%s<br/>
+-----<br/>
''' % (
                        the_thread.name,
                        post_username,
                        deeplink_text_v1,
                        deeplink_text_v1,
                        the_post.body,
                    ))

                else:  # len(threads) == 0
                    log.warning(
                        '_compose_item_list: cannot see thread: %s / %s' % (
                            qb.username,
                            the_post.thread_stack_id,
                        ))
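
User.what_username is handed [edited_user, edited_host, edited_addr] above;
its implementation is not part of this listing, but a plausible
first-non-empty fallback would read like this (an assumption, not the real
method):

def what_username(user_host_addr):
    # Assumed behavior: prefer the username, then the hostname, then the
    # IP address, for attributing posts made by anonymous users.
    for candidate in user_host_addr:
        if candidate:
            return candidate
    return conf.anonymous_username

Example n. 29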
   def setup_links(self):

      # First count some table rows and double-check the upgrade so far. We
      # want to be confident we're getting all the CcpV1 records and making
      # appropriate CcpV2 records.
      try:
         self.setup_links_sanity_check()
      except:
         log.warning('setup_links: old CcpV1 already dropped; moving on...')

      # Now get the unique set of usernames. We're going to create items owned
      # by certain users, and we'll need to setup resources for each user, like
      # the query_builder and the grac_mgr.

      usernames_sql = (
         """
         SELECT DISTINCT (username)
         FROM item_watcher_bug_nnnn
         ORDER BY username
         """)

      # NOTE: We're not bothering with dont_fetchall.
      #       There are only a few hundred rows...

      rows = self.qb.db.sql(usernames_sql)

      log.debug('setup_links: found %d unique users with watchers'
                % (len(rows),))

      if not rows:
         log.error('setup_links: nothing found')
         g.assurt(False)

      for row in rows:

         username = row['username']

         # Hmm. There's no user.One() class to load a user. It's all custom.
         user_rows = self.qb.db.sql(
            "SELECT login_permitted FROM user_ WHERE username = %s"
            % (self.qb.db.quoted(username),))
         g.assurt(len(user_rows) == 1)
         if not user_rows[0]['login_permitted']:
            log.debug('setup_links: skipping: !user_.login_permitted: %s'
                      % (username,))
            continue

         log.verbose2('setup_links: processing username: %s' % (username,))

         g.assurt(isinstance(self.qb.revision, revision.Current))
         rev_cur = revision.Current()

         user_qb = Item_Query_Builder(
            self.qb.db, username, self.qb.branch_hier, rev_cur)
         user_qb.grac_mgr = Grac_Manager()
         user_qb.grac_mgr.prepare_mgr('user', user_qb)
         #
         g.assurt(
            user_qb.username and (user_qb.username != conf.anonymous_username))
         user_qb.user_group_id = User.private_group_id(user_qb.db, 
                                                       user_qb.username)
         #
         # Use the same item_mgr so we pull client stack IDs from the same
         # pool.
         user_qb.item_mgr = self.qb.item_mgr

         # Finalize the query. This sets revision.gids so it'll include the
         # user's private group (and the anonymous and All Users groups).
         Query_Overlord.finalize_query(user_qb)

         # We can still get deleted regions and add links for them.
         user_qb.revision.allow_deleted = True

         # Finally, update the database. Oi, there's a lot of setup!
         self.setup_links_for_user(user_qb)

         # The way Item_Query_Builder works, it usually wires the branch_hier
         # revision to the revision revision.
         g.assurt(self.qb.branch_hier[0][1] == rev_cur)
         # We'll reuse the branch_hier so clear this user's gids.
         self.qb.branch_hier[0][1].gids = None
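
setup_links issues one extra SELECT per user just to read login_permitted;
a minimal sketch that folds the check into the initial DISTINCT query
instead (same tables as the listing, untested):

usernames_sql = (
   """
   SELECT DISTINCT (iw.username)
   FROM item_watcher_bug_nnnn AS iw
   JOIN user_ AS u
      ON (u.username = iw.username)
   WHERE u.login_permitted
   ORDER BY iw.username
   """)
rows = self.qb.db.sql(usernames_sql)
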
Example n. 30
   def go_main(self):

      # Skipping: Ccp_Script_Base.go_main(self)

      do_commit = False

      try:

         if self.cli_opts.update_branch is None:
            if not self.cli_opts.last_merge_rid:
               # Make sure we're being run from a terminal.
               # Run from cron, $TERM is not set. Run from bash, it's 'xterm'.
               if ((os.environ.get('TERM') != "dumb")
                   and (os.environ.get('TERM') is not None)):
                  print '\nPlease confirm the last_merge_rid.\n'
                  self.cli_opts.last_merge_rid = self.ask_question(
                     'last_merge_rid',
                     revision.Revision.revision_max(self.qb.db),
                     the_type=int)
               # else, not interactive and no --last-merge-rid, so we'll
               #       just use the new revision ID that's claimed when
               #       we create the branch.

         log.debug('go_main: getting exclusive revision lock...')
         revision.Revision.revision_lock_dance(
            self.qb.db, caller='make_new_branch.py')
         g.assurt((self.qb.locked_tables == ['revision',])
                  or (self.qb.cp_maint_lock_owner))
         log.debug('go_main: database is locked.')

         # MAYBE: There seems to be an awful lot of boilerplate code here.
         self.qb.grac_mgr = Grac_Manager()
         self.qb.grac_mgr.prepare_mgr('user', self.qb)
         # The script should be run by a real developer-user.
         g.assurt(self.qb.username
                  and (self.qb.username != conf.anonymous_username))
         self.qb.user_group_id = User.private_group_id(self.qb.db,
                                                       self.qb.username)

         # Get a new revision ID.
         self.qb.item_mgr.start_new_revision(self.qb.db)
         log.debug('Got rid_new: %d' % (self.qb.item_mgr.rid_new,))

         #import pdb;pdb.set_trace()

# FIXME: Is there a way to setup the basemap for new Ccp installs? Or in
# general? Like, we do not need to clone node IDs... or, okay, there are two
# cases: one is CcpV1->V2, i.e., what script to call after running the SQL
# scripts; the second case is a fresh Ccp install: after a user initializes
# the database, they will have to create groups and branch and memberships
# and nips...

         if self.cli_opts.update_branch is not None:
            self.update_branch()
            self.load_groups()
            self.add_members()
            self.init_newips()
         elif self.cli_opts.new_branch_name:
            self.make_groups()
            self.make_branch()
            self.add_members()
            self.init_newips()
            self.clone_nodes()
         else:
            g.assurt(self.cli_opts.purge_branch)
            self.purge_branch()

         # FIXME: Is this correct? Or should username be _script name?
         # Save the new revision and finalize the sequence numbers.
         group_names_or_ids = ['Public',]
         changenote = ('%s branch "%s"'
            % ('Updated' if (self.cli_opts.update_branch is not None)
                  else 'Created new',
               self.qb.branch_hier[0][2],))
         self.finish_script_save_revision(group_names_or_ids,
                                          self.qb.username,
                                          changenote)

         if debug_skip_commit:
            raise Exception('DEBUG: Skipping commit: Debugging')
         do_commit = True

      except Exception, e:

         log.error('Exception!: "%s" / %s' % (str(e), traceback.format_exc(),))
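
The $TERM test above decides whether the script may prompt; as a standalone
helper (the name is hypothetical, the logic identical):

import os

def terminal_is_interactive():
    # Under cron, TERM is unset; under bash it is set (e.g., to 'xterm').
    # A 'dumb' terminal also counts as non-interactive.
    term = os.environ.get('TERM')
    return (term is not None) and (term != 'dumb')
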
Example n. 31
   def email_send_rev_feedback(self, msgng_id, branch_id, latest_rid,
                                     item_stack_id, notifier_dat):

      # The pickled payload is structured as follows:
      # [0]: The user who wrote the feedback.
      # [1]: The users whose revisions received feedback.
      # [2]: The revision IDs.
      rvt_dat = cPickle.loads(notifier_dat)
      feedbacker = rvt_dat[0]
      feedbackees = rvt_dat[1]
      revision_ids = rvt_dat[2]

      qb, the_branch = self.get_qb_for_branch(branch_id)

      if qb is not None:

         rev_rows = self.hydrate_revs_for_user(qb, revision_ids)

         posts = post.Many()
         posts.search_by_stack_id(item_stack_id, qb)
         if len(posts) > 0:
            g.assurt(len(posts) == 1)
            the_post = posts[0]
         else:
            the_post = None
            log.warning('email_send_rev_feedback: cannot see post: %s / %s'
                        % (qb.username, item_stack_id,))

         if the_post is not None:
            threads = thread.Many()
            threads.search_by_stack_id(the_post.thread_stack_id, qb)
            if len(threads) > 0:
               g.assurt(len(threads) == 1)
               the_thread = threads[0]

               if feedbacker != the_post.created_user:
                  log.warning('_send_rev_fb: unexpected: fber: %s / poster: %s'
                              % (feedbacker, the_post.created_user,))
               if the_post.created_user != the_post.edited_user:
                  log.warning('_send_rev_fb: weird: cr_usr: %s / ed_usr: %s'
                              % (the_post.created_user, the_post.edited_user,))
               # 2014.07.02: FIXME: test changes to what_username:
               u_feedbacker = User.what_username([the_post.edited_user,
                                                  the_post.edited_host,
                                                  the_post.edited_addr,])

               email_subject = 'Cyclopath notice: %s' % (the_thread.name,)

               link_uri = ('http://%s/#discussion?thread_id=%d'
                           % (conf.server_name, the_thread.stack_id,))

               msg_text = (
'''Another Cyclopath user wrote feedback about one or more revisions,
including at least one that you made.

Feedback by: %s
On revision(s): %s
+-----
%s
+-----

This feedback also begins a discussion thread in Cyclopath. Please
participate by clicking the following link:

%s
''' % (u_feedbacker,
       ', '.join([str(x) for x in revision_ids]),
       the_post.body,
       link_uri,))

               msg_html = (
'''<p>
Another Cyclopath user wrote feedback about one or more revisions,
including at least one that you made.
</p>

<p>
Feedback by: %s
<br/>
On revision(s): %s
<br/>
+-----
<br/>
%s
<br/>
+-----
<br/>
</p>

<p>
This feedback also begins a discussion thread in Cyclopath. Please
participate by clicking the following link:
</p>

<p>
<a href="%s">%s</a>
</p>
''' % (u_feedbacker,
       ', '.join([str(x) for x in revision_ids]),
       the_post.body,
       link_uri,
       link_uri,))

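      # NOTE: The send below assumes the branch, post, and thread lookups
      #       above all succeeded; otherwise email_subject, msg_text, and
      #       msg_html would be unbound here.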
      self.send_onesie(self.username,
                       self.email_addy,
                       self.unsubscribe_proof,
                       self.unsubscribe_link,
                       email_subject,
                       msg_text,
                       msg_html)

      Watcher_Composer.finalize_alerts([msgng_id,], self.username)
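
notifier_dat is just a pickled 3-tuple; a minimal sketch of both ends of
that contract (cPickle is Python 2's C pickle module; the sample values are
hypothetical):

import cPickle

feedbacker = 'jane'                # hypothetical
feedbackees = ['alice', 'bob',]    # hypothetical
revision_ids = [20001, 20002,]     # hypothetical
notifier_dat = cPickle.dumps((feedbacker, feedbackees, revision_ids,))

# Consumer side, as in email_send_rev_feedback:
rvt_dat = cPickle.loads(notifier_dat)
assert rvt_dat[0] == feedbacker
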
Esempio n. 32
0
   def go_main(self):

      # Skipping: Ccp_Script_Base.go_main(self)

      do_commit = False

      try:

         if self.cli_opts.update_branch is None:
            if not self.cli_opts.last_merge_rid:
               # Make sure we're being run from a terminal.
               # Run from cron, $TERM is not set. Run from bash, it's 'xterm'.
               if ((os.environ.get('TERM') != "dumb")
                   and (os.environ.get('TERM') is not None)):
                  print '\nPlease confirm the last_merge_rid.\n'
                  self.cli_opts.last_merge_rid = self.ask_question(
                     'last_merge_rid',
                     revision.Revision.revision_max(self.qb.db),
                     the_type=int)
               # else, not interactive and no --last-merge-rid, so we'll
               #       just use the new revision ID that's claimed when
               #       we create the branch.

         log.debug('go_main: getting exclusive revision lock...')
         revision.Revision.revision_lock_dance(
            self.qb.db, caller='make_new_branch.py')
         g.assurt((self.qb.locked_tables == ['revision',])
                  or (self.qb.cp_maint_lock_owner))
         log.debug('go_main: database is locked.')

         # MAYBE: There seems to be an awful lot of boilerplate code here.
         self.qb.grac_mgr = Grac_Manager()
         self.qb.grac_mgr.prepare_mgr('user', self.qb)
         # The script should be run be a real developer-user.
         g.assurt(self.qb.username
                  and (self.qb.username != conf.anonymous_username))
         self.qb.user_group_id = User.private_group_id(self.qb.db,
                                                       self.qb.username)

         # Get a new revision ID.
         self.qb.item_mgr.start_new_revision(self.qb.db)
         log.debug('Got rid_new: %d' % (self.qb.item_mgr.rid_new,))

         #import pdb;pdb.set_trace()

         # FIXME: Is there a way to set up the basemap for new Ccp installs,
         #        or in general? E.g., we do not need to clone node IDs.
         #        There are two cases: (1) CcpV1->V2 upgrades, i.e., which
         #        script to call after running the SQL scripts; and (2) fresh
         #        Ccp installs, where, after the user initializes the
         #        database, they still have to create groups and branch and
         #        memberships and nips...

         if self.cli_opts.update_branch is not None:
            self.update_branch()
            self.load_groups()
            self.add_members()
            self.init_newips()
         elif self.cli_opts.new_branch_name:
            self.make_groups()
            self.make_branch()
            self.add_members()
            self.init_newips()
            self.clone_nodes()
         else:
            g.assurt(self.cli_opts.purge_branch)
            self.purge_branch()

         # FIXME: Is this correct? Or should username be _script name?
         # Save the new revision and finalize the sequence numbers.
         group_names_or_ids = ['Public',]
         changenote = ('%s branch "%s"'
            % ('Updated' if (self.cli_opts.update_branch is not None)
                  else 'Created new',
               self.qb.branch_hier[0][2],))
         self.finish_script_save_revision(group_names_or_ids,
                                          self.qb.username,
                                          changenote)

         if debug_skip_commit:
            raise Exception('DEBUG: Skipping commit: Debugging')
         do_commit = True

      except Exception, e:

         log.error('Exception!: "%s" / %s' % (str(e), traceback.format_exc(),))
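
The $TERM sniffing near the top of go_main is a heuristic for "may we prompt
the user?". A more direct check, shown here as a hedged alternative rather
than what this script actually does, asks whether stdin is attached to a
terminal:

   import os
   import sys

   def can_prompt():
      # Under cron, stdin is not a tty and TERM is typically unset; under
      # bash, stdin is a tty and TERM is, e.g., 'xterm'.
      if not sys.stdin.isatty():
         return False
      return os.environ.get('TERM') not in (None, 'dumb')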
Example No. 33
0
   def go_main(self):

      # Get the content templates.

      content_plain_f = open(self.cli_opts.content_plain)
      content_plain = content_plain_f.read()
      content_plain_f.close()

      content_html_f = open(self.cli_opts.content_html)
      content_html = content_html_f.read()
      content_html_f.close()

      # Assemble the recipients.

      # The file should be of the form
      #
      #   username\temail_address
      #
      # PERFORMANCE: Cyclopath circa 2012 doesn't have that many users (~5,000)
      # so we can load all the emails into memory. If we end up with lots more
      # users, this operation might take a sizeable bite of memory.

      recipients = []
      user_ids = []

      recipients_f = open(self.cli_opts.recipient_file)
      try:
         deprecation_warned = False
         for line in recipients_f:
            line = line.strip()
            # NOTE: Skip comment lines.
            if line and (not line.startswith('#')):
               try:
                  fake_uid = 0
                  username, email = line.split('\t')
                  # NOTE: unsubscribe_proof is unknown since we don't
                  #       select from db, which is why this path is deprecated.
                  unsubscribe_proof = ''
                  recipients.append(
                     (fake_uid, username, email, unsubscribe_proof,))
                  if not deprecation_warned:
                     log.warning('Using username/email file is deprecated.')
                     deprecation_warned = True
               except ValueError:
                  user_id = int(line)
                  user_ids.append(user_id)
      except ValueError:
         log.error('The format of the recipient file is unexpected / line: %s'
                   % (line,))
         raise
      finally:
         recipients_f.close()

      if recipients and user_ids:
         log.error(
            'Please specify only "username, email" or "user IDs" but not both')
         sys.exit(0)

      db = db_glue.new()

      if user_ids:
         extra_where = ("id IN (%s)" % (",".join([str(x) for x in user_ids]),))
         (valid_ids, invalid_ids, not_okay, user_infos, info_lookup) = (
            User.spam_get_user_info(
               db, extra_where, sort_mode='id ASC', make_lookup=True,
               ignore_flags=self.cli_opts.ignore_flags))
         if invalid_ids or not_okay:
            log.error('%s%s'
                      % ('Please recheck the user ID list: ',
                         '%d okay / %d invalid / %d not_okay'
                         % (len(valid_ids), len(invalid_ids), len(not_okay),)))
            log.error('not_okay: %s' % (not_okay,))
            sys.exit(0)
         g.assurt(len(set(valid_ids)) == len(set(user_infos)))
         g.assurt(len(set(valid_ids)) == len(set(user_ids)))
         # Resort according to the input.
         for uid in user_ids:
            # NOTE: info_tuple is formatted: (user_id, username, email,
            #       unsubscribe_proof,)
            recipients.append(info_lookup[uid])

      all_okay = True
      for info_tuple in recipients:
         if not User.email_valid(info_tuple[2]):
            log.error('Invalid email for user %s: %s'
                      % (info_tuple[1], info_tuple[2],))
            all_okay = False
      if not all_okay:
         sys.exit(0)

      log.debug('Found %d recipients.' % (len(recipients),))
      if not recipients:
         log.info('No one to email. Bye!')
         sys.exit(0)

      # Always send a copy to us, too.
      g.assurt(conf.internal_email_addr)
      unsubscribe_proof = ''
      recipients.append(
         (0, 'Cyclopath Team', conf.internal_email_addr, unsubscribe_proof,))

      # Combine recipients if bcc'ing.

      if self.cli_opts.bcc_size:
         addr_lists = []
         addrs_processed = 0
         while addrs_processed < len(recipients):
            last_index = addrs_processed + self.cli_opts.bcc_size
            bcc_list = recipients[addrs_processed:last_index]
            g.assurt(bcc_list)
            addrs_processed += self.cli_opts.bcc_size
            addr_lists.append(bcc_list)
         recipients = addr_lists
         # 2012.11.12: Using bcc is not cool. Don't do it.
         log.error('BCC is too impersonal. Please consider not using it.')
         g.assurt(False)

      # Process the recipients one or many at a time.

      prompted_once = False

      prog_log = Debug_Progress_Logger(loop_max=len(recipients))
      # MAYBE: Don't log for every email?
      #prog_log.log_freq = prog_log.loop_max / 100.0

      for recipient_or_list in recipients:

         email_unames = []

         # Make the To and Bcc headers.
         if self.cli_opts.bcc_size:
            g.assurt(False) # DEVs: Reconsider using BCC.
                            # Otherwise you cannot personalize messages, i.e.,
                            # with usernames of private UUID links.
            # Use a generic user name, since there are multiple recipients.
            msg_username = '******'
            # Send the email to ourselves...
            recipient_email = self.cli_opts.mail_from
            recipient_addr = ('"Cyclopath.org" <%s>' 
                              % (self.cli_opts.mail_from,))
            # ...and Bcc everyone else.
            email_addrs = []
            for recipient in recipient_or_list:
               # C.f. emailer.check_email, but using Bcc is deprecated, so
               # don't worry about it.
               msg_username = recipient[1]
               recipient_email = recipient[2]
               really_send = False
               if ((len(conf.mail_ok_addrs) == 1)
                   and ('ALL_OKAY' in conf.mail_ok_addrs)):
                  log.debug('go_main: conf says ALL_OKAY: %s'
                            % (recipient_addr,))
                  really_send = True
               elif recipient_email in conf.mail_ok_addrs:
                  log.debug('go_main: email in mail_ok_addrs: %s'
                            % (recipient_addr,))
                  really_send = True
               elif not conf.mail_ok_addrs:
                  log.error('go_main: mail_ok_addrs is not set: %s'
                            % (recipient_addr,))
               else:
                  # This is a dev. machine and we don't want to email users.
                  log.debug('go_main: skipping non-dev email: %s'
                            % (recipient_addr,))
               if really_send:
                  log.debug('Emailing user at: %s' % (recipient_addr,))
                  email_addr = ('"%s" <%s>' % (msg_username, recipient_email,))
                  email_addrs.append(email_addr)
                  email_unames.append(msg_username)
            addrs_str = ','.join(email_addrs)
            addr_bcc = 'Bcc: %s\n' % (addrs_str,)
            unsubscribe_proof = ''
            unsubscribe_link = ''
         else:
            # This is just a normal, send-directly-to-one-user email.
            msg_username = recipient_or_list[1]
            recipient_email = recipient_or_list[2]
            recipient_addr = ('"%s" <%s>' % (msg_username, recipient_email,))
            email_unames.append(msg_username)
            addr_bcc = ''
            unsubscribe_proof = recipient_or_list[3]
            unsubscribe_link = Emailer.make_unsubscribe_link(
               'user_unsubscribe', recipient_email, unsubscribe_proof)

            # To test the unsubscribe feature, try a link like this:
# http://ccpv3/gwis?request=user_unsubscribe&[email protected]&proof=asdasdasd

         db.close()

         the_msg = Emailer.compose_email(
            self.cli_opts.mail_from,
            msg_username,
            recipient_addr,
            unsubscribe_proof,
            unsubscribe_link,
            self.cli_opts.email_subject,
            content_plain,
            content_html,
            addr_bcc)

         if not prompted_once:
            do_send = self.ask_permission(the_msg)
            if not do_send:
               log.warning('Canceled by user. Bye!')
               sys.exit(0)
            prompted_once = True

         # NOTE: Emailer.send_email will check conf.mail_ok_addrs.
         # ALSO: This is the only place/caller/script that uses do_not_email.
         #       It's really just for testing, and this is the last stop.
         if not self.cli_opts.do_not_email:
            Emailer.send_email(
               email_unames,
               the_msg,
               prog_log,
               self.cli_opts.delay_time,
               self.cli_opts.dont_shake)

      # end: for recipient_or_list in recipients.

      prog_log.loops_fin()
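
The bcc_size branch above slices recipients into fixed-size batches by hand.
The same chunking can be written as a small generator; chunked is
illustrative, not part of the script:

   def chunked(seq, size):
      # Yield successive slices of at most `size` items; the final chunk
      # may be shorter. Mirrors the addrs_processed loop above.
      assert size > 0
      for start in range(0, len(seq), size):
         yield seq[start:start + size]

   # Usage: list(chunked([1, 2, 3, 4, 5], 2)) -> [[1, 2], [3, 4], [5]]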
Example No. 34
    def fetch_n_save(self):

        gwis_errs = []

        # Call parent's fcn., which calls Query_Overlord.prepare_filters()
        # and initializes self.doc to etree.Element('data').
        command.Op_Handler.fetch_n_save(self)

        # Assemble the qb from the request.
        qb = self.req.as_iqb(addons=False)
        g.assurt(qb.filters == Query_Filters(None))

        # We set login_required so this should always be the case.
        g.assurt(self.req.client.username != conf.anonymous_username)
        g.assurt(qb.username == self.req.client.username)

        self.item_stack_ids = qb.filters.decode_ids_compact(
            './fbil_sids', self.req.doc_in)
        if self.item_stack_ids and self.use_all_in_history:
            gwis_errs.append('Please specify only fbil_sids or all_hist.')

        items = item_user_access.Many()
        g.assurt(qb.sql_clauses is None)
        qb.sql_clauses = item_user_access.Many.sql_clauses_cols_all.clone()
        qb.filters.dont_load_feat_attcs = True
        qb.filters.min_access_level = Access_Level.denied

        if not self.item_stack_ids:
            g.assurt(self.use_all_in_history)
            stack_id_table_ref = 'temp_stack_id__item_findability'
            stack_ids_sql = ("""
            SELECT
               stack_id
            INTO TEMPORARY TABLE
               %s
            FROM
               (SELECT
                  item_stack_id
                FROM
                  item_findability
                WHERE
                  (show_in_history IS TRUE)
                  AND (username = %s)) AS foo_ifp
            """ % (
                stack_id_table_ref,
                qb.db.quoted(qb.username),
            ))
            rows = qb.db.sql(stack_ids_sql)
            #
            qb.sql_clauses.inner.join += ("""
            JOIN %s
               ON (gia.stack_id = %s.stack_id)
            """ % (
                stack_id_table_ref,
                stack_id_table_ref,
            ))
            check_exist = False
        else:
            id_count = self.item_stack_ids.count(',')
            if id_count > conf.constraint_sids_max:
                gwis_errs.append(
                    'Too many stack IDs in request: %d (max: %d).' % (
                        id_count,
                        conf.constraint_sids_max,
                    ))
            else:
                qb.filters.only_stack_ids = self.item_stack_ids
            # We'll have to double-check if these records exist before updating.
            check_exist = True

        if True:

            items.search_for_items_clever(qb)

            if not items:
                log.warning('fetch_n_save: no findability items: %s' %
                            (str(qb.filters), ))
                gwis_errs.append('No items were found to be findabilitied.')
            else:
                log.debug('fetch_n_save: no. item_findability: %d' %
                          (len(items), ))

            use_sids = []
            for itm in items:
                if ((self.action_squelch_pub is not None)
                        or (self.action_squelch_usr is not None)):
                    if itm.access_level_id <= Access_Level.arbiter:
                        log.debug('fetch_n_save: action_squelch: item: %s',
                                  itm)
                        use_sids.append(str(itm.stack_id))
                    else:
                        gwis_errs.append(
                            'You must be arbiter to change squelch.')
                else:
                    # self.action_history_add, self.action_history_chg
                    if itm.access_level_id > Access_Level.viewer:
                        gwis_errs.append('Unknown item or access denied.')
                    else:
                        log.debug('fetch_n_save: action_history: item: %s',
                                  itm)
                        use_sids.append(str(itm.stack_id))

            if not use_sids:

                gwis_errs.append('No items were found.')

            else:

                # use_sids = [ str(sid) for sid in use_sids ]
                self.update_stack_ids = ", ".join(use_sids)

                if check_exist:
                    # Make a list of stack IDs to insert first, before updating.
                    if ((self.action_history_add is not None)
                            or (self.action_history_chg is not None)):
                        username = qb.username
                    elif self.action_squelch_pub is not None:
                        username = conf.anonymous_username
                    else:
                        g.assurt(self.action_squelch_usr is not None)
                        username = qb.username
                    user_id = User.user_id_from_username(qb.db, username)
                    missing_sids_sql = ("""
                  SELECT
                     DISTINCT(itmv.stack_id)
                  FROM
                     item_versioned AS itmv
                  LEFT OUTER JOIN
                     item_findability AS itmf
                     ON ((itmv.stack_id = itmf.item_stack_id)
                         AND (itmf.username = %s))
                  WHERE
                     (itmf.username IS NULL)
                     AND (itmv.stack_id IN (%s))
                  """ % (
                        qb.db.quoted(username),
                        self.update_stack_ids,
                    ))
                    rows = qb.db.sql(missing_sids_sql)
                    log.debug('fetch_n_save: missing: %d' %
                              (len(rows), ))
                    value_objs = []
                    for row in rows:
                        # These value objects match below:
                        #   INSERT INTO item_findability
                        #      (item_stack_id, username, user_id,
                        #       library_squelch, show_in_history, branch_id)
                        value_objs.append("(%d, '%s', %d, %d, %s, %d)" % (
                            row['stack_id'],
                            username,
                            user_id,
                            Library_Squelch.squelch_always_hide,
                            'FALSE',
                            qb.branch_hier[0][0],
                        ))
                    self.insert_values = ", ".join(value_objs)

                success = qb.db.transaction_retryable(self.attempt_save, qb,
                                                      qb)

                if not success:
                    log.warning('fetch_n_save: failed!')

        if gwis_errs:
            err_str = ' / '.join(gwis_errs)
            log.debug('fetch_n_save: err_str: %s' % (err_str, ))
            raise GWIS_Error(err_str)
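
The missing_sids_sql query is a classic anti-join: LEFT OUTER JOIN the
per-user item_findability rows and keep only the items for which the join
found nothing. PostgreSQL plans the equivalent NOT EXISTS form as an
anti-join as well; a hedged sketch reusing the table and column names above,
where quoted_username and update_stack_ids stand in for the values the
command computes:

   missing_sids_sql = ("""
      SELECT DISTINCT(itmv.stack_id)
      FROM item_versioned AS itmv
      WHERE NOT EXISTS
            (SELECT 1
             FROM item_findability AS itmf
             WHERE (itmv.stack_id = itmf.item_stack_id)
               AND (itmf.username = %s))
        AND (itmv.stack_id IN (%s))
      """ % (quoted_username, update_stack_ids,))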