Example #1
def main():
    # Simple read loop: forward each line of terminal input to the message
    # handler until the user types 'exit'. Note that the 'exit' line itself
    # is still passed to the handler before the loop ends.
    db.connect()
    s = ''
    while s != 'exit':
        s = input()
        print(message_handle('n', 'terminal', s))
    db.disconnect()
Example #2
def getIngredientData():
    conn, cur = connect()

    ingredient_counts = get_total_ingredient_counts(conn)

    disconnect(conn, cur)

    # round-trip through JSON to return a plain, JSON-safe copy of the counts
    return json.loads(json.dumps(ingredient_counts))
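The JSON round-trip used here is a common trick for producing a plain, JSON-safe deep copy of a query result; a minimal standalone sketch:

import json

row = {'flour': 3, 'sugar': 1}        # stand-in for a DB result
copy = json.loads(json.dumps(row))    # only plain dicts/lists/strings/numbers survive
print(copy == row, copy is row)       # True False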
Example #3
def add(info):
    conn, c = connect()
    c.execute("SELECT * FROM users WHERE username=?", (info[0],))
    data = c.fetchone()
    if data is None:
        # store the password as a sha256_crypt hash, never in plain text
        c.execute("INSERT INTO users VALUES (?,?,?)",
                  [info[0], sha256_crypt.encrypt(info[1]), info[2]])
        commit(conn)
        disconnect(conn)
        return "success"
    else:
        disconnect(conn)  # close the connection on this path as well
        return "username taken"
Example #4
def verify(username, password):
    conn, c = connect()
    c.execute("SELECT * FROM users WHERE username=?", (username,))
    user = c.fetchone()
    disconnect(conn)  # close the connection on every path, including misses
    if user is None:
        return False
    # user[0] is the username, user[1] the stored sha256_crypt hash
    return sha256_crypt.verify(password, user[1]) and user[0] == username
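A minimal usage sketch for the add()/verify() pair, assuming the helpers above and passlib's sha256_crypt are in scope:

if __name__ == '__main__':
    print(add(['alice', 's3cret', 'admin']))  # "success" on first run
    print(verify('alice', 's3cret'))          # True
    print(verify('alice', 'wrong'))           # False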
Example #5
def main():
    import argparse

    def valid_date(d):
        try:
            return dt.datetime.strptime(d, "%Y-%m-%d").date()
        except ValueError:
            raise argparse.ArgumentTypeError(
                "Please write dates in the preferred format (YYYY-MM-DD)")

    cli_parser = argparse.ArgumentParser(
        description="script to process datasets of article creations by "
                    "autoconfirmed users to determine if the article was deleted")

    # Verbosity option
    cli_parser.add_argument('-v',
                            '--verbose',
                            action='store_true',
                            help='write informational output')

    #cli_parser.add_argument('dataset_file', type=str,
    #                        help='path to the dataset with historic creations')

    cli_parser.add_argument(
        'start_date',
        type=valid_date,
        help='start date for gathering data (format: YYYY-MM-DD)')

    cli_parser.add_argument(
        'end_date',
        type=valid_date,
        help='end date for gathering data (format: YYYY-MM-DD)')

    args = cli_parser.parse_args()

    if args.verbose:
        logging.basicConfig(level=logging.INFO)

    analyzer = SurvivalAnalyzer()
    analyzer.db_connect()
    ## analyzer.insert_creations(args.dataset_file)

    ## no need to keep this connection alive
    db.disconnect(analyzer.log_conn)

    analyzer.process_creations(args.start_date, args.end_date)

    # ok, done
    return
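valid_date is the standard argparse pattern of a type= callback that parses and validates in one step; a standalone sketch of the same idea:

import argparse
import datetime as dt

def valid_date(d):
    try:
        return dt.datetime.strptime(d, "%Y-%m-%d").date()
    except ValueError:
        raise argparse.ArgumentTypeError(
            "Please write dates in the preferred format (YYYY-MM-DD)")

parser = argparse.ArgumentParser()
parser.add_argument('start_date', type=valid_date)
print(parser.parse_args(['2024-01-31']).start_date)  # 2024-01-31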
Example #6
def run():
    database = db.connect()
    cursor = db.getCursor(database)

    t1 = time.time()
    results = recommendations.getRecommendations(cursor, 3)
    t2 = time.time()
    prettyPrint(cursor, 3, results)
    t3 = time.time()
    print "REQUIRED TIME FOR RECOMENDATIONS: %0.3f ms, for querying and printing: %0.3f ms" % (
        (t2 - t1) * 1000.0, (t3 - t2) * 1000.0)
    results = recommendations.getRecommendations(cursor, 4)
    prettyPrint(cursor, 4, results)
    db.disconnect(cursor)
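The timing pattern above uses time.time(); on Python 3, time.perf_counter() is the higher-resolution choice for short intervals. A minimal sketch (do_work is a hypothetical placeholder):

import time

t1 = time.perf_counter()
do_work()  # hypothetical: whatever is being measured
t2 = time.perf_counter()
print("elapsed: %0.3f ms" % ((t2 - t1) * 1000.0))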
Example #7
    def is_reverted(self, revid, radius=15):
        '''
        Check if the given revision ID was reverted by any of the
        next `radius` edits (default: 15).

        @param revid: revision ID we're testing
        @type revid: int

        @param radius: number of surrounding revisions to inspect
        @type radius: int
        '''

        # get the page ID and timestamp of the current revision
        cur_query = r'''SELECT rev_timestamp, rev_page
                         FROM revision
                         WHERE rev_id=%(revid)s'''

        # get checksums of the past 15 revisions
        past_query = r'''SELECT rev_sha1
                          FROM revision
                          WHERE rev_page=%(pageid)s
                          AND rev_timestamp < %(timestamp)s
                          ORDER BY rev_timestamp DESC
                          LIMIT %(k)s'''

        # get checksums of the future 15 revisions
        fut_query = r'''SELECT rev_sha1
                         FROM revision
                         WHERE rev_page=%(pageid)s
                         AND rev_timestamp > %(timestamp)s
                         ORDER BY rev_timestamp ASC
                         LIMIT %(k)s'''

        attempts = 0
        pageid = None
        timestamp = None
        prev_checksums = set()
        future_checksums = list()

        while attempts < self.db_attempts:
            try:
                self.dbcursor.execute(cur_query,
                                      {'revid': revid})
                for row in self.dbcursor:
                    pageid = row['rev_page']
                    timestamp = row['rev_timestamp']
                    
                if not pageid:
                    logging.warning('failed to retrieve page ID for revision ID {0}'.format(revid))
                    break

                self.dbcursor.execute(past_query,
                                      {'pageid': pageid,
                                       'timestamp': timestamp,
                                       'k': radius})
                for row in self.dbcursor:
                    prev_checksums.add(row['rev_sha1'])

                self.dbcursor.execute(fut_query,
                                      {'pageid': pageid,
                                       'timestamp': timestamp,
                                       'k': radius})
                for row in self.dbcursor:
                    future_checksums.append(row['rev_sha1'])
            except MySQLdb.OperationalError as e:
                attempts += 1
                logging.error('unable to execute revert test queries')
                logging.error('MySQLdb error {0}:{1}'.format(e.args[0], e.args[1]))
                db.disconnect(self.dbconn, self.dbcursor)
                (self.dbconn, self.dbcursor) = db.connect()
            else:
                break # ok, done

        if attempts >= self.db_attempts:
            logging.error('exhausted query attempts, aborting')
            return

        # walk through the future revisions and see if any of them
        # reverted the page to a state seen before the current revision
        for checksum in future_checksums:
            if checksum in prev_checksums:
                return True

        return False
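The revert test above reduces to a set-membership check: an edit counts as reverted if any of the next `radius` revisions restores a SHA1 already seen among the previous `radius` revisions. A standalone sketch with made-up checksums:

prev_checksums = {'a1b2', 'c3d4', 'e5f6'}
future_checksums = ['0000', 'c3d4', '1111']  # the second future edit restores an old state
print(any(c in prev_checksums for c in future_checksums))  # True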
Example #8
def clear():
    conn, c = connect()
    c.execute("DELETE FROM users")
    commit(conn)
    disconnect(conn)
Example #9
def remove(user):
    conn, c = connect()
    c.execute("DELETE FROM users WHERE username=?", (user,))
    commit(conn)
    disconnect(conn)
Example #10
def read():
    conn, c = connect()
    c.execute("SELECT * FROM users")
    users = c.fetchall()
    disconnect(conn)
    return users
Example #11
def get_type(username):
    conn, c = connect()
    c.execute("SELECT * FROM users WHERE username=?", (username,))
    user = c.fetchone()
    disconnect(conn)
    return user[2]
Example #12
def init():
    conn, c = connect()
    c.execute("CREATE TABLE users (username TEXT, password TEXT, role TEXT)")
    commit(conn)
    disconnect(conn)
Example #13
def disconnect():
    status = db.disconnect()
    return status
Example #14
    def db_disconnect(self):
        '''
        Disconnect the database connections
        '''
        db.disconnect(self.wiki_db_conn)
        db.disconnect(self.tool_db_conn)
Example #15
def teardown_request(exception):
    db.disconnect()
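This matches the signature of a Flask teardown hook; a sketch of how it is typically registered, assuming a Flask app object and the example's db module (the decorator is not shown in the source):

from flask import Flask

app = Flask(__name__)

@app.teardown_request
def teardown_request(exception):
    db.disconnect()  # close the per-request DB connection, whether or not an error occurred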
Example #16
def main():
    # Parse CLI options
    import argparse
    
    cli_parser = argparse.ArgumentParser(
        description="Code for testing the collaborator recommender"
    )
        
    # Add verbosity option
    cli_parser.add_argument('-v', '--verbose', action='store_true',
                            help='I can has kittehtalkzalot?')
    
    cli_parser.add_argument('member_file', type=str,
                            help='path to member file')
    
    # cli_parser.add_argument('k', type=int,
    #                     help='size of random sample to draw')

    cli_parser.add_argument('output_file', type=str,
                            help='path to output file (for appending, must exist!)')
    
    cli_parser.add_argument('nrecs', type=int,
                            help='number of recommendations per user')
    
    cli_parser.add_argument('test', type=str,
                            help='type of similarity test to base recommendations on (jaccard, cosine, or coedit)')

    cli_parser.add_argument('cutoff', type=int,
                            help='the number of 30-day months to use when fetching revisions')

    cli_parser.add_argument('namespaces', type=str,
                            help='comma-separated list of namespaces to base the similarity on')
    
    args = cli_parser.parse_args()
    
    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)

    # Regular expression to match a member username in our membership file
    member_re = re.compile(r'User talk[:](?P<username>[^}]+)')

    all_members = set()
    
    with open(args.member_file, 'r', encoding='utf-8') as infile:
        for line in infile:
            match_obj = member_re.search(line.strip())
            if match_obj is None:
                print('no username match in line: {0}'.format(line.strip()))
            else:
                all_members.add(match_obj.group('username'))

    # members = random.sample(all_members, args.k)
    if args.test == 'coedit':
        recommender = CollabRecommender(assoc_threshold=0)
    else:
        recommender = CollabRecommender()
        
    site = pywikibot.Site('en') 
               
    print("Beginning collaborator recommendation test")

    total_recs = 0
    total_overlap = 0

    members = ['Slatersteven', 'WerWil', 'Fnlayson', 'Drrcs15', 'Turbothy',
               '21stCenturyGreenstuff', 'RGFI', 'Loesorion', 'Grahamdubya', 'Sioraf',
               'Skittles the hog', 'Smoth 007', 'Superfly94', 'Ewulp', 'Dank', 'Magus732',
               'Redmarkviolinist', 'The27thMaine', 'Kcdlp', 'Foxsch', 'Tdrss', 'URTh',
               'Waase', 'L clausewitz', 'Judgedtwice', 'Choy4311', 'Codrinb', 'Smmurphy',
               'Kliu1', 'Gowallabies', 'Secutor7', 'Moneywagon', 'Nostalgia of Iran',
               'Linmhall', 'Karanacs', 'Dana boomer', 'Robotam', 'Fdsdh1', 'DanieB52',
               'Rosiestep', 'Scholarus', 'Laurinavicius', 'Dapi89', 'UrbanTerrorist',
               'AGK', 'Samuel Peoples', 'Sapphire', 'Catlemur', 'Martocticvs', 'Gparkes',
               'Pratyya Ghosh', 'Eurocopter', 'Pahari Sahib', 'Seitzd', 'The Bushranger',
               'Natobxl', 'MasterOfHisOwnDomain', 'Takashi kurita', 'TeunSpaans',
               'Kierzek', 'WDGraham', 'Miborovsky', 'The lost library',
               'Antidiskriminator', 'The ed17', 'Cliftonian', 'AshLin',
               'GeneralizationsAreBad', 'MechaChrist', 'Joep01', 'Chris.w.braun',
               'TBrandley', 'Marky48', 'Cplakidas', 'John', 'Nyth83', 'Elonka',
               'Alexandru.demian', 'Martinp23', 'GermanJoe', 'P.Marlow', 'ryan.opel',
               'Asarelah', 'Ian Rose', 'Pectory', 'KizzyB', 'MrDolomite', 'Leifern',
               'Timeweaver', 'Ashashyou', 'Sumsum2010', 'Looper5920', 'Geira', 'Ackpriss',
               'Binksternet', 'Lothar von Richthofen', 'Molestash', 'Srnec',
               'Sasuke Sarutobi', '.marc.']
    
    # members = ['Kieran4', 'Brendandh', 'Gog the Mild', 'Seitzd', 'Robotam',
    #            'Keith-264', 'Nyth83', 'Mmuroya', 'Navy2004', 'Secutor7',
    #            'Ranger Steve', 'MisterBee1966']

    # members = ['XavierItzm']

    # Store namespaces as a list of str
    namespaces_list = args.namespaces.split(',')
    
    get_contribs_query = '''SELECT rev_id, page_id
    FROM page JOIN revision_userindex
    ON page_id=rev_page
    WHERE page_namespace IN ({namespaces})
    AND rev_minor_edit=0
    AND rev_deleted=0
    AND rev_user_text=%(username)s
    ORDER BY rev_id DESC
    LIMIT %(k)s
    '''.format(namespaces=",".join(namespaces_list))
    ## Probably set k to 500, and remember to use cursor.fetchall()
    
    for member in members:
    
        contribs = set()

        try:
            ## Note: connecting and disconnecting to prevent the DB
            ## from disconnecting us due to inactivity
            (dbconn, dbcursor) = db.connect(dbhost='c3.labsdb')
            dbcursor.execute(get_contribs_query,
                             {'username': member,
                              'k': 500})
            for row in dbcursor.fetchall():
                try:
                    contribs.add(row['page_id'])
                    if len(contribs) == 128:
                        break
                except AttributeError:
                    continue
            db.disconnect(dbconn, dbcursor)
        except MySQLdb.Error as e:
            logging.error("unable to execute query to get users by article")
            logging.error("Error {0}: {1}".format(e.args[0], e.args[1]))
            return False

        # Calculate the cutoff date
        cutoff = datetime.now() - timedelta(days=args.cutoff*30)
        matches = recommender.recommend(contribs, member, 'en', cutoff,
                                        namespaces=namespaces_list,
                                        nrecs=args.nrecs, backoff=1, test=args.test)
        match_set = set([rec['item'] for rec in matches])
        overlap = match_set & all_members
        # for user in overlap:
        #     print(user)
        #     for data in matches:
        #         if data['item'] == user:
        #             print(data['overlap'])
        #             break
        
        total_recs += len(match_set)
        total_overlap += len(overlap)

        print('Got {n} recommendations for User:{user}'.format(n=len(match_set),
                                                               user=member))
        print('Overlap with all members: {0}'.format(len(overlap)))
       
        #for i in range(0, len(match_set)):
        #    print(match_set.pop())

    # Print stats to stdout, and append stats to output file
    print('''Total statistics:
    Number of recommendations: {n}
    Overlap with all members: {o}
    % overlap: {p:.2}'''.format(n=total_recs, o=total_overlap,
                                p=100*float(total_overlap)/float(total_recs)))
    with open(args.output_file, 'a') as outfile:
        outfile.write('{n}\t{t}\t{nrecs}\t{int_n}\t{int_p:.2}\n'.format(
            n=args.nrecs, t=args.cutoff, nrecs=total_recs, int_n=total_overlap,
            int_p=100*float(total_overlap)/float(total_recs)))
    print('Recommendation test complete')
Example #17
    def recommend(self, contribs, username, lang, cutoff,
                  namespaces=["0"], nrecs=100, threshold=3, backoff=0,
                  test='jaccard'):

        '''
        Find `nrecs` number of neighbours for a given user based on
        the overlap between their contributions.

        :param contribs: The user's contributions
        :type contribs: list

        :param username: Username of the user we're recommending for
        :type username: str

        :param lang: Language code of the Wikipedia we're working on
        :type lang: str

        :param cutoff: Date and time from when to start looking for revisions
        :type cutoff: datetime.datetime

        :param namespaces: Namespaces to base comparisons on
        :type namespaces: list of str

        :param nrecs: Number of recommendations we seek
        :type nrecs: int

        :param threshold: Number of articles in common to be determined a neighbour
        :type threshold: int

        :param backoff: Do we apply a backoff strategy on the threshold?
        :type backoff: int

        :param test: Name of correlation test to return results from
        :type test: str
        '''

        # Override default variables with supplied parameters
        self.cutoff = cutoff
        self.lang = lang
        self.nrecs = nrecs
        self.thresh = threshold
        self.backoff = backoff
        self.test = test

        # SQL queries are defined here so as to not perform the string
        # formatting multiple times.
        self.get_articles_by_user_query = """SELECT
             DISTINCT p.page_id
             FROM {revision_table} r
             JOIN page p
             ON r.rev_page=p.page_id
             WHERE rev_user_text = %(username)s
             AND rev_timestamp >= %(timestamp)s
             AND p.page_namespace IN ({namespaces})""".format(
                 revision_table=self.revtable,
                 namespaces=",".join(namespaces))

        # Query to get edited articles for a user who is above the threshold,
        # we then disregard minor edits and reverts.
        # self.get_articles_by_expert_user_query = """SELECT p.page_title,
        #      p.page_id, r.rev_id, r.rev_sha1, r.rev_timestamp
        #      FROM {revision_table} r
        #      JOIN page p
        #      ON r.rev_page=p.page_id
        #      WHERE rev_user_text = %(username)s
        #      AND rev_timestamp >= %(timestamp)s
        #      AND rev_minor_edit=0""".format(revision_table=self.revtable)
        self.get_articles_by_expert_user_query = """SELECT
             DISTINCT p.page_id
             FROM {revision_table} r
             JOIN page p
             ON r.rev_page=p.page_id
             WHERE rev_user_text = %(username)s
             AND rev_timestamp >= %(timestamp)s
             AND rev_minor_edit=0
             AND p.page_namespace IN ({namespaces})""".format(
                 revision_table=self.revtable,
                 namespaces=",".join(namespaces))

        # Query to get the number of edits a user has made (in our dataset)
        # might want to limit this to namespace 0 (articles)
        self.get_edit_count_query = """SELECT count(r.rev_id) AS numedits
             FROM {revision_table} r
             JOIN page p
             ON r.rev_page=p.page_id
             WHERE r.rev_user_text = %(username)s
             AND r.rev_timestamp >= %(timestamp)s
             AND p.page_namespace=0""".format(revision_table=self.revtable)

        logging.info(
            "Got request for user {0}:{1} to recommend based on {2} edits!".format(
                lang, username, len(contribs)))

        # Recommendations we'll be returning
        recs = []

        # Mapping usernames to number of edits to not repeat those SQL queries
        self.nedit_map = {}

        (self.dbconn, self.dbcursor) = db.connect(dbhost='c3.labsdb')
        if not self.dbconn:
            logging.error("Failed to connect to database")
            return recs

        self.site = pywikibot.Site(lang)

        # Turn contributions into a set, as we'll only use it that way
        contribs = set(contribs)

        # Get some recs.
        recs = self.get_recs_at_coedit_threshold(username, contribs, self.test)

        db.disconnect(self.dbconn, self.dbcursor)
        # Return truncated to nrecs, switched from a list of objects to a list of dicts
        return [{'item': rec.username, 'value': rec.assoc, 'overlap': rec.overlap}
                for rec in recs[:nrecs]]
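A hypothetical driver for recommend(), mirroring the call in Example #16; the page IDs and parameter values here are made up:

from datetime import datetime, timedelta

recommender = CollabRecommender()
cutoff = datetime.now() - timedelta(days=6 * 30)  # six 30-day months
recs = recommender.recommend({1234, 5678}, 'ExampleUser', 'en', cutoff,
                             namespaces=['0'], nrecs=10, test='jaccard')
for rec in recs:
    print(rec['item'], rec['value'], rec['overlap'])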
Example #18
    def db_disconnect(self):
        '''
        Disconnect the database connections
        '''
        db.disconnect(self.wiki_db_conn)
        db.disconnect(self.tool_db_conn)
Example #19
    def clean_article(self, articledata):
        '''
        Using info about a specific article, find out when a given class
        assessment was posted to the talk page, as well as the most recent
        article revision at that time, then fetch quality features for that
        article revision and store it in articledata.
        '''

        # Query to get talk page ID and most recent revision of
        # an article and its talk page.
        latest_query = r''' SELECT ap.page_id AS art_id,
                             tp.page_id AS talk_id,
                             ap.page_latest AS art_latest,
                             tp.page_latest AS talk_latest
                             FROM page ap
                             JOIN page tp
                             USING (page_title)
                             WHERE tp.page_namespace=1
                             AND ap.page_id=%(pageid)s'''

        # Query to get a list of revisions for a given talk page
        # based on the timestamp of a given article revision.
        tp_revquery = r'''SELECT rev_id, rev_timestamp
                           FROM revision
                           WHERE rev_page=%(talkpageid)s
                           AND rev_timestamp < (SELECT rev_timestamp
                           FROM revision
                           WHERE rev_id=%(revid)s)
                           ORDER BY rev_timestamp DESC'''

        # Query to get the most recent revision ID of an article
        # at the given time, based on a talk page revision ID
        recent_revquery = r'''SELECT rev_id, rev_timestamp
                               FROM revision
                               WHERE rev_page=%(pageid)s
                               AND rev_timestamp <
                                  (SELECT rev_timestamp
                                   FROM revision
                                   WHERE rev_id=%(tp_revid)s)
                               ORDER BY rev_timestamp DESC
                               LIMIT 1'''

        # Query to get the first next revision ID of an article,
        # based on a talk page revision ID
        next_revquery = r'''SELECT rev_id, rev_timestamp
                             FROM revision
                             WHERE rev_page=%(pageid)s
                             AND rev_timestamp >
                                (SELECT rev_timestamp
                                 FROM revision
                                 WHERE rev_id=%(tp_revid)s)
                             ORDER BY rev_timestamp ASC
                             LIMIT 1'''

        wp10_scale = {'stub': 0,
                      'start': 1,
                      'c': 2,
                      'b': 3,
                      'ga': 4,
                      'a': 5,
                      'fa': 6}

        # map the current class to a number
        start_idx = wp10_scale[articledata['class'].lower()]
        
        logging.info('initial assessment class is {0}'.format(articledata['class']))

        try:
            self.dbconn.ping()
        except Exception:
            # the connection has gone away, reconnect
            db.disconnect(self.dbconn, self.dbcursor)
            (self.dbconn, self.dbcursor) = db.connect()

        # Fetch talk page ID, as well as latest revision
        # of both article and talk page
        attempts = 0
        while attempts < self.db_attempts:
            try:
                
                self.dbcursor.execute(latest_query,
                                      {'pageid': articledata['pageid']})
                for row in self.dbcursor:
                    articledata['revid'] = row['art_latest']
                    articledata['talkpageid'] = row['talk_id']
                    articledata['talkpagerev'] = row['talk_latest']
            except MySQLdb.OperationalError as e:
                attempts += 1
                logging.error('unable to execute query to get talk page ID and latest revision IDs')
                logging.error('MySQLdb error {0}:{1}'.format(e.args[0], e.args[1]))
                # reconnect
                db.disconnect(self.dbconn, self.dbcursor)
                (self.dbconn, self.dbcursor) = db.connect()
            else:
                break # ok, done

        if attempts >= self.db_attempts:
            logging.error('exhausted query attempts, aborting')
            return

        # get a list of talk page revisions after a given date
        tp_revs = []
        attempts = 0
        while attempts < self.db_attempts:
            try:
                
                self.dbcursor.execute(tp_revquery,
                                      {'talkpageid': articledata['talkpageid'],
                                       'revid': articledata['revid']})
                for row in self.dbcursor:
                    tp_revs.append(TPRevision(row['rev_id'],
                                              row['rev_timestamp']))
                logging.info('found {0} talk page revisions to inspect'.format(len(tp_revs)))
            except MySQLdb.OperationalError as e:
                attempts += 1
                logging.error('unable to execute query to get talk page revisions')
                logging.error('MySQLdb error {0}:{1}'.format(e.args[0], e.args[1]))
                # reconnect
                db.disconnect(self.dbconn, self.dbcursor)
                (self.dbconn, self.dbcursor) = db.connect()
            else:
                break # ok, done

        if attempts >= self.db_attempts:
            logging.error('exhausted query attempts, aborting')
            return

        # If it's empty it means we have the most recent revision,
        # so we can just keep the data we have and return.
        if not tp_revs:
            return

        prev_tprevid = -1
        i = 0
        slice_size = 20
        done = False
        while i < len(tp_revs) and not done:
            rev_subset = tp_revs[i:i+slice_size]
            revisions.get_revisions(self.site, rev_subset)

            for revision in rev_subset:
                logging.info('assessing talk page revision ID {0}'.format(revision.id))
                # NOTE: The assessments are at the top of the page,
                # and the templates are rather small,
                # so if the page is > 8k, truncate.
                if not revision.content:
                    logging.info('revision has no content, skipping')
                    continue

                if len(revision.content) > 8*1024:
                    logging.info('revision is {0} bytes, truncating to 8k'.format(len(revision.content)))
                    revision.content = revision.content[:8*1024]
                assessments = self.get_assessments(revision.content)
                cur_idx = []
                for assessment in assessments:
                    try:
                        cur_idx.append(wp10_scale[assessment.rating])
                    except KeyError:
                        continue # not a valid assessment

                if not cur_idx:
                    logging.info('found no assessments in this revision')
                    if self.is_reverted(revision.id):
                        logging.info('revision got reverted, continuing...')
                        continue
                    else:
                        # We have found a revision with no assessments
                        # and it was not reverted, prev_tprevid is the
                        # talk page revision ID we want to use
                        done = True
                        break

                cur_idx = max(cur_idx)
                logging.info('found assessment with class index {0}'.format(cur_idx))
                # If we find the same assessment rating, update prev_tprevid:
                # we then know we have a more recent assessment.
                if cur_idx == start_idx:
                    prev_tprevid = revision.id
                else:
                    # We have found a revision with a lower or higher rating,
                    # which means prev_tprevid is the talk page revision ID
                    # we want to use to find the most recent article revision
                    done = True
                    break
            
            i += slice_size

        # If prev_tprevid is -1, our existing revision is the valid one
        if prev_tprevid < 0:
            return

        # Update articledata with the found talk page revision ID
        articledata['talkpagerev'] = prev_tprevid

        # Find the most recent revision of the article at the time
        # of the previous talk page revision ID.
        article_revision = None
        article_timestamp = None
        attempts = 0
        while attempts < self.db_attempts:
            try:
                self.dbcursor.execute(recent_revquery,
                                      {'pageid': articledata['pageid'],
                                       'tp_revid': prev_tprevid})
                for row in self.dbcursor:
                    article_revision = row['rev_id']
            except MySQLdb.OperationalError as e:
                attempts += 1
                logging.error('unable to execute query to get the most recent article revision')
                logging.error('MySQLdb error {0}:{1}'.format(e.args[0], e.args[1]))
                db.disconnect(self.dbconn, self.dbcursor)
                (self.dbconn, self.dbcursor) = db.connect()
            else:
                break # ok, done

        if attempts >= self.db_attempts:
            logging.error('exhausted query attempts, aborting')
            return

        # error check
        if not article_revision:
            # likely a talk page created just before the article page,
            # get the first one after instead
            logging.warning('failed to get article revision for talk page revision ID {0}, picking first after instead'.format(prev_tprevid))
            attempts = 0
            while attempts < self.db_attempts:
                try:
                    self.dbcursor.execute(next_revquery,
                                          {'pageid': articledata['pageid'],
                                           'tp_revid': prev_tprevid})
                    for row in self.dbcursor:
                        article_revision = row['rev_id']
                except MySQLdb.Error as e:
                    attempts += 1
                    logging.error('unable to execute query to get the next article revision')
                    logging.error('MySQLdb error {0}:{1}'.format(e.args[0], e.args[1]))
                    db.disconnect(self.dbconn, self.dbcursor)
                    (self.dbconn, self.dbcursor) = db.connect()
                else:
                    break # ok, done

        if not article_revision:
            logging.error('picking first next revision also failed, unable to continue')
            return

        logging.info('new most recent article revision ID {0}'.format(article_revision))
        # update article data with new revision ID
        articledata['revid'] = article_revision

        # all done
        return
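The assessment-ranking step in clean_article() reduces to mapping WP 1.0 class names onto an ordinal scale and keeping the highest recognized rating; a standalone sketch:

wp10_scale = {'stub': 0, 'start': 1, 'c': 2, 'b': 3, 'ga': 4, 'a': 5, 'fa': 6}
found = ['start', 'b', 'unknown-class']  # ratings extracted from one talk page revision
cur_idx = max(wp10_scale[a] for a in found if a in wp10_scale)
print(cur_idx)  # 3, i.e. 'b' is the highest recognized assessment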
Example #20
def update(username, new_pass):
    conn, c = connect()
    c.execute("UPDATE users SET password=? WHERE username=?", (sha256_crypt.encrypt(new_pass), username))
    commit(conn)
    disconnect(conn)
Example #21
# NOTE: this rebinds the name `db` from the imported module to the connector instance
db = db.mysql_connector(configuration["mysql_host"],
                        configuration["mysql_user"],
                        configuration["mysql_password"],
                        configuration["mysql_db"])
db.connect()
networks = parser.getNetworks()
for BSSID in networks:
    xmlNetwork = networks[BSSID]
    dbNetwork = db.getNetwork(BSSID)
    print("%s (%s) 📶%d" %
          (xmlNetwork.ESSID, xmlNetwork.BSSID, xmlNetwork.RSSI))
    if dbNetwork is None:  # if the network does not exist yet...
        dbNetwork = db.addNetwork(xmlNetwork)  # ...add it
    elif dbNetwork.RSSI < xmlNetwork.RSSI:  # if it exists and the new RSSI is higher...
        db.updateNetwork(xmlNetwork)  # ...update it

    for MAC in xmlNetwork.devices:
        device = xmlNetwork.devices[MAC]
        print("   %s" % (device.MAC))
        deviceID = db.getDeviceID(device)
        if deviceID is None:
            deviceID = db.addDevice(device).ID
        else:
            db.updateDevice(device)
        relationID = db.getRelationID(deviceID, dbNetwork.ID)
        if relationID is None:
            db.addRelation(deviceID, dbNetwork.ID)
db.disconnect()
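The network loop above is a get-or-insert ("upsert") pattern; the same idea extracted into a helper, reusing the db API names from the example:

def upsert_network(db, network):
    existing = db.getNetwork(network.BSSID)
    if existing is None:
        return db.addNetwork(network)   # first sighting: insert
    if existing.RSSI < network.RSSI:
        db.updateNetwork(network)       # stronger signal seen: update
    return existing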
Example #22
def getRecipeData():
    conn, cur = connect()
    recipes = get_all_recipes(conn)
    disconnect(conn, cur)
    # round-trip through JSON to return a plain, JSON-safe copy, as in Example #2
    return json.loads(json.dumps(recipes))
Example #23
def db_connections():
    """Принудительно откроем соединение с БД, используемой в тестах и не забудем его закрыть в конце"""
    connect()
    yield True
    disconnect()
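The generator shape here (setup, yield, teardown) matches pytest's fixture protocol; a sketch of how it would typically be registered and consumed, assuming pytest (the decorator is not shown in the source):

import pytest

@pytest.fixture
def db_connections():
    connect()
    yield True
    disconnect()

def test_users_table(db_connections):
    ...  # runs while the database connection is open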
Example #24
    def close():
        _db.disconnect()