Example #1
def write_issue(ticket, outfile):
    """Dumps a csv line *row* from the issue query to *outfile*.
    """
    # Issue text body
    body = ticket[3]['description']
    body = trac_to_gh(body)

    # Default state: open (no known resolution)
    state = STATES.get(ticket[3]['status'], 'open')

    # Trac will have stored some kind of username.
    reporter = ticket[3]['reporter']

    # Not sure whether we have a related github account for that user.
    if USERNAMES.get(reporter):
        userdata = USERNAMES[reporter]
    else: # If we do not, at least mention the user in our issue body
        userdata = DEFAULT_USER
        body = ('This issue was reported by **%s**\r\n\r\n' % reporter) + body

    # The pickled ticket data stores this under 'milestone'. Try to find
    # the matching entry or fall back to the default milestone 3.
    milestone_info = ticket[3]['milestone']
    milestone = MILESTONES.get(milestone_info, 3)

    labels = [] # Collect assorted Trac fields that might serve as labels
    for tag in ('type', 'component', 'priority'):
        if ticket[3].get(tag) and LABELS.get(ticket[3][tag]):
            label = LABELS[ticket[3][tag]]
            labels.append({'name': github_label(label)})

    # Dates
    updated_at = DateTime(str(ticket[2])).ISO8601()
    created_at = DateTime(str(ticket[1])).ISO8601()
    
    # Now prepare writing all data into the json files
    dct = {
        'title': ticket[3]['summary'],
        'body': body,
        'state': state,
        'user': userdata,
        'milestone': int(milestone),
        'labels': labels,
        'updated_at': updated_at,
        'created_at': created_at,
    }

    # Assigned user in trac and github account of that assignee
#    assigned_trac = ticket[3]['owner']
#    assigned = USERNAMES.get(assigned_trac)
    # Assigning really does not make sense without github account
#    if state == 'open' and assigned and assigned['login'] != 'fifengine':
#        print assigned
#        dct['assignee'] = assigned

    # Everything collected, write the json file
    json.dump(dct, outfile, indent=5)
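write_issue leans on several module-level lookup tables that are defined elsewhere in the script. A minimal sketch of their plausible shape, followed by a call; every value here is an illustrative placeholder (the 'fifengine' login is only inferred from the commented-out assignee check above), not the project's real data:

# Hypothetical placeholders -- the real script builds these from its own data.
STATES = {'closed': 'closed', 'new': 'open', 'reopened': 'open'}
DEFAULT_USER = {'login': 'fifengine'}                # fallback GitHub account
USERNAMES = {'tracuser': {'login': 'gh-tracuser'}}   # Trac name -> GitHub user dict
MILESTONES = {'0.4.0': 3}                            # milestone title -> number
LABELS = {'defect': 'bug', 'enhancement': 'enhancement'}

with open('issues/42.json', 'w') as outfile:
    write_issue(tickets[42], outfile)   # *tickets* as loaded in Example #2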
Example #2
def main():
    #######################################################################
    # Gather information about our tickets (mainly assembles comment list)
    #######################################################################
    # Stores a list of comments for each ticket by ID

    # Load the pickled Trac ticket data
    with open(TICKET_FILE, 'rb') as f:
        tickets = pickle.load(f)

    comment_coll = collections.defaultdict(list)

    for ticketid in tickets:
        comments = tickets[ticketid][4]
        for comment in comments:
            if ("comment" in comment):
                dct = massage_comment(ticketid, comment[0],comment[1],comment[4])
                comment_coll[ticketid].append(dct)

    #######################################################################
    # Write the ticket comments to json files indicating their parent issue
    #######################################################################
    for ticket, data in comment_coll.iteritems():
        if data[0]['body'] != '.':
            count = 0
            for row in data:
                with open(COMMENTS_PATH % (ticket, count), 'w') as f:
                    json.dump(row, f, indent=5)
                count += 1

    #######################################################################
    # Write the actual ticket data to separate json files (GitHub API v3)
    #######################################################################
    for ticketid in tickets:
        with open(ISSUES_PATH % str(ticketid), 'w') as f:
            write_issue(tickets[ticketid], f)

    #######################################################################
    # Finally, dump all milestones and the related data. This script is not
    # attempting to extract due dates or other data. We just manually mined
    # the milestone names once and stored that in MILESTONES for reference.
    #######################################################################
    for name, id in MILESTONES.iteritems():
        with open(MILESTONES_PATH % id, 'w') as f:
            dct = {
                'number': id,
                'creator': DEFAULT_USER,
                'title': name,
            }
            json.dump(dct, f, indent=5)
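massage_comment is called here but never shown. A minimal sketch of what it plausibly returns, assuming the same USERNAMES/DEFAULT_USER mappings and DateTime conversion seen in Example #1; the real helper lives elsewhere in the script:

def massage_comment(ticket, date, author, body):
    # Sketch only -- the shape matches what the loops above expect
    # (a dict with at least a 'body' key); the details are assumptions.
    # *ticket* is accepted for parity with the call sites but unused here.
    user = USERNAMES.get(author, DEFAULT_USER)
    text = trac_to_gh(body)
    if author not in USERNAMES:
        text = ('This comment was posted by **%s**\r\n\r\n' % author) + text
    return {
        'user': user,
        'body': text,
        'created_at': DateTime(str(date)).ISO8601(),
    }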
Example #3
def main():
    #######################################################################
    # Gather information about our tickets (mainly assembles comment list)
    #######################################################################
    # Stores a list of comments for each ticket by ID
    comments = collections.defaultdict(list)
    with open(CSVFILE, 'rb') as csvfile:
        comment_rows = csv.reader(csvfile, delimiter=CSVDELIM,
                                  quotechar=CSVESCAPE)
        for row in comment_rows:
            try:
                ticket, date, author, body = row
            except ValueError: # malformed ticket query, fix in trac or the csv file!
                print '/!\\ Please check this csv row: /!\\\n', row
                continue
            if not ticket: # nothing we can do if there is no ticket to assign
                continue
            dct = massage_comment(ticket, date, author, body)
            comments[ticket].append(dct) # defaultdict, append always works

    #######################################################################
    # Write the ticket comments to json files indicating their parent issue
    #######################################################################
    for ticket, data in comments.iteritems():
        # Write to COMMENTS_PATH; reusing ISSUES_PATH here would collide with
        # the issue files written below.
        with open(COMMENTS_PATH % ticket, 'w') as f:
            json.dump(data, f, indent=5)

    #######################################################################
    # Write the actual ticket data to separate json files (GitHub API v3)
    #######################################################################
    csv_data = urllib.urlopen(TRAC_REPORT_URL)
    ticket_data = csv.DictReader(csv_data)
    for row in ticket_data:
        if not (row.get('summary') and row.get('ticket')):
            continue
        with open(ISSUES_PATH % row['ticket'], 'w') as f:
            write_issue(row, f)

    #######################################################################
    # Finally, dump all milestones and the related data. This script is not
    # attempting to extract due dates or other data. We just manually mined
    # the milestone names once and stored that in MILESTONES for reference.
    #######################################################################
    for name, id in MILESTONES.iteritems():
        with open(MILESTONES_PATH % id, 'w') as f:
            dct = {
                'number': id,
                'creator': DEFAULT_USER,
                'title': name,
            }
            json.dump(dct, f, indent=5)
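This variant reads local CSV data and fetches the ticket report straight from Trac. The path and URL constants it relies on are defined elsewhere in the script; a hypothetical sketch of their shape, with illustrative values only:

# Illustrative values -- point these at your own Trac instance and layout.
CSVFILE = 'comments.csv'
CSVDELIM = ','
CSVESCAPE = '"'
TRAC_REPORT_URL = 'https://trac.example.org/report/1?format=csv'
TRAC_TICKET_URL = 'https://trac.example.org/ticket/%s'
ISSUES_PATH = 'issues/%s.json'
COMMENTS_PATH = 'issues/%s.comments.json'  # this example formats in the ticket id only
MILESTONES_PATH = 'milestones/%s.json'
TICKET_FILE = 'tickets.pickle'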
Example #4
def write_issue(row, outfile):
    """Dumps a csv line *row* from the issue query to *outfile*.
    """
    for key, value in row.items():
        row[key] = value.decode('utf-8')
    # Issue text body
    body = row.get('_description', u'')
    body = trac_to_gh(body) + '\r\n\r\n' \
        '[> Link to originally reported Trac ticket <]({url})'.format(
            url=TRAC_TICKET_URL % row['ticket'])

    # Default state: open (no known resolution)
    state = STATES.get(row.get('status'), 'open')

    # Trac will have stored some kind of username.
    reporter = row['_reporter']

    # Not sure whether we have a related github account for that user.
    if USERNAMES.get(reporter):
        userdata = USERNAMES[reporter]
    else: # If we do not, at least mention the user in our issue body
        userdata = DEFAULT_USER
        body = ('This issue was reported by **%s**\r\n\r\n' % reporter) + body

    # Whether this is stored in 'milestone' or '__group__' depends on the
    # query type. Try to find the data or assign the default milestone 0.
    milestone_info = row.get('milestone', row.get('__group__'))
    milestone = MILESTONES.get(milestone_info, 0)

    labels = [] # Collect assorted Trac fields that might serve as labels
    for tag in ('type', 'component', 'priority'):
        if row.get(tag) and LABELS.get(row[tag]):
            label = LABELS[row[tag]]
            labels.append({'name': github_label(label)})

    # Also attach a special label to our starter tasks.
    # Again, please ignore this.
    #if row['ticket'] in easy_tickets:
    #    labels.append({'name': unicode(LABELS.get('start').lower())})

    # Dates
    updated_at = row.get('modified') or row.get('_changetime')
    created_at = row.get('created') or updated_at

    # Now prepare writing all data into the json files
    dct = {
        'title': row['summary'],
        'body': body,
        'state': state,
        'user': userdata,
        'milestone': {'number': milestone},
        'labels': labels,
        'updated_at': updated_at,
        'created_at': created_at,
    }

    # Assigned user in trac and github account of that assignee
    assigned_trac = row.get('owner')
    assigned = USERNAMES.get(assigned_trac)
    # Assigning really does not make sense without github account
    if state == 'open' and assigned:
        dct['assignee'] = assigned

    # Everything collected, write the json file
    json.dump(dct, outfile, indent=5)
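Both write_issue variants depend on trac_to_gh and github_label, which these examples never define. A rough, assumption-laden sketch of what such helpers might look like; the real converters almost certainly handle many more Trac constructs:

import re

def trac_to_gh(text):
    # Sketch only: converts a few common Trac wiki constructs to
    # GitHub-flavored markdown.
    text = re.sub(r'\{\{\{(.*?)\}\}\}', r'`\1`', text, flags=re.DOTALL)  # code
    text = re.sub(r"'''(.*?)'''", r'**\1**', text)                       # bold
    text = re.sub(r"''(.*?)''", r'*\1*', text)                           # italic
    text = re.sub(r'^== (.*?) ==$', r'## \1',
                  text, flags=re.MULTILINE)                              # headings
    return text

def github_label(name):
    # Sketch only: assumes label names are wanted lowercase and comma-free.
    return name.strip().lower().replace(',', '')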