def main():
    #######################################################################
    # Gather information about our tickets (mainly assembles comment list)
    #######################################################################
    #prock - load pickle file here and generate a row
    with open(TICKET_FILE, 'rb') as f:
        tickets = pickle.load(f)

    # Stores a list of comments for each ticket by ID
    comment_coll = collections.defaultdict(list)

    for ticketid in tickets:
        # Field 4 of each pickled ticket holds its list of change records.
        comments = tickets[ticketid][4]
        for comment in comments:
            # Keep only records that carry a "comment" entry; the indices
            # below are assumed to be date, author and body, matching
            # massage_comment()'s parameters.
            if "comment" in comment:
                dct = massage_comment(ticketid, comment[0], comment[1], comment[4])
                comment_coll[ticketid].append(dct)

    #######################################################################
    # Write the ticket comments to json files indicating their parent issue
    #######################################################################
    for ticket, data in comment_coll.iteritems():
        # Skip tickets whose first comment body is just '.', apparently a
        # placeholder meaning "no real comments".
        if data[0]['body'] != '.':
            for count, row in enumerate(data):
                with open(COMMENTS_PATH % (ticket, count), 'w') as f:
                    json.dump(row, f, indent=5)

    #######################################################################
    # Write the actual ticket data to separate json files (GitHub API v3)
    #######################################################################
    for ticketid in tickets:
        with open(ISSUES_PATH % str(ticketid), 'w') as f:
            write_issue(tickets[ticketid], f)

    #######################################################################
    # Finally, dump all milestones and the related data. This script is not
    # attempting to extract due dates or other data. We just manually mined
    # the milestone names once and stored that in MILESTONES for reference.
    #######################################################################
    for name, id in MILESTONES.iteritems():
        with open(MILESTONES_PATH % id, 'w') as f:
            dct = {
                'number': id,
                'creator': DEFAULT_USER,
                'title': name,
            }
            json.dump(dct, f, indent=5)
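
# For reference, each per-comment file written above is the json.dump of the
# dict built by massage_comment() (defined elsewhere in this script). The exact
# field names are an assumption, but a GitHub API v3 style comment would look
# roughly like:
#
#     {
#          "user": "someuser",
#          "created_at": "2011-04-21T17:23:10Z",
#          "body": "original comment text pulled out of trac"
#     }
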
def main():
    #######################################################################
    # Gather information about our tickets (mainly assembles comment list)
    #######################################################################
    # Stores a list of comments for each ticket by ID
    comments = collections.defaultdict(list)
    comment_rows = csv.reader(open(CSVFILE, 'rb'),
                              delimiter=CSVDELIM, quotechar=CSVESCAPE)
    for row in comment_rows:
        try:
            ticket, date, author, body = row
        except ValueError: # malformed ticket query, fix in trac or the csv file!
            print '/!\\ Please check this csv row: /!\\\n', row
            continue
        if not ticket: # nothing we can do if there is no ticket to assign
            continue
        dct = massage_comment(ticket, date, author, body)
        comments[ticket].append(dct) # defaultdict, append always works

    #######################################################################
    # Write the ticket comments to json files indicating their parent issue
    #######################################################################
    for ticket, data in comments.iteritems():
        # NOTE: this writes to ISSUES_PATH, which the issue loop below also
        # uses, so these comment dumps get overwritten; a dedicated comments
        # path is presumably what was intended here.
        with open(ISSUES_PATH % ticket, 'w') as f:
            json.dump(data, f, indent=5)

    #######################################################################
    # Write the actual ticket data to separate json files (GitHub API v3)
    #######################################################################
    csv_data = urllib.urlopen(TRAC_REPORT_URL)
    ticket_data = csv.DictReader(csv_data)
    for row in ticket_data:
        if not (row.get('summary') and row.get('ticket')):
            continue
        with open(ISSUES_PATH % row['ticket'], 'w') as f:
            write_issue(row, f)

    #######################################################################
    # Finally, dump all milestones and the related data. This script is not
    # attempting to extract due dates or other data. We just manually mined
    # the milestone names once and stored that in MILESTONES for reference.
    #######################################################################
    for name, id in MILESTONES.iteritems():
        with open(MILESTONES_PATH % id, 'w') as f:
            dct = {
                'number': id,
                'creator': DEFAULT_USER,
                'title': name,
            }
            json.dump(dct, f, indent=5)
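
# For reference, each milestone file dumped by the loop above ends up roughly
# like this (hypothetical values):
#
#     {
#          "number": 3,
#          "creator": "someuser",
#          "title": "1.0 release"
#     }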