def import_map(self, urlpath):
	print 'Importing OpenData index %s' %(urlpath)
	u = urlopen(urlpath)
	data = u.read()
	u.close()    
	odata = make_instance(data)
	
	for o in odata.opendata:
	    print 'Importing OpenDATA: %s' %(o.loc.PCDATA)
	    self.import_data(o.loc.PCDATA)
def votes2digits(file):
    """Encode the casted votes as a decimal number.

    Parameters:
        file: An XML file containing the casted votes (vote-selection.xml)
    """
    from gnosis.xml.objectify import make_instance

    bits = []
    ballot = make_instance(file)
    ballotno = ballot.number
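    # 'cont' is a module-level dict mapping each contest name to its list of
    # candidates; it is built from ballot-election.xml by the ballot
    # definition module shown later in this listing.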
    for contest in ballot.contest:
        name = contest.name
        if contest.ordered == "No":
            b = [0] * len(cont[contest.name])
        else:
            b = [0] * len(cont[contest.name])**2
        n = 0
        for selection in contest.selection:
            if contest.name == "Presidency" and selection.name == "Vice President":
                text = [text, selection.PCDATA]
            else:
                text = selection.PCDATA
            if not (contest.name == "Presidency"
                    and selection.name == "President"):
                if text != "No preference indicated" and text != [
                        "No preference indicated", "No preference indicated"
                ]:
                    if selection.writein == "No":
                        ind = cont[contest.name].index(text)
                        if contest.ordered == "No":
                            b[ind] = 1
                        else:
                            b[ind * len(cont[contest.name]) + n] = 1
                    elif contest.ordered == "No":
                        b[-1] = 1
                    else:
                        b[-len(cont[contest.name]) + n] = 1
            n += 1
        bits += b
    # The string must have exactly 36 digits - insert leading zeroes
    digits = "%036d" % todecimal(bits)
    return ballotno + digits
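
The todecimal helper is not shown in this listing; a minimal sketch of what it presumably does, assuming it simply reads the bit list as a base-2 number:

def todecimal(bits):
    # Hypothetical reconstruction -- the real helper lives elsewhere in the
    # voting project. Fold the bit vector into a single integer.
    value = 0
    for bit in bits:
        value = value * 2 + bit
    return value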
    def import_data(self, urlpath):
	from opendata import models
	u = urlopen(urlpath)
	data = u.read()
	u.close()    

	odata = make_instance(data)
	slug = odata.source.id.replace('_', '-')   # to conform to URL requirements
	(opendata, created) = models.OpenData.objects.get_or_create(slug=slug)
	opendata.slug = slug
	opendata.opendata_id = odata.source.id
	if hasattr(odata.source, 'location'):
	    opendata.location_url = odata.source.location.PCDATA
	opendata.opendata_url = urlpath
	opendata.name = odata.source.name.PCDATA
	opendata.description = odata.source.description.PCDATA
	opendata.language = odata.source.language.PCDATA
	opendata.dinfo_isupdatable = (odata.datainfo.datatype.is_updatable != 'False')
	opendata.date_created = datetime.datetime.strptime(odata.datainfo.created.PCDATA, "%Y-%m-%dT%H:%M:%SZ")
	opendata.date_updated = datetime.datetime.strptime(odata.datainfo.updated.PCDATA, "%Y-%m-%dT%H:%M:%SZ")
	opendata.author_name = odata.datainfo.preparedBy.author.PCDATA
	opendata.author_website = odata.datainfo.preparedBy.authorWebsite.PCDATA
	
	# Import organization
	orgname = odata.source.org.name.PCDATA
	(org, created) = models.Organization.objects.get_or_create(name=orgname)
	if created:
	    org.description = odata.source.org.description.PCDATA
	    org.website = odata.source.org.website.PCDATA
	    org.address = odata.source.org.address.PCDATA
	    org.save()	

	opendata.organization = org
	opendata.spec_id = odata.specs.tablespec.id
	opendata.save()
	    
	opendata.fields().delete()
	for ofield in odata.specs.tablespec.cols.colspec:
	    try:
		field = models.DataField.objects.get(opendata=opendata, key=ofield.key)
	    except models.DataField.DoesNotExist:
		field = models.DataField(num=ofield.num, name=ofield.name.PCDATA, opendata=opendata)
	    field.description = ofield.description.PCDATA
	    field.key = ofield.key
	    field_type = ofield.type
	    ftype = models.FieldType.objects.get(name=field_type)
	    field.field_type = ftype
	    field.save()

	opendata.files().delete()
	for tableref in odata.data.current.tableref:	
	    try:
		dfile = models.OpenDataFile.objects.get(opendata=opendata, urlpath=tableref.url)
	    except models.OpenDataFile.DoesNotExist:
		format = models.DataFormat.objects.get(name=tableref.format)
		datatype = models.OpenDataType.objects.get(key=tableref.type)
		opendata.datatypes.add(datatype)
		opendata.formats.add(format)
		dfile = models.OpenDataFile(opendata=opendata, urlpath=tableref.url, format=format, datatype=datatype, hash_sha512=tableref.hash_sha512)
		dfile.specid = tableref.specid
		parts = tableref.date_updated.rstrip('Z').rsplit('.', 1)
		gt = parts[0]
		try:
		    dfile.date_updated = datetime.datetime.strptime(gt, "%Y-%m-%dT%H:%M:%S")
		except ValueError:
		    dfile.date_updated = datetime.datetime.strptime(gt, "%Y-%m-%dT%H:%M:%SZ")
		    
	    dfile.save()
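
For reference, gnosis.xml.objectify exposes child elements as attributes and element text as PCDATA, which is the access pattern the importer relies on throughout. A minimal sketch (the element names are made up for illustration):

from gnosis.xml.objectify import make_instance

doc = make_instance('<source><id>my-dataset</id><name>My dataset</name></source>')
print doc.id.PCDATA    # 'my-dataset'
print doc.name.PCDATA  # 'My dataset'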
Example 5
import gnosis.xml.objectify as xo
import gnosis.xml.pickle as xp
from StringIO import StringIO
import funcs

funcs.set_parser()

xml = '''<?xml version="1.0"?>
<!DOCTYPE Spam SYSTEM "spam.dtd" >
<Spam>
  <Eggs>Some text about eggs.</Eggs>
  <MoreSpam>Ode to Spam</MoreSpam>
</Spam>
'''

fh = StringIO(xml)
o = xo.make_instance(xml)
s = xp.dumps(o)
#print "---------- xml_objectify'd object, xml_pickle'd ----------"
#print s
o2 = xp.loads(s)
#print "---------- object after the pickle/unpickle cycle --------"
#print xp.dumps(o2)

if o.Eggs.PCDATA != o2.Eggs.PCDATA or \
   o.MoreSpam.PCDATA != o2.MoreSpam.PCDATA:
    raise "ERROR(1)"

print "** OK **"
Example 6
from gnosis.xml.objectify import make_instance
from gnosis.xml.objectify.utils import XPath
import sys

xml = '''<foo>
  <bar>this</bar>
  <bar>that</bar>
  <baz a1="fie" a2="fee">
    stuff <bar>fo</bar>
    <bar a1="fiddle">fum</bar>
    and <bam><bar>fizzle</bar></bam>
    more stuff
  </baz>
</foo>
'''

print xml
print
with open('xpath.xml', 'w') as f:
    f.write(xml)
o = make_instance(xml)

patterns = '''/bar //bar //* /baz/*/bar
              /bar[2] //bar[2..4]
              //@a1 //bar/@a1 /baz/@* //@*
              baz//bar/text() /baz/text()[3]'''

for pattern in patterns.split():
    print 'XPath:', pattern
    for match in XPath(o, pattern):
        print ' ', match
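
A quick sanity check, assuming the utility follows standard XPath semantics for these patterns:

assert len(list(XPath(o, '/bar'))) == 2    # the two top-level <bar> elements
assert len(list(XPath(o, '//bar'))) == 5   # every <bar> in the document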


Example 8
    def makeBallot(self):
        "Creates the ballot object by parsing the vote file"
        # We are using David's gnosis.xml.objectify module here
        self.ballot = make_instance(self.votefile)
"""This data should come from ballot-election.xml

- But for now include a fallback definition if the XML is unavailable
- Also, send the Python code to STDOUT if run as main (semi-Quine)
"""
from os.path import isfile
xml_data = 'ballot-election.xml'

if isfile(xml_data):
    from gnosis.xml.objectify import make_instance
    ballot = make_instance('ballot-election.xml')
    contnames, cont = [], {}
    for contest in ballot.contest:
        name = contest.name
        contnames.append(name)
        if contest.coupled == "No":
            cont[name] = [select.PCDATA for select in contest.selection]
            if contest.allow_writein == "Yes":
                cont[name].append("")
        else:
            cont[name] = []
            for n in range(0, len(contest.selection), 2):
                cont[name].append(
                    [s.PCDATA for s in contest.selection[n:n + 2]])
            if contest.allow_writein == "Yes":
                cont[name].append(["", ""])
else:
    contnames = [
        "Presidency", "Senator", "U.S. Representative", "Treasurer",
        "Attorney General", "Commis. of Education", "State Senate",
        "State Assembly", "Transportation Initiative",
Esempio n. 10
0
 def makeBallot(self):
     "Creates the ballot object by parsing the vote file"
     # We are using David's gnosis.xml.objectify module here
     self.ballot = make_instance(self.votefile)
Example 11
"""This data should come from ballot-election.xml

- But for now include a fallback definition if the XML is unavailable
- Also, send the Python code to STDOUT if run as main (semi-Quine)
"""
from os.path import isfile
xml_data = 'ballot-election.xml'

if isfile(xml_data):
    from gnosis.xml.objectify import make_instance
    ballot = make_instance('ballot-election.xml')
    contnames, cont = [], {}
    for contest in ballot.contest:
        name = contest.name
        contnames.append(name)
        if contest.coupled=="No":
            cont[name] = [select.PCDATA for select in contest.selection]
            if contest.allow_writein=="Yes":
                cont[name].append("")
        else:
            cont[name] = []
            for n in range(0, len(contest.selection), 2):
                cont[name].append([s.PCDATA for s in contest.selection[n:n+2]])
            if contest.allow_writein=="Yes":
                cont[name].append(["",""])
else:
    contnames = ["Presidency", "Senator", "U.S. Representative", "Treasurer",
                 "Attorney General", "Commis. of Education", "State Senate",
                 "State Assembly", "Transportation Initiative",
                 "Health care initiative", "Term limits", "Cat Catcher",
                 "County Commissioner"]
Example 12
def buildreports():
    drivetable = db['drives']
    linetable = db['lines']
    linecache = []

    clustertable = db['clusters']
    clusters = {}

    new = False
    for kfile in tqdm([x for x in sorted(os.listdir('./data')) if '.kml' in x], 'parsing kml', leave=True):
        if not drivetable.find_one(filename=kfile):
            drive = {'filename': kfile}
            drive['distance'] = float(drive['filename'][:-4].split('-')[-1][:-2])
            startdate = datetime.date(*map(int,drive['filename'][:-4].split('-')[:3]))
            startdate = startdate.replace(year=startdate.year+2000)

            if drive['distance'] < 1:
                continue

            with open('./data/'+drive['filename']) as kfh:
                kmldata = make_instance(kfh.read())
            try:
                lines = kmldata.Document.Folder.Placemark
                if not lines:
                    continue
            except AttributeError:
                continue

            new = True

            lastline = 'start'
            linelist = []
            for l in lines:
                try:
                    data = datadict(l.ExtendedData.SchemaData.SimpleData)
                except Exception:
                    continue

                status = data['status']
                if status != 'OK':
                    continue

                speed = int(int(data['speed'])*0.621371) # convert km/h to mph
                if speed > 110 or speed <= 0:
                    continue

                line = {
                    'prevline': lastline,
                    'speed': speed,
                    'length': round(int(data['length'])*0.000621371,1),
                }

                line['coords'] = simplejson.dumps([tuple(map(float, x.split(','))) for x in l.LineString.coordinates.PCDATA.split()])

                if hasattr(l, 'name') and getattr(l.name, 'PCDATA'):
                    name = l.name.PCDATA
                elif 'Name' in data and data['Name']:
                    name = data['Name']
                else:
                    name = ''

                name = name.strip(',').replace(',', ', ').strip().replace('  ', ' ')

                line['name'] = name
                line['fullname'] = '%s - %s' % (lastline, name)

                linetime = map(int, data['start_time'].split(':'))
                date = pytz.utc.localize(datetime.datetime(startdate.year, startdate.month, startdate.day,
                                                           linetime[0], linetime[1], linetime[2]))

                linetime = map(int, data['end_time'].split(':'))
                enddate = pytz.utc.localize(datetime.datetime(startdate.year, startdate.month, startdate.day,
                                                           linetime[0], linetime[1], linetime[2]))

                timezone = pytz.timezone('US/Eastern')
                date = date.astimezone(timezone)
                enddate = enddate.astimezone(timezone)

                if lastline == 'start':
                    startdate = date

                if date < startdate:
                    date += datetime.timedelta(days=1)

                line['date'] = date
                linelist.append(line)
                lastline = line['name']

            firstline = 'start'
            for line in linelist:
                if line['name']:
                    firstline = line['name']
                    break

            drive['endpoints'] = "%s to %s" % (firstline, lastline)
            drive['startdate'] = startdate.replace(tzinfo=None)
            drive['enddate'] = enddate.replace(tzinfo=None)
            drive['triptime'] = (drive['enddate']-drive['startdate']).seconds
            drive['avgspeed'] = round(drive['distance']/(drive['triptime']/3600.0),1)
            drive['weekbucket'] = drive['startdate'].strftime('%Y-%W')
            drive['weekdaybucket'] = drive['startdate'].strftime('(%w) %A')
            drive['monthbucket'] = drive['startdate'].strftime('%Y-%m')
            drive['timebucket'] = '%s:%02d%s' % (int(drive['startdate'].strftime('%I')),
                                            math.floor(drive['startdate'].minute/60.0*(60/timeslices))*timeslices,
                                            drive['startdate'].strftime('%p').lower())
            drive['fmtname'] = '%s-%s (%smi/%smin/%smph)' % (drive['startdate'].strftime('%m/%d %I:%M%p'),
                                                             drive['enddate'].strftime('%I:%M%p'),
                                                             drive['distance'], int(drive['triptime'])/60, drive['avgspeed'])

            def namecheck(s, start, end):
                for l in linelist[start:end]:
                    if s.lower() in l['name'].lower():
                        return True
                return False
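
            # Classify the drive: weekday departures in the 7-10am window that
            # start near Studer and end near CR-612 are the morning commute;
            # the 4-7pm mirror image is the evening commute.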

            if drive['startdate'] >= datetime.datetime(2013, 8, 5) and \
               drive['startdate'].weekday() < 5 and \
               drive['startdate'].hour >= 7 and \
               drive['startdate'].hour <= 10 and \
               drive['distance'] >= 47.3 and \
               drive['distance'] <= 57 and \
               namecheck('Studer', 0, 1) and \
               namecheck('CR-612', -7, None):
                drivetype = 'morning'
            elif drive['startdate'] >= datetime.datetime(2013, 8, 5) and \
                 drive['startdate'].weekday() < 5 and \
                 drive['startdate'].hour >= 16 and \
                 drive['startdate'].hour <= 19 and \
                 drive['distance'] >= 47.3 and \
                 drive['distance'] <= 57 and \
                 namecheck('Studer', -1, None) and \
                 namecheck('CR-612', 0, 7):
                drivetype = 'evening'
            else:
                drivetype = 'other'

            drive['type'] = drivetype
            driveid = drivetable.insert(drive)
            for line in linelist:
                line['drive'] = driveid
                line['type'] = drivetype
                line['cluster'] = repr(namecluster(line, clusters))

            linecache.extend(linelist)


    print
    log.info('loading line table')
    linetable.insert_many(linecache)

    log.info('clustering')
    if new:
        clusterrows = []
        for cname, cluster in clusters.items():
            for drivetype in cluster['speeds'].keys():
                speedarray = np.array(cluster['speeds'][drivetype])
                clusterrows.append({
                    'uuid': repr(cname),
                    'speed': int(speedarray.mean()),
                    'minspeed': int(speedarray.min()),
                    'maxspeed': int(speedarray.max()),
                    'startpt': simplejson.dumps(cluster['startpt']),
                    'endpt': simplejson.dumps(cluster['endpt']),
                    'coords': simplejson.dumps(cluster['coords']),
                    'count': len(speedarray),
                    'type': drivetype,
                    'speeds': simplejson.dumps(cluster['speeds']),
                    'length': round(np.array(cluster['lengths']).mean(), 2),
                    'name': '|'.join(cluster['names']),
                })

            speedarray = np.array([speed for dt in cluster['speeds'].values() for speed in dt])
            clusterrows.append({
                'uuid': repr(cname),
                'speed': int(speedarray.mean()),
                'minspeed': int(speedarray.min()),
                'maxspeed': int(speedarray.max()),
                'startpt': simplejson.dumps(cluster['startpt']),
                'endpt': simplejson.dumps(cluster['endpt']),
                'coords': simplejson.dumps(cluster['coords']),
                'count': cluster['count'],
                'type': 'all',
                'speeds': simplejson.dumps(cluster['speeds']),
                'length': round(np.array(cluster['lengths']).mean(), 2),
                'name': '|'.join(cluster['names']),
            })
        clustertable.delete()
        clustertable.insert_many(clusterrows)

    log.info('building kmls')
    allfolders = [folder for folder, rule in kmlfolderrules] + ['all',]

    drivesplitbucket('drives', allfolders, drivetable, linetable, clustertable, 'date(startdate) desc', recent_drives_count)
    drivesplitbucket('drives by length', allfolders, drivetable, linetable, clustertable, 'distance desc', 10)
    drivesplitbucket('drives by avg speed', allfolders, drivetable, linetable, clustertable, 'avgspeed desc', 10, 10)
    drivesplitbucket('drives by total time', allfolders, drivetable, linetable, clustertable, 'triptime desc', 10, 10)
    commutesplitbucket('commutes by depart time', 'timebucket', drivetable, linetable, clustertable, 0)
    commutesplitbucket('commutes by week', 'weekbucket', drivetable, linetable, clustertable, 3)
    commutesplitbucket('commutes by month', 'monthbucket', drivetable, linetable, clustertable, 0)
    commutesplitbucket('commutes by weekday', 'weekdaybucket', drivetable, linetable, clustertable, 0)
    commutesplitbucket('commutes by distance', 'distancebucket', drivetable, linetable, clustertable, 0)
    clusterspeedbucket('averages', allfolders, drivetable, linetable, clustertable, 'speed')
    clusterspeedbucket('top speeds', allfolders, drivetable, linetable, clustertable, 'maxspeed')
    clusterspeedbucket('slow speeds', allfolders, drivetable, linetable, clustertable, 'minspeed')

    log.info('graphing')

    def movingaverage(values, window):
        weights = np.repeat(1.0, window)/window
        smas = np.convolve(values, weights, 'valid')
        return list(smas)
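    # (window size, smoothing function, initially visible?) triples for the
    # moving-average overlay series built below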
    movingavgfuncs = (
        (3, lambda x: movingaverage(x, 3), False),
        (7, lambda x: movingaverage(x, 7), False),
        (30, lambda x: movingaverage(x, 30), True),
    )

    # Smooth both the x and y series with the given moving-average function,
    # then re-pair them into points.
    apply_avg = lambda d, avgfunc: zip(*map(avgfunc, zip(*[(x['x'], x['y']) for x in d])))

    # Point label: trip time formatted as h/m/s plus the distance.
    namefunc = lambda d, f: '%s (%smi)' % (secs_to_hms(d, f), d['distance'])
    secs_to_hms = lambda d, f: '%sh %sm %ss' % (d[f]/3600, (d[f]%3600)/60, d[f]%60) if d[f] > 3600 else '%sm %ss' % ((d[f]%3600)/60, d[f]%60)
    # Convert a stored datetime string to a millisecond timestamp for the
    # charts; the _day variant pins every date to the same day so only the
    # time of day is compared.
    to_ms_ts = lambda d, f: float((to_dt(d[f])-datetime.datetime.utcfromtimestamp(0)).total_seconds()*1000)
    to_ms_ts_day = lambda d, f: float((to_dt_day(d[f])-datetime.datetime.utcfromtimestamp(0)).total_seconds()*1000)
    to_dt = lambda d: datetime.datetime.strptime(d, '%Y-%m-%d %H:%M:%S.%f')
    to_dt_day = lambda d: datetime.datetime.strptime(' '.join(['2014-01-01', d.split()[1]]), '%Y-%m-%d %H:%M:%S.%f')
    to_float_mins = lambda d, f: float(d[f])/60.0

    def get_drive_field_data(drivetype, field, fieldlimit, datafunc, namefunc, timefunc):
        data = []
        for drive in db.query('select * from drives where type="%s" order by date(startdate) asc' % drivetype):
            # Skip drives whose field value falls outside the requested range.
            if fieldlimit:
                if fieldlimit[0] > int(drive[field]) or int(drive[field]) > fieldlimit[1]:
                    continue
            data.append({'x': timefunc(drive, 'startdate'), 'y': datafunc(drive, field), 'name': namefunc(drive, field)})
        return data

    def make_commute_graphs(drivetype):
        data = get_drive_field_data(drivetype, 'triptime', (0,3600*10), to_float_mins, namefunc, to_ms_ts)

        graph_data = [{
            'name': 'Drives',
            'type': 'scatter',
            'data': data,
        },]

        for days, avgfunc, vis in movingavgfuncs:
            graph_data.append({
                'name': '%s-Day Moving Average' % days,
                'type': 'line',
                'data': apply_avg(data, avgfunc),
                'marker': {'enabled': False},
                'states': {'hover': {'lineWidth': 0}},
                'enableMouseTracking': False,
                'visible': vis,
            })
        return graph_data
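    # The series dicts above appear to follow Highcharts' configuration
    # format (scatter points plus optional moving-average line overlays).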

    morning_data = make_commute_graphs('morning')
    evening_data = make_commute_graphs('evening')

    depart_time_data = [
        {
            'name': 'Morning Departures',
            'type': 'scatter',
            'data': get_drive_field_data('morning', 'triptime', (0,3600*3), to_float_mins, namefunc, to_ms_ts_day),
        },
        {
            'name': 'Evening Departures',
            'type': 'scatter',
            'data': get_drive_field_data('evening', 'triptime', (0,3600*3), to_float_mins, namefunc, to_ms_ts_day),
        },
    ]

    def drive_graph_data(drive):
        categories = []
        data = []
        average_data = []
        average_data_diff = []
        average_speed = []
        speed_diffs = []

        for line in linetable.find(drive=drive['id'], order_by='date'):
            display_name = '%s %s (%smi)' % (line['date'].strftime('%H:%M'), line['name'], line['length'])
            categories.append(line['name'].split(',')[-1].strip() if line['name'] else '')

            speed = float(line['speed'])
            avgdrivespeed = float(clustertable.find_one(uuid=line['cluster'], type='all')['speed'])
            speeddiff = float(speed-avgdrivespeed)

            diffcolor = "#000000"
            if speeddiff >= 1:
                diffcolor = colorspeed(speed/avgdrivespeed*55+15, 90.0, True)
                avgcolor = diffcolor
            elif speeddiff <= -1:
                avgcolor = colorspeed(speed/avgdrivespeed*55-15, 90.0, True)
            else:
                avgcolor = colorspeed(speed, 90.0, True)
            diff_diff = speed-abs(speeddiff) if speeddiff >= 1 else avgdrivespeed-abs(speeddiff)

            tooltip = '<b>%s</b><br/>Speed: %smph<br/>Avg Speed: %smph<br/>Difference: %smph<br/>' % (display_name, int(speed), int(avgdrivespeed), int(speeddiff))

            data.append({'y': speed, 'name': tooltip, 'color': '#%s' % colorspeed(speed, 90.0, True)})
            average_speed.append({'y': avgdrivespeed, 'name': tooltip, 'color': '#%s' % colorspeed(avgdrivespeed, 90.0, True)})
            average_data.append({'y': diff_diff, 'name': tooltip, 'color': '#%s' % avgcolor})
            average_data_diff.append({'y': abs(speeddiff), 'name': tooltip, 'color': '#%s' % diffcolor})
            speed_diffs.append({'y': speeddiff, 'name': tooltip, 'color': '#%s' % avgcolor})

        series = {
            'categories': simplejson.dumps(categories),
            'data': simplejson.dumps([
                {'name': 'Speed', 'data': data, 'stack': 'speed', 'visible': False},
                {'name': 'Average Speed', 'data': average_speed, 'stack': 'avg', 'visible': False},
                {'name': 'Difference from Average', 'data': speed_diffs, 'stack': 'diff', 'visible': False},
                {'name': 'Average Difference', 'data': average_data_diff, 'stack': 'stackdiff', 'visible': True},
                {'name': 'Average Difference', 'data': average_data, 'stack': 'stackdiff', 'visible': True},
            ],),
            'average_data': simplejson.dumps(average_data),
            'drive': drive,
        }

        return series

    drive_graphs = []
    for drive in db.query('select * from drives order by date(startdate) desc limit 10'):
        drive_graphs.append(drive_graph_data(drive))

    fastest_commutes = []
    slowest_commutes = []
    for drivetype in commutes:
        for drive in db.query('select * from drives where type="%s" order by avgspeed desc limit 10' % drivetype):
            fastest_commutes.append(drive_graph_data(drive))
        for drive in db.query('select * from drives where type="%s" order by avgspeed asc limit 10' % drivetype):
            slowest_commutes.append(drive_graph_data(drive))


    def commute_split_graph(drivetype, drivebucket):
        avgspeeds = []
        avglengths = []
        avgtimes = []
        buckets = sorted(drivetable.distinct(drivebucket, type=drivetype))
        for bucket in buckets:
            bucket = bucket[drivebucket]
            bucketdrives = list(db.query('select id, distance, avgspeed from drives where type="%s" and %s="%s"' % (drivetype, drivebucket, bucket)))
            drivecount = len(bucketdrives)

            tooltip = '<b>%s</b><br/>%s Drives' % (bucket, drivecount)
            avglength = round(np.mean([x['distance'] for x in bucketdrives]), 1)
            avgspeed = round(np.mean([x['avgspeed'] for x in bucketdrives]), 1)

            avglengths.append({'y': avglength, 'name': tooltip})
            avgspeeds.append({'y': avgspeed, 'name': tooltip})
            avgtimes.append({'y': round((avglength/avgspeed)*60, 1), 'name': tooltip})

        series = {
            'categories': simplejson.dumps([b[drivebucket] for b in buckets]),
            'data': simplejson.dumps([
                {'name': 'Average Time', 'data': avgtimes, 'stack': 'time', 'visible': True},
                {'name': 'Average Length', 'data': avglengths, 'stack': 'length', 'visible': False},
                {'name': 'Average Speed', 'data': avgspeeds, 'stack': 'speed', 'visible': False},
            ],),
            'title': '%s Commute by %s' % (drivetype.title(), drivebucket.title()),
            'target': '%s-%s' % (drivebucket, drivetype),
        }

        return series

    bucket_graphs = [
        commute_split_graph('morning', 'weekbucket'),
        commute_split_graph('evening', 'weekbucket'),
        commute_split_graph('morning', 'monthbucket'),
        commute_split_graph('evening', 'monthbucket'),
        commute_split_graph('morning', 'weekdaybucket'),
        commute_split_graph('evening', 'weekdaybucket'),
    ]

    t = loader.get_template('commutes.html')
    c = Context({
        'morning_data': simplejson.dumps(morning_data),
        'evening_data': simplejson.dumps(evening_data),
        'depart_time_data': simplejson.dumps(depart_time_data),
        'drive_graphs': drive_graphs,
        'fastest_commutes': fastest_commutes,
        'slowest_commutes': slowest_commutes,
        'bucket_graphs': bucket_graphs,
    })
    with open('report.html', 'w') as f:
        f.write(t.render(c))
    commands.getoutput('/usr/bin/google-chrome "file:///home/mach5/Documents/waze/report.html"')