Example #1
def main():
	if len( sys.argv ) != 2:
		print >> sys.stderr, "Usage: {name} directory-or-file".format(
			name = sys.argv[0],
		)
		exit(1)
	
	file_or_dir = sys.argv[1]
	if not os.path.exists( file_or_dir ):
		print >> sys.stderr, "No such file or directory: {f!r}".format(
			f = file_or_dir,
		)
		exit(1)
	
	( files, base_dir, ) = get_files( file_or_dir )
	
	with chdir( base_dir ):
		ret = measure_complexity_for_files( files )
		for f in ret['files']:
			sum_complexity = ret['files'][f]
			print "FILE: {f}\t{c}".format(
				f = os.path.relpath( f, '.' ),
				c = sum_complexity,
			)
		
		print "TOTAL_COMPLEXITY: {c}".format( c = ret['total_complexity'] )
		print "NUM_FILES: {n}".format( n = ret['num_files'] )
		print "AVG_COMPLEXITY_PER_FILE: {c:.3f}".format( c = ret['avg_complexity_per_file'] )
Example #2
 def test_os_chdir_is_called_with_orig_cwd_in_exit_even_if_exception_occurs(
         self, mock_chdir):
     try:
         with chdir(self.dst_dir):
             mock_chdir.reset_mock()
             raise RuntimeError
     except RuntimeError:
         mock_chdir.assert_called_once_with(self.orig_cwd)
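Examples #2, #6, and #7 exercise the directory-switching chdir context manager against a mocked os.chdir. The project's actual implementation is not shown on this page; a minimal sketch that would satisfy these tests, assuming a plain contextlib-based wrapper, looks like this:

import contextlib
import os

@contextlib.contextmanager
def chdir(path):
    # Save the current working directory, switch to `path`, and restore
    # the saved directory on exit -- even if the body raises, which is
    # exactly what Example #2 asserts.
    orig_cwd = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(orig_cwd)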
Example #3
def get_files( file_or_dir ):
	files = []
	
	if os.path.isdir( file_or_dir ):
		base_dir = file_or_dir
		with chdir( base_dir ):
			for root, dirnames, filenames in os.walk( '.' ):
				for filename in fnmatch.filter( filenames, '*.py' ):
					files.append( os.path.join( root, filename ))
	
	elif os.path.isfile( file_or_dir ):
		base_dir = os.path.dirname( file_or_dir )
		files.append( os.path.relpath( file_or_dir, base_dir ))
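	# NOTE: base_dir is only assigned in the two branches above; if
	# file_or_dir exists but is neither a regular file nor a directory,
	# the return below raises UnboundLocalError.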
	
	return ( files, base_dir, )
Example #4
from datetime import date
import logging

import init_utils as iu
import import_utils as util
from db.models import FieldInformation
from django.db import transaction

import os
import os.path as op
_mydir = op.abspath(op.dirname(__file__))
_djangodir = op.normpath(op.join(_mydir, '../django'))
import sys
sys.path.insert(0, _djangodir)
import chdir as cd
with cd.chdir(_djangodir):
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'hmslincs_server.settings')
    import django.db.models as models
    import django.db.models.fields as fields
del _mydir, _djangodir

__version__ = "$Revision: 24d02504e664 $"
# $Source$

# ---------------------------------------------------------------------------

import setparams as _sg
_params = dict(
    VERBOSE = False,
    APPNAME = 'db',
)
Example #5
DEFAULT_ENCODING = 'utf8'

# ---------------------------------------------------------------------------

#------------------------------------------------------------------------------
# ugly kluge: to be eliminated once this module lives under some
# django/app/management directory

_mydir = op.abspath(op.dirname(__file__))
_djangodir = op.normpath(op.join(_mydir, '../django'))
import sys
sys.path.insert(0, _djangodir)
sys.path.insert(0, _mydir)

import chdir as cd
with cd.chdir(_djangodir):
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'hmslincs_server.settings')
    import django.db.models as models
    import django.db.models.fields as fields
del _mydir, _djangodir
mo = models
fl = fields
#------------------------------------------------------------------------------

import xls2py as x2p
import sdf2py as s2p

# def check_record(fields, labels, data, row=None, exc=None):
#     if exc:
#         pfx = '' if row is None else 'row %d: ' % row
#         print '%s%s' % (pfx, str(exc))
Example #6
 def test_os_chdir_is_called_with_orig_cwd_in_exit(self, mock_chdir):
     with chdir(self.dst_dir):
         mock_chdir.reset_mock()
     mock_chdir.assert_called_once_with(self.orig_cwd)
Example #7
 def test_os_chdir_is_called_with_dst_dir_in_entry(self, mock_chdir):
     with chdir(self.dst_dir):
         mock_chdir.assert_called_once_with(self.dst_dir)
Example #8
# -*- coding: utf-8 -*-
"""
Created on Wed Dec  2 11:15:29 2015

@author: Luke
"""
import numpy as np
from chdir import chdir

with open('input.txt', 'r') as nf:
    next(nf)
    genes = []
    ctrls = []
    expms = []
    for line in nf:
        words = line.strip('\r\n').split(' ')
        genes.append(words[0])
        ctrls.append([float(x) for x in words[1:4]])
        expms.append([float(x) for x in words[4:7]])

b = chdir(ctrls, expms)
b = np.squeeze(b)
annot_b = list(zip(genes, b.tolist()))
res = sorted(annot_b, key=lambda x: x[1] * x[1], reverse=True)
assert (res[0][0] == 'MCL1')
assert (res[1][0] == 'LIMD2')
assert (abs(res[1][1] - 0.379125) < 0.0001)
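The chdir imported here (and used again in Examples #9, #12, and #14) appears to be the Characteristic Direction function for differential gene expression, not the directory-changing context manager seen in the other examples: it takes control and experiment expression matrices and returns one coefficient per gene. A toy illustration of the two-argument calling convention from this example, using made-up gene names and values:

import numpy as np
from chdir import chdir  # characteristic-direction function, as imported above

# Hypothetical data: four genes, three control and three experiment replicates.
genes = ['geneA', 'geneB', 'geneC', 'geneD']
ctrls = [[1.0, 1.1, 0.9],
         [5.0, 5.2, 4.8],
         [2.0, 2.1, 1.9],
         [0.5, 0.4, 0.6]]
expms = [[1.2, 0.8, 1.0],
         [9.0, 9.1, 8.9],
         [2.0, 2.2, 1.8],
         [0.1, 0.2, 0.1]]

b = np.squeeze(chdir(ctrls, expms))  # one coefficient per gene
# Rank genes by squared coefficient, largest change first, as above.
ranked = sorted(zip(genes, b.tolist()), key=lambda g: g[1] ** 2, reverse=True)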
Example #9
# Author: Qiaonan Duan
# Ma'ayan Lab, Icahn School of Medicine at Mount Sinai
# Oct. 15, 2013
#

filename = 'example.txt'

with open(filename) as nf:
	header = next(nf).rstrip('\r\n').split('\t')
	header = header[1:]
	ctrlIdx = [i for i in range(len(header)) if header[i]=='0']
	expIdx = [i for i in range(len(header)) if header[i]=='1']
	assert((len(ctrlIdx)+len(expIdx))==len(header))
	#next(nf) #skip 2nd line

	identifiers = []
	ctrlMat = []
	expMat = []
	for line in nf:
		words = line.rstrip('\r\n').split('\t')
		identifiers.append(words[0])
		values = words[1:]
		ctrlMat.append([float(values[i]) for i in ctrlIdx])
		expMat.append([float(values[i]) for i in expIdx])


chdirVector = chdir(ctrlMat,expMat,identifiers,1)
print 'chdirVector:', chdirVector

Example #10
def calling_module(level=0):
    with cd.chdir(_getiwd()):
        return (ins.getmodule(ins.currentframe(level + 2)))
Example #11
def __main__():
    if len(sys.argv) < 5:
        print_usage()
        exit(1)

    if not "-o" in sys.argv or not "-n" in sys.argv:
        print "You must provide both old and new jar paths"
        print_usage()
        exit(1)

    old_jar_path = sys.argv[sys.argv.index("-o") + 1]
    new_jar_path = sys.argv[sys.argv.index("-n") + 1]

    l = Logger("/".join((os.getcwd(), "pyjarcmp.log")))

    diff_app = DEFAULT_DIFF_APP
    reports_dir = DEFAULT_REPORTS_DIR
    working_dir = DEFAULT_WORKING_DIR

    if not os.path.exists(reports_dir):
        os.makedirs(reports_dir)
    if not os.path.exists(working_dir):
        os.makedirs(working_dir)

    with chdir(working_dir):
        l.log("Removing old data")
        if os.path.exists("old_jar"):
            subprocess.call("rm -rf old_jar".split(" "))
        if os.path.exists("new_jar"):
            subprocess.call("rm -rf new_jar".split(" "))

        l.log("Creating working dirs")
        os.mkdir("old_jar")
        os.mkdir("new_jar")


    with chdir("/".join((working_dir, "old_jar"))):
        l.log("Extracting old jar")
        subprocess.call("jar -xf".split(" ") + [old_jar_path])

    with chdir("/".join((working_dir, "new_jar"))):
        l.log("Extracting new jar")
        subprocess.call("jar -xf".split(" ") + [new_jar_path])


    l.log("Getting old jar files list")
    old_jar_files = []
    for subdir, dirs, files in os.walk("/".join((working_dir, "old_jar"))):
        for f in files:
            old_jar_files.append(os.path.join(subdir, f))

    l.log("Getting new jar files list")
    new_jar_files = []
    for subdir, dirs, files in os.walk("/".join((working_dir, "new_jar"))):
        for f in files:
            new_jar_files.append(os.path.join(subdir, f))

    if "-exclude_meta_inf" in sys.argv:
        l.log("Excluding META-INF")
        old_jar_files = [x for x in old_jar_files if x.find("/META-INF/") < 0]
        new_jar_files = [x for x in new_jar_files if x.find("/META-INF/") < 0]

    if "-exclude" in sys.argv:
        excludes = sys.argv[sys.argv.index("-exclude") + 1].split(",")
        for efile in excludes:
            l.log("Excluding " + efile)
            old_jar_files = [x for x in old_jar_files if x.find(efile.strip()) < 0]
            new_jar_files = [x for x in new_jar_files if x.find(efile.strip()) < 0]
            

    exit(jar_compare(old_jar_files, new_jar_files))
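A side note on Example #11: paths are built with "/".join((working_dir, "old_jar")), which assumes POSIX separators; os.path.join is the portable equivalent and pairs naturally with the os.walk calls already used above. A small illustration with hypothetical values:

import os

working_dir = 'work'                             # stand-in for DEFAULT_WORKING_DIR
old_dir = os.path.join(working_dir, 'old_jar')   # same as "/".join((working_dir, "old_jar")) on POSIX
new_dir = os.path.join(working_dir, 'new_jar')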
Example #12
import numpy as np
from chdir import chdir

with open('/Users/Carol/Desktop/RNAseqbrain-out.txt','r') as nf:
    next(nf)
    genes = []
    ctrls = []
    expms = []
    for line in nf:
        words = line.split('\t')
        genes.append(words[0])
        ctrls.append([float(x) for x in words[1:8]])
        expms.append([float(x) for x in words[8:16]])
        
b = chdir(ctrls,expms)
b = np.squeeze(b)
annot_b = list(zip(genes,b.tolist()))
res = sorted(annot_b,key=lambda x:x[1]*x[1],reverse=True)
#assert(res[0][0]=='MCL1')
#assert(res[1][0]=='LIMD2')
#assert((res[1][1]-0.379125)<0.0001)

print(res)

new_file = '/Users/Carol/Desktop/gtex_analysis/brain-2050-chdir.txt'
f = open(new_file, 'w')
#f.write(res)
f.write(str(res))


Example #13
def main():
	if len( sys.argv ) != 2:
		print >> sys.stderr, "Usage: {name} directory".format(
			name = sys.argv[0],
		)
		exit(1)
	
	dir = sys.argv[1]
	if not os.path.exists( dir ):
		print >> sys.stderr, "No such directory: {f!r}".format(
			f = dir,
		)
		exit(1)
	
	new_dir = '/tmp/metrics-cc-git-repo'
	print "Copying Git repo {d0} to {d1!r} ...".format(
		d0 = dir,
		d1 = new_dir,
	)
	err = call(
		[ 'rm', '-rf', new_dir ],
		stdout = sys.stdout,
		stderr = sys.stderr,
	)
	
	exc = None
	try:
		err = call(
			[ 'cp', '-a', os.path.abspath( dir ), new_dir ],
			stdout = sys.stdout,
			stderr = sys.stderr,
		)
		
		branch = 'master'
		revisions = {}
		previous = 0
		
		while True:
			with chdir( new_dir ):
				info = git_checkout_previous( branch, previous )
				if not info:
					break
				
				( sha, unix_time, iso_time, ) = info
				
				( files, base_dir, ) = get_files( new_dir )
				with chdir( base_dir ):
					ret = measure_complexity_for_files( files )
					revisions[previous] = ret
					
					revisions[previous]['sha'       ] = sha
					revisions[previous]['unix_time' ] = unix_time
					revisions[previous]['iso_time'  ] = iso_time
			
			previous += 1
			
			#if previous > 50:  #FIXME REMOVEME
			#	break
		
		metrics = (
			'num_files',
			'total_complexity',
			'avg_complexity_per_file',
		)
		max = {}
		for metric in metrics:
			max[metric] = 0.0
		
		metrics_titles = {
			'num_files': "{value:d} Python files",
			'total_complexity': "{value:.2f} total Python codebase complexity",
			'avg_complexity_per_file': "{value:.2f} avg. complexity per Python file",
		}
		metrics_value_formats = {
			'num_files': "{value:d}",
			'total_complexity': "{value:.2f}",
			'avg_complexity_per_file': "{value:.2f}",
		}
		metrics_labels = {
			'num_files': "number of Python files",
			'total_complexity': "total Python codebase complexity",
			'avg_complexity_per_file': "avg. complexity per Python file",
		}
		
		for previous in reversed(sorted( revisions )):
			rev = revisions[previous]
			for metric in metrics:
				if rev[metric] > max[metric]:
					max[metric] = rev[metric]
		
		w = 900
		h = 400
		
		for metric in metrics:
			with open( os.path.basename(dir)+'-'+metric+'.svg', 'wb' ) as fh:
				print >> fh, '<?xml version="1.0" encoding="UTF-8"?>'
				print >> fh, '<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">'
				print >> fh, '<svg'
				print >> fh, '  xmlns="http://www.w3.org/2000/svg"'
				print >> fh, '  xmlns:xlink="http://www.w3.org/1999/xlink"'
				print >> fh, '  xmlns:ev="http://www.w3.org/2001/xml-events"'
				print >> fh, '  version="1.1"'
				print >> fh, '  baseProfile="full"'
				print >> fh, '  width="'+ str(w+3*2) +'"'
				print >> fh, '  height="'+ str(h+3*2) +'"'
				print >> fh, '  viewBox="-5 -5 '+ str(w+3*2+5) +' '+ str(h+3*2+5) +'"'
				print >> fh, '>'
				
				print >> fh, '<style>'
				print >> fh, '  circle.p { stroke: green; stroke-width: 2; fill: none; opacity: 0.8; }'
				print >> fh, '  circle.p:hover { stroke-width: 4; fill: yellow; fill-opacity: 0.6; opacity: 1; }'
				print >> fh, '</style>'
				
				print >> fh, '<rect x="'+ str(3) +'" y="'+ str(3) +'" width="'+ str(w) +'" height="'+ str(h) +'" fill="white" stroke="black" stroke-width="1px"/>'
				
				print >> fh, '<path '
				print >> fh, '  fill="none" stroke="blue" stroke-width="2"'
				print >> fh, '  d="M 5 '+ str(h)
				
				i = 0
				for previous in reversed(sorted( revisions )):
					rev = revisions[previous]
					x = 5 + (float(w-10) / float(len(revisions))) * float(i)
					y = h - (float(rev[metric]) / float(max[metric])) * (h-10)
					
					print >> fh, '    L '+ str(x) +' '+ str(y)
					
					i += 1
				
				print >> fh, '"/>'
				
				i = 0
				for previous in reversed(sorted( revisions )):
					rev = revisions[previous]
					x = 5 + (float(w-10) / float(len(revisions))) * float(i)
					y = h - (float(rev[metric]) / float(max[metric])) * (h-10)
					
					#title = str(rev[metric]) +' files @' + str(rev['sha'][0:6]) +' ('+ str(rev['iso_time']) +')'
					title = (metrics_titles[metric] +' @ {sha} as of {date}').format(
						value  = rev[metric],
						sha    = rev['sha'][0:6],
						date   = rev['iso_time'],
					)
					
					print >> fh, '<circle class="p" cx="'+ str(x) +'" cy="'+ str(y) +'" r="6" title="'+ str(title) +'" />'
					
					i += 1
				
				print >> fh, '</svg>'
			
			with open( os.path.basename(dir)+'-'+metric+'.html', 'wb' ) as fh:
				print >> fh, '<!DOCTYPE html>'
				print >> fh, '<html>'
				print >> fh, '	<head>'
				print >> fh, '		<title>Metrics</title>'
				print >> fh, '		<script type="text/javascript" src="https://www.google.com/jsapi"></script>'
				print >> fh, '		<script type="text/javascript">'
				print >> fh, '			google.load( "visualization", "1", { packages: ["corechart"]});'
				print >> fh, '			google.setOnLoadCallback( drawChart );'
				print >> fh, '			function drawChart()'
				print >> fh, '			{'
				print >> fh, '				var data = google.visualization.DataTable();'
				print >> fh, '				var data = new google.visualization.DataTable();'
				print >> fh, '				data.addColumn( "string", "SHA" );  // implicit domain label col.'
				print >> fh, '				data.addColumn( "number", '+repr(str( metrics_labels[metric] ))+' );  // implicit series 1 data col.'
				print >> fh, '				data.addColumn({ type:"string", role:"tooltip" });  // annotation role col.'
				print >> fh, '				data.addRows(['
				
				for previous in reversed(sorted( revisions )):
					rev = revisions[previous]
					
					title = ('{value}\n@ {sha} as of {date}').format(
						value  = rev[metric],
						sha    = rev['sha'][0:6],
						date   = rev['iso_time'],
					)
					
					print >> fh, '[ {sha!r}, {value!r}, {title!r} ],'.format(
						sha    = rev['sha'][0:6],
						value  = rev[metric],
						title  = title,
					)
				
				print >> fh, '			]);'
				print >> fh, '			var options = {'
				print >> fh, '				title: "Metrics",'
				print >> fh, '			};'
				print >> fh, '			var chart = new google.visualization.LineChart('
				print >> fh, '				document.getElementById( "chart-1" ));'
				print >> fh, '			chart.draw( data, options );'
				print >> fh, '		}'
				print >> fh, '		</script>'
				print >> fh, '	</head>'
				print >> fh, '	<body>'
				print >> fh, '		<div id="chart-1" style="width:99%; min-width:40em; height:99%; min-height:30em;"></div>'
				print >> fh, '	</body>'
				print >> fh, '</html>'
	
		with open( 'index.html', 'wb' ) as fh:
			print >> fh, '<!DOCTYPE html>'
			print >> fh, '<html>'
			print >> fh, '	<head>'
			print >> fh, '		<title>Metrics</title>'
			print >> fh, '		<script type="text/javascript" src="https://www.google.com/jsapi"></script>'
			print >> fh, '		<script type="text/javascript">'
			print >> fh, '			google.load( "visualization", "1", { packages: ["corechart"]});'
			print >> fh, '			google.setOnLoadCallback( drawChart );'
			print >> fh, '			function drawChart()'
			print >> fh, '			{'
			
			for metric in metrics:
				print >> fh, '				var data_'+metric+' = google.visualization.DataTable();'
				print >> fh, '				var data_'+metric+' = new google.visualization.DataTable();'
				print >> fh, '				data_'+metric+'.addColumn( "string", "SHA" );  // implicit domain label col.'
				print >> fh, '				data_'+metric+'.addColumn( "number", '+repr(str( metrics_labels[metric] ))+' );  // implicit series 1 data col.'
				print >> fh, '				data_'+metric+'.addColumn({ type:"string", role:"tooltip" });  // annotation role col.'
				print >> fh, '				data_'+metric+'.addRows(['
				
				for previous in reversed(sorted( revisions )):
					rev = revisions[previous]
					
					title = (metrics_value_formats[metric]+'\n@ {sha} as of {date}').format(
						value  = rev[metric],
						sha    = rev['sha'][0:6],
						date   = rev['iso_time'],
					)
					
					print >> fh, '[ {sha!r}, {value!r}, {title!r} ],'.format(
						sha    = rev['sha'][0:6],
						value  = rev[metric],
						title  = title,
					)
				
				print >> fh, '			]);'
				print >> fh, '			var options_'+metric+' = {'
				print >> fh, '				title: '+ repr(str( metrics_labels[metric] )) +','
				print >> fh, '			};'
				print >> fh, '			var chart_'+metric+' = new google.visualization.LineChart('
				print >> fh, '				document.getElementById( "chart-'+metric+'" ));'
				print >> fh, '			chart_'+metric+'.draw( data_'+metric+', options_'+metric+' );'
			
			print >> fh, '		}'
			print >> fh, '		</script>'
			print >> fh, '	</head>'
			print >> fh, '	<body>'
			for metric in metrics:
				print >> fh, '		<div id="chart-'+metric+'" style="width:99%; min-width:40em; height:20em;"></div>'
			print >> fh, '	</body>'
			print >> fh, '</html>'
	
	except (KeyboardInterrupt, Exception,) as e:
		exc = e
	
	print "Cleaning up ..."
	err = call(
		[ 'rm', '-rf', new_dir ],
		stdout = sys.stdout,
		stderr = sys.stderr,
	)
	
	if exc:
		if isinstance( exc, KeyboardInterrupt ):
			print >> sys.stderr, "Keyboard interrupt."
			exit(1)
		
		raise exc
Example #14
def save_chdir(drug, drug_dataframe, gene_identifiers, gene_lookup_dict,
               exp_data_df, exp_metadata_df, control_data_df,
               control_metadata_df, output_folder):
    all_out_filenames = []
    time_vals = set(drug_dataframe['SM_Time'])
    for time in time_vals:
        time_dataframe = drug_dataframe.loc[(
            drug_dataframe["SM_Time"] == time)]
        dose_vals = set(time_dataframe['SM_Dose'])
        dose_vals = np.sort(np.array(list(dose_vals)), axis=None)
        # clustergrammer_df = pd.DataFrame()

        for dose in dose_vals:
            dose_dataframe = time_dataframe.loc[(
                time_dataframe["SM_Dose"] == dose)]
            ref_id = list(dose_dataframe['id'])
            plates = list(dose_dataframe['det_plate'])
            expMat = []
            for r in ref_id:
                expMat.append(list(exp_data_df[r]))

            control_id = []
            for p in plates:
                control_id = list(control_metadata_df.loc[(
                    control_metadata_df['det_plate'] == p)]["id"])
            ctrlMat = []
            for c in control_id:
                ctrlMat.append(list(control_data_df[c]))

            ctrlMat = np.array(ctrlMat).T
            expMat = np.array(expMat).T

            chdirVector = chdir(ctrlMat, expMat, gene_identifiers)
            chdir_genes = chdirVector[0]

            # output matrix for each drug at a specific time, specific dose
            out_filename = output_folder + "MCF10A_L1000_chdir_" + str(
                drug) + "_" + str(time) + str(
                    drug_dataframe["SM_Time_Unit"].iloc[0]) + "_" + str(
                        dose) + "_um.tsv"
            df = pd.DataFrame(index=chdir_genes)

            gene_symbols = []
            for gene_id in chdir_genes:
                gene_symbols.append(gene_lookup_dict[gene_id])

            df["gene_symbol"] = gene_symbols
            df[dose] = chdirVector[1]

            # temp_clustergrammer_df = pd.DataFrame(index=chdir_genes)
            # temp_clustergrammer_df[dose] = chdirVector[1]

            # clustergrammer_df = pd.concat([clustergrammer_df, temp_clustergrammer_df], axis=1)

            # if clustergrammer_df.empty: # first dose to add to df
            #     clustergrammer_df = pd.DataFrame(index=chdir_genes)
            #     clustergrammer_df["gene_symbol"] = gene_symbols
            #     clustergrammer_df[dose] = chdirVector[1]
            #     print "dose: ", dose, "len: ", len(chdirVector[1])
            # else:
            #     print "dose: ", dose, "len: ", len(chdirVector[1])
            #     clustergrammer_df[dose] = chdirVector[1]

            df.to_csv(out_filename, sep='\t', encoding='utf-8')
            all_out_filenames.append(out_filename)

        # output matrix for each drug at a specific time for inputting into clustergrammer
        # output_folder = "output-testing-clustergrammer/"
        # out_filename = output_folder + "MCF10A_L1000_chdir_" + str(drug) + "_" + str(time) + str(drug_dataframe["SM_Time_Unit"].iloc[0]) + ".tsv"
        # clustergrammer_df.to_csv(out_filename, sep='\t', encoding='utf-8')
        # all_out_filenames.append(out_filename)

    return all_out_filenames
Example #15
def calling_module(level=0):
    with cd.chdir(_getiwd()):
        return(ins.getmodule(ins.currentframe(level+2)))
Example #16
def main():
	if len( sys.argv ) != 2:
		print >> sys.stderr, "Usage: {name} directory".format(
			name = sys.argv[0],
		)
		exit(1)
	
	dir = sys.argv[1]
	if not os.path.exists( dir ):
		print >> sys.stderr, "No such directory: {f!r}".format(
			f = dir,
		)
		exit(1)
	
	new_dir = '/tmp/metrics-cc-git-repo'
	print "Copying Git repo {d0} to {d1!r} ...".format(
		d0 = dir,
		d1 = new_dir,
	)
	err = call(
		[ 'rm', '-rf', new_dir ],
		stdout = sys.stdout,
		stderr = sys.stderr,
	)
	
	exc = None
	try:
		err = call(
			[ 'cp', '-a', os.path.abspath( dir ), new_dir ],
			stdout = sys.stdout,
			stderr = sys.stderr,
		)
		
		branch = 'master'
		revisions = {}
		previous = 0
		
		while True:
			with chdir( new_dir ):
				info = git_checkout_previous( branch, previous )
				if not info:
					break
				
				( sha, unix_time, iso_time, ) = info
				
				( files, base_dir, ) = get_files( new_dir )
				with chdir( base_dir ):
					ret = measure_complexity_for_files( files )
					revisions[previous] = ret
					
					revisions[previous]['sha'       ] = sha
					revisions[previous]['unix_time' ] = unix_time
					revisions[previous]['iso_time'  ] = iso_time
			
			previous += 1
			
			#if previous > 50:  #FIXME REMOVEME
			#	break
		
		metrics = (
			'num_files',
			'total_complexity',
			'avg_complexity_per_file',
		)
		max = {}
		for metric in metrics:
			max[metric] = 0.0
		
		metrics_titles = {
			'num_files': "{value:d} Python files",
			'total_complexity': "{value:.2f} total Python codebase complexity",
			'avg_complexity_per_file': "{value:.2f} avg. complexity per Python file",
		}
		
		for previous in reversed(sorted( revisions )):
			rev = revisions[previous]
			for metric in metrics:
				if rev[metric] > max[metric]:
					max[metric] = rev[metric]
		
		w = 900
		h = 400
		
		for metric in metrics:
			with open( os.path.basename(dir)+'-'+metric+'.svg', 'wb' ) as fh:
				print >> fh, '<?xml version="1.0" encoding="UTF-8"?>'
				print >> fh, '<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">'
				print >> fh, '<svg'
				print >> fh, '  xmlns="http://www.w3.org/2000/svg"'
				print >> fh, '  xmlns:xlink="http://www.w3.org/1999/xlink"'
				print >> fh, '  xmlns:ev="http://www.w3.org/2001/xml-events"'
				print >> fh, '  version="1.1"'
				print >> fh, '  baseProfile="full"'
				print >> fh, '  width="'+ str(w+3*2) +'"'
				print >> fh, '  height="'+ str(h+3*2) +'"'
				print >> fh, '  viewBox="-5 -5 '+ str(w+3*2+5) +' '+ str(h+3*2+5) +'"'
				print >> fh, '>'
				
				print >> fh, '<style>'
				print >> fh, '  circle.p { stroke: green; stroke-width: 2; fill: none; opacity: 0.8; }'
				print >> fh, '  circle.p:hover { stroke-width: 4; fill: yellow; fill-opacity: 0.6; opacity: 1; }'
				print >> fh, '</style>'
				
				print >> fh, '<rect x="'+ str(3) +'" y="'+ str(3) +'" width="'+ str(w) +'" height="'+ str(h) +'" fill="white" stroke="black" stroke-width="1px"/>'
				
				print >> fh, '<path '
				print >> fh, '  fill="none" stroke="blue" stroke-width="2"'
				print >> fh, '  d="M 5 '+ str(h)
				
				i = 0
				for previous in reversed(sorted( revisions )):
					rev = revisions[previous]
					x = 5 + (float(w-10) / float(len(revisions))) * float(i)
					y = h - (float(rev[metric]) / float(max[metric])) * (h-10)
					
					print >> fh, '    L '+ str(x) +' '+ str(y)
					
					i += 1
				
				print >> fh, '"/>'
				
				i = 0
				for previous in reversed(sorted( revisions )):
					rev = revisions[previous]
					x = 5 + (float(w-10) / float(len(revisions))) * float(i)
					y = h - (float(rev[metric]) / float(max[metric])) * (h-10)
					
					#title = str(rev[metric]) +' files @' + str(rev['sha'][0:6]) +' ('+ str(rev['iso_time']) +')'
					title = (metrics_titles[metric] +' @ {sha} as of {date}').format(
						value  = rev[metric],
						sha    = rev['sha'][0:6],
						date   = rev['iso_time'],
					)
					
					print >> fh, '<circle class="p" cx="'+ str(x) +'" cy="'+ str(y) +'" r="6" title="'+ str(title) +'" />'
					
					i += 1
				
				print >> fh, '</svg>'
	
	except (KeyboardInterrupt, Exception,) as e:
		exc = e
	
	print "Cleaning up ..."
	err = call(
		[ 'rm', '-rf', new_dir ],
		stdout = sys.stdout,
		stderr = sys.stderr,
	)
	
	if exc:
		if isinstance( exc, KeyboardInterrupt ):
			print >> sys.stderr, "Keyboard interrupt."
			exit(1)
		
		raise exc