示例#1
0
def save_timecourses(session_ids,
                     factin_filename='results/melki_factin_timecourses.dat',
                     pi_filename='results/melki_pi_timecourses.dat'):
    """Write F-actin and Pi timecourses for several sessions, one column per
    release cooperativity.

    session_ids     -- database ids of the sessions to extract.
    factin_filename -- output path for the F-actin table.
    pi_filename     -- output path for the Pi table.
    """
    dbs = database.DBSession()
    sessions = [dbs.query(database.Session).get(sid) for sid in session_ids]
    cooperativities = [s.parameters['release_cooperativity'] for s in sessions]
    # Sort the sessions by cooperativity so output columns are ordered.
    cooperativities, sessions = zip(*sorted(zip(cooperativities, sessions)))

    times = None
    factin_values = []
    pi_values = []
    for session in sessions:
        run = session.experiments[0].runs[0]

        # NOTE(review): 'times' is overwritten on every pass -- assumes all
        # sessions share the same sample-time mesh; confirm upstream.
        times, fvals, ferrors = run.analyses['factin']
        times, pvals, perrors = run.analyses['Pi']

        factin_values.append(fvals)
        pi_values.append(pvals)

    factin_rows = zip(*([times] + factin_values))
    pi_rows = zip(*([times] + pi_values))

    # BUG FIX: the column-group label was misspelled 'Cooperativitiy'.
    _write_results(factin_filename, factin_rows, 'Time (s)', 'F-actin (uM)',
                   'Release Cooperativity', cooperativities)

    _write_results(pi_filename, pi_rows, 'Time (s)', '[Pi] (uM)',
                   'Release Cooperativity', cooperativities)
def main(session_filename, objective_name, polling_period, plot):
    """Controller entry point: load a session and run a simple fit controller.

    session_filename -- path of the complete session definition to load.
    objective_name   -- objective used to score runs in the population.
    polling_period   -- seconds the controller waits between polls.
    plot             -- whether the population should produce plots.
    """
    db_session = database.DBSession()
    # Register this process as the 'controller' for the job-control system.
    with job_control.process('controller', db_session) as process:
        try:
            # Load the session plus its free-parameter specification.
            session, par_spec = factories.controllers.load_complete_session(
                db_session, session_filename)

            # A single fitted parameter: its name and initial guess.
            par_name, par_guess = _par_from_spec(par_spec)

            population = fitting_controller.SimplePopulation(
                dbs=db_session,
                session=session,
                parameter_name=par_name,
                parameter_guess=par_guess,
                objective_name=objective_name,
                process=process,
                plot=plot)

            c = fitting_controller.SimpleFitController(
                dbs=db_session,
                session=session,
                population=population,
                polling_period=polling_period,
                process=process)
            c.run()
        except:
            # Log every failure (including BaseException) before re-raising so
            # the controller's crash is always recorded in the database log.
            logger.exception('Exception in controller main.')
            raise
示例#3
0
def vectorial_save(session_id,
                   rate_filename='results/melki_vectorial_rate.dat'):
    """Write the best-fitting vectorial release rate and its statistics.

    Picks the run with the smallest 'pi_fit' objective and records its
    release rate, a naive statistical error, and halftime data.

    Raises ValueError if no run in the session has a 'pi_fit' objective.
    """
    dbs = database.DBSession()
    session = dbs.query(database.Session).get(session_id)

    sample_size = (session.parameters['number_of_simulations'] *
                   session.parameters['number_of_filaments'])
    # Naive 1/sqrt(N) relative error from the number of samples.
    fractional_error = 1 / numpy.sqrt(sample_size)

    best_run = None
    best_fit = None
    for run in session.experiments[0].runs:
        run_fit = run.get_objective('pi_fit')
        if run_fit is None:
            continue
        # FIX: reuse the already-fetched objective instead of querying it
        # twice more per iteration.
        if best_fit is None or run_fit < best_fit:
            best_run = run
            best_fit = run_fit

    if best_run is None:
        # Previously this fell through to an AttributeError on None.
        raise ValueError('No run with a pi_fit objective in session %s.'
                         % session_id)

    best_rate = best_run.parameters['release_rate']
    statistical_error = best_rate * fractional_error
    row = ('Inf', best_rate, statistical_error,
           best_run.get_objective('halftime'),
           best_run.get_objective('halftime_error'))

    _small_writer(rate_filename, [row], [
        'rho', 'release_rate', 'naive statistical error', 'halftime',
        'halftime_error'
    ])
示例#4
0
def plot_nearest_fit_timecourse(session_id, alpha=0.01):
    """Plot the simulated timecourse whose release rate is closest to the
    fitted parabola minimum, together with the Melki experimental data.

    alpha -- significance level forwarded to fit_xy.
    """
    rates, chi2s = get_xy(session_id)
    rate_pack, k_pack, c_pack = fit_xy(rates, chi2s, alpha)

    import bisect
    # BUG FIX: bisect returns an insertion point which can equal len(rates)
    # (IndexError) and is not necessarily the closest mesh value.  Clamp it
    # and choose the nearer of the two neighboring rates.
    xi = bisect.bisect(rates, rate_pack[0])
    if xi <= 0:
        xi = 0
    elif xi >= len(rates) or (rate_pack[0] - rates[xi - 1]
                              <= rates[xi] - rate_pack[0]):
        xi -= 1
    nearest_rate = rates[xi]

    dbs = database.DBSession()
    session = dbs.query(database.Session).get(session_id)
    runs = session.experiments[0].runs
    nearest_run = None
    for run in runs:
        if run.parameters['release_rate'] == nearest_rate:
            nearest_run = run
            break

    t, fact, p, fe, pe = _get_timecourses(nearest_run)

    f_data = data.load_data(
        'experimental_data/melki_fievez_carlier_1996/factin_concentration.dat')
    p_data = data.load_data(
        'experimental_data/melki_fievez_carlier_1996/phosphate_concentration.dat'
    )
    import pylab
    f = pylab.figure()
    a = f.add_subplot(1, 1, 1)
    a.plot(f_data[0], f_data[1], 'k-', label='[F] Data')
    a.plot(p_data[0], p_data[1], 'k--', label='[Pi] Data')

    a.plot(t, fact, 'b-', label='[F] Sim')
    a.plot(t, p, 'b--', label='[Pi] Sim')

    a.set_ylim([0, 35])
示例#5
0
def session_fits(session_id,
                 fit_length=50,
                 output_filename='results/check_fit.dat',
                 write=True):
    """Fit a parabola to the best pi_fit values versus release rate.

    Returns (release_cooperativity, parabola peak position, R^2/n).
    When write is true, also writes (rate, fit, parabola) rows with a header
    describing the peak and fit quality.

    BUG FIX: scipy.polyfit/scipy.polyval were aliases of the NumPy functions
    and have been removed from SciPy; call numpy directly.
    """
    dbs = database.DBSession()
    session = dbs.query(database.Session).get(session_id)
    runs = session.experiments[0].runs

    # (release_rate, pi_fit) for every run that produced a fit value.
    # Renamed from 'data' to avoid shadowing the module of the same name.
    points = []
    for run in runs:
        fitness = run.get_objective('pi_fit')
        if fitness is not None:
            points.append((run.parameters['release_rate'], fitness))

    # Fit the parabola to the fit_length best (smallest chi^2) points.
    fit_sorted = sorted(points, key=operator.itemgetter(1))
    px, py = zip(*fit_sorted[:fit_length])
    coeffs, R, n, svs, rcond = numpy.polyfit(px, py, 2, full=True)

    x, y = zip(*sorted(points))

    # Vertex of a*x^2 + b*x + c lies at -b / (2a).
    peak = float(-coeffs[1] / (2 * coeffs[0]))
    fit = float(R / fit_length)

    if write:
        p = numpy.polyval(coeffs, x)
        header = '# Parabola peak at %s\n# Parabola R^2/n = %s\n'
        rows = zip(x, y, p)
        _small_writer(output_filename,
                      rows, ['release_rate', 'pi_fit', 'parabola'],
                      header=header % (peak, fit))

    return session.parameters.get('release_cooperativity'), peak, fit
示例#6
0
def save_qof(session_ids,
             alpha=0.01,
             cooperative_filename='results/depoly_cooperative_qof.dat',
             vectorial_filename='results/depoly_vectorial_qof.dat'):
    """Write chi^2 quality-of-fit tables for the Jegou 2011 data.

    Sessions carrying a 'release_cooperativity' parameter go into the
    cooperative table (one row per cooperativity, sorted); a session without
    one is treated as the vectorial model and written separately.
    """
    dbs = database.DBSession()
    coop_results = []
    vec_results = None
    for sid in session_ids:
        session = dbs.query(database.Session).get(sid)

        chi2, chi2_min, chi2_max, chi2_pct = _get_qof(session, alpha)

        rho = session.parameters.get('release_cooperativity')
        if rho is None:
            # No cooperativity parameter => vectorial model.
            vec_results = [chi2, chi2_min, chi2_max, chi2_pct]
        else:
            coop_results.append([rho, chi2, chi2_min, chi2_max, chi2_pct])

    if coop_results:
        coop_results.sort()
        _small_writer(
            cooperative_filename,
            coop_results,
            ['Cooperativity', 'Chi^2', 'Min CI', 'Max CI', '% Error'],
            header="# Cooperative quality of fit for Jegou 2011\n")

    if vec_results:
        _small_writer(vectorial_filename, [vec_results],
                      ['Chi^2', 'Min CI', 'Max CI', '% Error'],
                      header="# Vectorial quality of fit for Jegou 2011\n")
示例#7
0
def fnc_plot(session_id,
             x_range=0.1,
             alpha=0.01,
             num_parabola_points=500,
             interactive=True,
             output_filename=None):
    """Plot chi^2 versus filament number concentration with a parabola fit.

    Top panel: the full scan.  Bottom panel: points within +/- x_range of
    the minimum plus the fitted parabola and confidence-interval markers.
    output_filename is currently accepted but unused.
    """
    dbs = database.DBSession()
    session = dbs.query(database.Session).get(session_id)

    x, y = get_xy(session_id,
                  target='f_fit',
                  parameter='filament_tip_concentration')

    # Restrict the parabola fit to points within x_range of the minimum.
    imin = numpy.argmin(y)
    xmin = x[imin]
    min_x = (1 - x_range) * xmin
    max_x = (1 + x_range) * xmin
    indices = numpy.intersect1d(numpy.argwhere(min_x < x),
                                numpy.argwhere(max_x > x))
    filtered_x = x.take(indices)
    filtered_y = y.take(indices)

    rate_pack, k_pack, c_pack = fit_xy(filtered_x, filtered_y, alpha)

    # FIX: this helper was previously named 'f' and then clobbered by the
    # figure object below; give each its own name.
    def _parabola(x, r, k, c):
        return k * (x - r)**2 + c

    par_x_vals = numpy.linspace(min_x, max_x, num_parabola_points)
    par_y_vals = _parabola(par_x_vals, rate_pack[0], k_pack[0], c_pack[0])

    from . import plot_util

    fig = plot_util.figure(interactive=interactive)
    top_axes = fig.add_subplot(2, 1, 1)
    top_axes.plot(x, y, 'r.')
    top_axes.set_xlabel(r'Filament Number Concentration [$\mu$M]')
    top_axes.set_ylabel('Chi^2 Fit to Melki Data')
    top_axes.axvline(rate_pack[0], 0, 1, color='g', linestyle=':')
    top_axes.xaxis.major.formatter.set_powerlimits((0, 0))

    bot_axes = fig.add_subplot(2, 1, 2)
    bot_axes.plot(filtered_x, filtered_y, 'ro')
    bot_axes.plot(par_x_vals, par_y_vals, 'b-')
    bot_axes.set_xlabel(r'Filament Number Concentration [$\mu$M]')
    bot_axes.set_ylabel('Chi^2 Fit to Melki Data')
    # Solid line at the fitted minimum, dotted lines at the CI bounds.
    bot_axes.axvline(rate_pack[0], 0, 1, color='g', linestyle='-')
    bot_axes.axvline(rate_pack[1], 0, 1, color='g', linestyle=':')
    bot_axes.axvline(rate_pack[2], 0, 1, color='g', linestyle=':')
    bot_axes.xaxis.major.formatter.set_powerlimits((0, 0))
    bot_axes.set_xlim(min_x, max_x)

    top_axes.set_title(r'FNC = %.3e $\mu$M +/- %.3f%%' %
                       (rate_pack[0], rate_pack[-1]))
示例#8
0
def save_timecourse(session_id,
                    experiment_index=0,
                    run_index=0,
                    timecourse_filename='results/copoly_timecourse.dat'):
    """Write the F-actin and Pi timecourses of one run to a single file."""
    dbs = database.DBSession()

    session = dbs.query(database.Session).get(session_id)
    run = session.experiments[experiment_index].runs[run_index]

    # These parameters are looked up (and will raise if absent) but only the
    # analyses below are actually written out.
    ftc = run.all_parameters['filament_tip_concentration']
    seed_concentration = run.all_parameters['seed_concentration']

    factin_data = run.analyses['factin']
    pi_data = run.analyses['Pi']

    # File output columns are "time [factin] (error) [pi] (error)"
    combined_data = _combine_timecourse_data(factin_data, pi_data)

    _write_results(timecourse_filename, combined_data, 'Time (s)',
                   'Concentration (uM)', 'Data',
                   ['[F-actin]', '[F-actin] error', '[Pi]', '[Pi] error'])
示例#9
0
def save_lagtimes(
        session_ids,
        cooperative_output_filename='results/fnc_cooperative_lagtimes.dat',
        vectorial_output_filename='results/fnc_vectorial_lagtimes.dat'):
    """Write scaled lagtimes versus FNC for cooperative and vectorial models."""
    dbs = database.DBSession()

    fncs = None
    vectorial_lagtimes = None
    cooperative_entries = []
    for sid in session_ids:
        session = dbs.query(database.Session).get(sid)
        # NOTE(review): fncs is overwritten each pass -- assumes every session
        # shares the same FNC mesh; confirm upstream.
        cooperativity, fncs, lagtimes = get_lagtimes(session)
        scaled = scale_lagtimes(lagtimes)
        if cooperativity is None:
            vectorial_lagtimes = scaled
        else:
            cooperative_entries.append((cooperativity, scaled))

    if cooperative_entries:
        cooperative_entries.sort()
        cooperativities, cooperative_lagtimes = zip(*cooperative_entries)

        rhos = ['%.0e' % c for c in cooperativities]
        rows = zip(fncs, *cooperative_lagtimes)

        _write_results(cooperative_output_filename, rows, 'FNC', 'Lagtime',
                       'Release Cooperativity', rhos)

    if vectorial_lagtimes is not None:
        _small_writer(vectorial_output_filename,
                      zip(fncs, vectorial_lagtimes), ['fnc', 'lagtime'],
                      header='# Lagtime for vectorial model.\n')
示例#10
0
def _plot_nf_timecourse(session_id, axes=None, alpha=None, color=None):
    """Plot [F] and [Pi] timecourses for the run nearest the fitted rate.

    axes and color must be supplied by the caller (the None defaults would
    fail below); alpha is forwarded to fit_xy.
    """
    rates, chi2s = get_xy(session_id)
    rate_pack, k_pack, c_pack = fit_xy(rates, chi2s, alpha)

    import bisect
    # BUG FIX: bisect returns an insertion point which can equal len(rates)
    # (IndexError) and is not necessarily the closest mesh value.  Clamp it
    # and choose the nearer of the two neighboring rates.
    xi = bisect.bisect(rates, rate_pack[0])
    if xi <= 0:
        xi = 0
    elif xi >= len(rates) or (rate_pack[0] - rates[xi - 1]
                              <= rates[xi] - rate_pack[0]):
        xi -= 1
    nearest_rate = rates[xi]

    dbs = database.DBSession()
    session = dbs.query(database.Session).get(session_id)
    runs = session.experiments[0].runs
    nearest_run = None
    for run in runs:
        if run.parameters['release_rate'] == nearest_rate:
            nearest_run = run
            break

    t, f, p, fe, pe = _get_timecourses(nearest_run)
    cooperativity = session.parameters.get('release_cooperativity', None)
    if cooperativity is not None:
        label = r'$\rho_d$ = %.2e' % cooperativity
    else:
        label = 'Vectorial'

    # Solid line for [F], dashed for [Pi], both in the caller's color.
    axes.plot(t, f, color + '-', label=label)
    axes.plot(t, p, color + '--')
示例#11
0
def _extract_fnc_fits(session_id):
    """Return the best 'factin_fit' objective value for each FNC mesh point.

    Returns (fncs, fits) where fits[i] is the smallest objective value among
    runs whose 'filament_tip_concentration' matches fncs[i], or None when no
    run matched that mesh value.
    """
    dbs = database.DBSession()
    session = dbs.query(database.Session).get(session_id)

    e = session.experiments[0]

    fncs = _get_fnc_mesh(e)

    # Objective binding for the F-actin fit within this experiment.
    obj_bind = e.objectives['factin_fit']

    fits = []
    for fnc in fncs:
        # Objectives bound to 'factin_fit' whose run has a matching
        # filament_tip_concentration parameter.
        # NOTE(review): value.like(fnc) applies SQL LIKE to what is presumably
        # a numeric column -- looks like it relies on exact textual equality;
        # verify against the schema.
        obj_q = dbs.query(database.Objective).filter_by(bind=obj_bind)
        runs_q = obj_q.join(database.Run)
        par_q = runs_q.join(database.RunParameter).filter_by(
            name='filament_tip_concentration').filter(
                database.RunParameter.value.like(fnc))
        best_fit = None
        # Keep the minimum objective value over all matching runs.
        for objective in par_q:
            this_fit = objective.value
            if best_fit is None or this_fit < best_fit:
                best_fit = this_fit

        fits.append(best_fit)

    return fncs, fits
示例#12
0
def save_qof(numerator_session_ids,
             denominator_session_ids,
             alpha=0.01,
             cooperative_output_filename='results/fnc_cooperative_qof.dat',
             vectorial_output_filename='results/fnc_vectorial_qof.dat'):
    """Write quality of fit of the simulated lagtime ratio to Carlier 86.

    For each matched (numerator, denominator) session pair the lagtime ratio
    is compared against the experimental target ratio; a squared deviation
    plus a t-based confidence interval is written per cooperativity, or to
    the vectorial file when the cooperativity is None.
    """
    TARGET = 620.69 / 166.7  # about 3.72
    dbs = database.DBSession()

    numerator_rows = []
    for sid in numerator_session_ids:
        session = dbs.query(database.Session).get(sid)
        numerator_rows.append(_get_single_lagtime(session))

    denominator_rows = []
    for sid in denominator_session_ids:
        session = dbs.query(database.Session).get(sid)
        denominator_rows.append(_get_single_lagtime(session))

    # Sort both sides so matching cooperativities line up pairwise.
    numerator_rows.sort()
    denominator_rows.sort()

    coop_results = []
    vec_results = None
    for (nrho, nval, nerr, nnum), (drho, dval, derr, dnum) in zip(
            numerator_rows, denominator_rows):
        if nrho != drho:
            raise RuntimeError("Numerator and Denominator FNCs don't match.")

        # Squared deviation of the simulated ratio from the target ratio.
        qof = (nval / dval - TARGET)**2

        # Propagate the numerator and denominator standard errors through the
        # ratio, then build a Student-t confidence interval.
        fit_std_error = numpy.sqrt((nerr / dval)**2 +
                                   (nval * derr / dval**2)**2)
        t = scipy.stats.t.ppf(1 - alpha / 2, min(nnum, dnum) - 1)
        ci_size = t * fit_std_error

        result = (qof, qof - ci_size, qof + ci_size, ci_size / qof * 100)
        if nrho is None:
            vec_results = result
        else:
            coop_results.append((nrho,) + result)

    if coop_results:
        coop_results.sort()
        _small_writer(
            cooperative_output_filename,
            coop_results,
            ['Cooperativity', 'Chi^2', 'Min CI', 'Max CI', '% Error'],
            header="# Cooperative quality of fit for Carlier 86\n")

    if vec_results:
        _small_writer(vectorial_output_filename, [vec_results],
                      ['Chi^2', 'Min CI', 'Max CI', '% Error'],
                      header="# Vectorial quality of fit for Carlier 86\n")
示例#13
0
def save_halftimes(session_id, halftime_filename='results/fnc_halftimes.dat'):
    """Write halftime versus FNC, one column per release cooperativity."""
    dbs = database.DBSession()
    session = dbs.query(database.Session).get(session_id)

    mesh, cooperativities, halftime_results = _get_halftimes(
        session, concentration_name='filament_tip_concentration')

    rows = _create_rows(mesh, halftime_results)
    _write_results(halftime_filename, rows, 'FNC', 'Halftime',
                   'Release Cooperativity', cooperativities)
def get_cc_d(session_id, parameter='release_cooperativity'):
    """Return (cooperativity, critical concentration, diffusion coefficient)
    for the first run of the session's first experiment.

    BUG FIX: 'parameter' was previously an undefined free name (NameError at
    runtime); it is now a keyword argument defaulting to
    'release_cooperativity', matching the value the result is named after.
    """
    dbs = database.DBSession()
    session = dbs.query(database.Session).get(session_id)

    run = session.experiments[0].runs[0]
    cc = run.get_objective('final_ATP_concentration')
    D = run.get_objective('diffusion_coefficient')

    cooperativity = run.all_parameters.get(parameter)

    return cooperativity, cc, D
示例#15
0
def main():
    """Exercise every log level plus exception logging via job_control."""
    db_session = database.DBSession()
    with job_control.process('test', db_session) as process:
        logger.debug('Test debug message.')
        logger.info('Test info message.')
        # FIX: warning() replaces the long-deprecated warn() alias.
        logger.warning('Test warn message.')
        logger.error('Test error message.')
        logger.critical('Test critical message.')
        try:
            foo_raises()
        except Exception:
            # Narrowed from a bare except so KeyboardInterrupt/SystemExit
            # still propagate.
            logger.exception('Apparently I can catch exceptions too!')
示例#16
0
def ht_v_fil(session_id, output_filename='results/ht_v_numfil.dat'):
    """Write Pi halftime versus number of filaments for each run, sorted.

    BUG FIX: the first column header previously read 'release rate' even
    though the column holds the number of filaments.
    """
    dbs = database.DBSession()
    session = dbs.query(database.Session).get(session_id)
    rows = []
    for run in session.experiments[0].runs:
        numfils = run.all_parameters['number_of_filaments']
        halftime = run.get_objective('halftime')
        rows.append((numfils, halftime))

    _small_writer(output_filename, sorted(rows),
                  ['number of filaments', 'pi halftime'])
示例#17
0
def single(session_id,
           objective_name='halftime',
           output_filename='results/fnc_halftimes.dat'):
    """Write (fnc, objective) pairs for a single session.

    BUG FIX: the second column header was hard-coded to 'halftime' even when
    another objective_name was requested; it now follows objective_name
    (unchanged output for the default case).
    """
    dbs = database.DBSession()
    session = dbs.query(database.Session).get(session_id)

    results = _get_objective_v_concentration(
        session,
        concentration_name='filament_tip_concentration',
        objective_name=objective_name)

    _small_writer(output_filename, results, ['fnc', objective_name])
示例#18
0
def main(idle_timeout, retry_delay):
    """Worker loop: run queued jobs until idle for idle_timeout seconds.

    idle_timeout -- seconds without completed work before the worker exits.
    retry_delay  -- seconds to sleep between polls when no job is available.
    """
    db_session = database.DBSession()
    with job_control.process('worker', db_session) as process:
        deadline = time.time() + idle_timeout
        while time.time() < deadline:
            job = job_control.get_job(process.id, db_session)
            if not job:
                time.sleep(retry_delay)
                continue
            run_support.run_job(job, db_session)
            job.complete = True
            # Completed work pushes the idle deadline forward.
            deadline = time.time() + idle_timeout
示例#19
0
def get_xy(session_id, target='pi_fit', parameter='release_rate'):
    """Return (x, y) arrays of parameter values and objective values.

    Runs without the target objective are skipped; results are sorted by
    the parameter value (x).
    """
    dbs = database.DBSession()
    session = dbs.query(database.Session).get(session_id)
    runs = session.experiments[0].runs

    pairs = []
    for r in runs:
        objective = r.get_objective(target)
        if objective is not None:
            pairs.append((objective, r.parameters[parameter]))
    pairs.sort(key=operator.itemgetter(1))

    y_values, x_values = zip(*pairs)
    return numpy.array(x_values), numpy.array(y_values)
示例#20
0
def extract_fits(session_ids,
                 output_filename='results/fit_rates.dat',
                 fit_length=50):
    """Collect per-session parabola fits and write them sorted."""
    dbs = database.DBSession()

    rows = sorted(
        session_fits(session_id, fit_length=fit_length, write=False)
        for session_id in session_ids)

    _small_writer(output_filename, rows,
                  ['release_cooperativity', 'release_rate', 'parabola R2'])
示例#21
0
def _save_vectorial(session_id,
                    halftime_name='halftime',
                    fraction_name=None,
                    output_filename=None):
    """Write (fraction, halftime) pairs for a single vectorial session."""
    dbs = database.DBSession()
    session = dbs.query(database.Session).get(session_id)

    fractions, halftimes = _get_halftimes(session,
                                          halftime_name=halftime_name,
                                          fraction_name=fraction_name)

    _small_writer(output_filename, zip(fractions, halftimes),
                  [fraction_name, 'pi halftime'])
示例#22
0
def save_mean(sid, filename):
    """Write the mean filament length timecourse of the first run."""
    dbs = database.DBSession()
    session = dbs.query(database.Session).get(sid)
    experiment = session.experiments[0]

    poly_duration = experiment.all_parameters['polymerization_duration']
    sim_duration = experiment.all_parameters['simulation_duration']
    period = experiment.all_parameters['sample_period']
    # Sample times for the depolymerization phase; the half-period pad makes
    # the final sample inclusive.
    end = sim_duration - poly_duration + float(period) / 2
    times = numpy.arange(0, end, period)
    values = _get_timecourse(experiment.runs[0], times, poly_duration)

    _small_writer(filename, zip(times, values),
                  ['times', 'mean_filament_length'])
def save_vs_parameter(session_id,
                      output_filename='results/cc_d_tip.dat',
                      parameter='barbed_tip_release_rate'):
    """Write (parameter, critical concentration, D) per run, sorted."""
    dbs = database.DBSession()
    session = dbs.query(database.Session).get(session_id)

    results = sorted(
        get_cc_d_run(run, parameter=parameter)
        for run in session.experiments[0].runs)

    _small_writer(output_filename, results, [
        parameter, 'Critical Concentration (uM)',
        'Diffusion Coefficient (mon/s^2)'
    ])
示例#24
0
def console_main():
    """Drop into an interactive IPython shell with common modules preloaded."""
    # Make plotting interactive
    pylab.ion()

    # Prepare and drop into iPython shell.
    # Names made available in the interactive namespace.
    namespace = {
        'database': database,
        'db_session': database.DBSession(),
        'numpy': numpy,
        'pylab': pylab,
        'job_control': job_control,
        'visualization': visualization
    }

    # NOTE(review): IPShellEmbed is the legacy (pre-0.11) IPython embedding
    # API; 'banner' is presumably a module-level string -- confirm.
    shell = IPShellEmbed(argv=[], banner=banner)
    shell(local_ns=namespace)
示例#25
0
def save_fits(session_id,
              output_filename='results/sample_rates.dat',
              objective_name='pi_fit',
              parameter_name='release_rate'):
    """Write (rate, fit, halftime, halftime error) rows, sorted by rate.

    Runs that never produced the requested objective are skipped.
    """
    dbs = database.DBSession()
    session = dbs.query(database.Session).get(session_id)

    rows = []
    for run in session.experiments[0].runs:
        rate = run.parameters[parameter_name]
        fit = run.get_objective(objective_name)
        halftime = run.get_objective('halftime')
        halftime_error = run.get_objective('halftime_error')
        if fit is None:
            continue
        rows.append((rate, fit, halftime, halftime_error))

    rows.sort()
    _small_writer(
        output_filename, rows,
        [parameter_name, objective_name, 'pi halftime', 'pi halftime error'])
示例#26
0
def main(filter_by_time=False,
         start_time=None,
         stop_time=None,
         min_level=None,
         level_name=None,
         process_type=None,
         process_id=None,
         follow=False,
         polling_period=None,
         use_color=True):
    """Display database log records, optionally following new ones.

    filter_by_time -- when true, parse start_time/stop_time as date strings.
    start_time, stop_time -- date strings (parsed by dateutil) bounding the
        records shown; ignored unless filter_by_time is set.
    min_level, level_name, process_type, process_id -- forwarded to
        make_query to restrict which records are shown.
    follow -- when true, keep polling for and printing new records.
    polling_period -- seconds to sleep between polls while following.
    use_color -- forwarded to the log displayer.
    """

    disp = display.LogDisplayer(use_color=use_color)
    # NOTE We only need a read-only session.
    # I don't know whether SQLA supports that.
    if filter_by_time:
        start_time = dateutil.parser.parse(start_time)
        if stop_time:
            stop_time = dateutil.parser.parse(stop_time)
            # An inverted time window is treated as having no lower bound.
            if start_time > stop_time:
                start_time = None
    else:
        start_time = None
        stop_time = None

    db_session = database.DBSession()

    # Print everything currently matching, remembering the last id shown.
    with db_session.transaction:
        last_id = disp.print_all(
            make_query(0, start_time, stop_time, min_level, level_name,
                       process_type, process_id, db_session))

    if follow:
        while True:
            # Stop following once the requested window has passed.
            if stop_time and datetime.datetime.now() > stop_time:
                break

            time.sleep(polling_period)
            # Only records newer than last_id are fetched on each poll.
            with db_session.transaction:
                this_id = disp.print_all(
                    make_query(last_id, start_time, stop_time, min_level,
                               level_name, process_type, process_id,
                               db_session))
            if this_id:
                last_id = this_id
示例#27
0
def _save_halftimes(session_ids, fraction_name=None, output_filename=None):
    """Write halftime-vs-fraction columns, one per release cooperativity."""
    dbs = database.DBSession()

    fractions = None
    entries = []
    for session_id in session_ids:
        session = dbs.query(database.Session).get(session_id)
        # NOTE(review): fractions is overwritten each pass -- assumes a shared
        # fraction mesh across all sessions; confirm upstream.
        fractions, session_halftimes = _get_halftimes(
            session, fraction_name=fraction_name)

        entries.append((session.parameters['release_cooperativity'],
                        session_halftimes))

    entries.sort()
    cooperativities, halftimes = zip(*entries)

    rows = zip(*([fractions] + list(halftimes)))

    _write_results(output_filename, rows, fraction_name, 'Halftime',
                   'Release Cooperativity', cooperativities)
示例#28
0
def _get_best_tc(session_id, alpha):
    """Return (cooperativity, times, [F], [Pi]) for the run whose release
    rate is closest to the fitted parabola minimum.

    alpha -- significance level forwarded to fit_xy.
    """
    rates, chi2s = get_xy(session_id)
    rate_pack, k_pack, c_pack = fit_xy(rates, chi2s, alpha)

    import bisect
    # BUG FIX: bisect returns an insertion point which can equal len(rates)
    # (IndexError) and is not necessarily the closest mesh value.  Clamp it
    # and choose the nearer of the two neighboring rates.
    xi = bisect.bisect(rates, rate_pack[0])
    if xi <= 0:
        xi = 0
    elif xi >= len(rates) or (rate_pack[0] - rates[xi - 1]
                              <= rates[xi] - rate_pack[0]):
        xi -= 1
    nearest_rate = rates[xi]

    dbs = database.DBSession()
    session = dbs.query(database.Session).get(session_id)
    runs = session.experiments[0].runs
    nearest_run = None
    for run in runs:
        if run.parameters['release_rate'] == nearest_rate:
            nearest_run = run
            break

    t, f, p, fe, pe = _get_timecourses(nearest_run)
    cooperativity = session.parameters.get('release_cooperativity', None)

    return cooperativity, t, f, p
示例#29
0
def save_timecourses(session_id,
                     output_filename='results/carlier_86_timecourses.dat'):
    """Write length and nucleotide-state timecourses for the first run."""
    dbs = database.DBSession()
    session = dbs.query(database.Session).get(session_id)

    run = session.experiments[0].runs[0]

    # Each analysis yields (times, values, errors); the same time mesh is
    # reused for every column.
    times, atp_vals, atp_errors = run.analyses['F_ATP']
    times, adppi_vals, adppi_errors = run.analyses['F_ADPPi']
    times, adp_vals, adp_errors = run.analyses['F_ADP']
    times, pi_vals, pi_errors = run.analyses['Pi']
    times, length_vals, length_errors = run.analyses['length']

    columns = [times, length_vals, atp_vals, adppi_vals, adp_vals, pi_vals]
    _small_writer(output_filename, zip(*columns), [
        'Time (s)', 'length', 'F-ATP-actin', 'F-ADPPi-actin', 'F-ADP-actin',
        '[Pi] uM'
    ])
示例#30
0
def save(session_id,
         output_filename='results/depolymerization_timecourses.dat'):
    """Write the depolymerization length timecourse of every run.

    Output rows are (time, length_run_0, length_run_1, ...).
    """
    dbs = database.DBSession()
    session = dbs.query(database.Session).get(session_id)
    e = session.experiments[0]

    polymerization_duration = e.all_parameters['polymerization_duration']
    simulation_duration = e.all_parameters['simulation_duration']
    sample_period = e.all_parameters['sample_period']

    # Sample times for the depolymerization phase; the half-period pad makes
    # the final sample inclusive.
    times = numpy.arange(
        0, simulation_duration - polymerization_duration +
        float(sample_period) / 2, sample_period)
    values = []
    for run in session.experiments[0].runs:
        values.append(_get_timecourse(run, times, polymerization_duration))

    # FIX: removed the unused 'tv = numpy.transpose(values)' local.
    # One column of times followed by one column per run.
    results = numpy.transpose(numpy.vstack((times, values)))

    _small_writer(output_filename, results, ['times', 'filament_length'])