Example #1
def main():
    mod = PipelineModule("Filter Ophys ROIs produced from cell segmentation.")

    input_data = mod.input_data()
    data = load_all_input(input_data)
    model_id = data["model_id"]
    classifier = data["classifier"]
    object_data = data["object_data"]
    depth = data["depth"]
    structure_id = data["structure_id"]
    drivers = data["drivers"]
    reporters = data["reporters"]
    border = data["border"]
    rois = data["rois"]

    label_array = classifier.get_labels(object_data, depth, structure_id,
                                        drivers, reporters)
    rois = roi_filter.apply_labels(rois, label_array, classifier.label_names)

    rois = roi_filter.label_unions_and_duplicates(rois, OVERLAP_THRESHOLD)

    output_data = create_output_data(rois, model_id, border,
                                     object_data["eXcluded"],
                                     classifier.unexpected_features)

    mod.write_output_data(output_data)
Example #2
def main():
    parser = argparse.ArgumentParser("Generate brain observatory alignment.")
    parser.add_argument('input_json')
    parser.add_argument('--log-level', default=logging.DEBUG)
    mod = PipelineModule("Generate brain observatory alignment.", parser)

    input_data = mod.input_data()
    experiment_id = input_data.pop("ophys_experiment_id")
    sync_file = input_data.pop("sync_file")
    output_file = input_data.pop("output_file")

    aligner = ts.OphysTimeAligner(sync_file, **input_data)

    ophys_times, ophys_delta = aligner.corrected_ophys_timestamps
    stim_times, stim_delta = aligner.corrected_stim_timestamps
    eye_times, eye_delta = aligner.corrected_eye_video_timestamps
    beh_times, beh_delta = aligner.corrected_behavior_video_timestamps

    # the stim alignment array holds, for each stimulus frame, the index of
    # the ophys frame to match it to, so its length is len(stim_times)
    stim_alignment = ts.get_alignment_array(ophys_times, stim_times)

    # camera arrays are index of camera frame for each ophys frame ...
    # cam_nwb_creator depends on this so keeping it that way even though
    # it makes little sense... len(video_times)
    eye_alignment = ts.get_alignment_array(eye_times, ophys_times,
                                           int_method=np.ceil)

    behavior_alignment = ts.get_alignment_array(beh_times, ophys_times,
                                                int_method=np.ceil)

    write_output(output_file, ophys_times, stim_alignment, eye_alignment,
                 behavior_alignment, ophys_delta, stim_delta, eye_delta,
                 beh_delta)
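The comments above describe ts.get_alignment_array as giving, for each frame time in one stream, the index of the matching frame in the other. A minimal sketch of that kind of lookup follows; find_frame_indices is a hypothetical illustration, not the AllenSDK implementation, and it assumes both timestamp arrays are monotonically increasing.

import numpy as np

def find_frame_indices(reference_times, target_times, int_method=np.floor):
    # For each reference timestamp, interpolate its fractional position on
    # the target timeline, then round it to an integer frame index.
    positions = np.interp(reference_times, target_times,
                          np.arange(len(target_times)))
    return int_method(positions).astype(int)

# e.g. one ophys frame index per stimulus frame time (hypothetical usage):
# stim_alignment = find_frame_indices(stim_times, ophys_times)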
Example #3
def main():

    module = PipelineModule()

    input_data = module.input_data()
    output_data = run(input_data)
    module.write_output_data(output_data)
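Example #3 is the bare skeleton every module here shares: construct a PipelineModule, read the input JSON, compute, and write the output JSON (the comments in Example #14's __main__ block describe the same flow). For exercising a module's run() outside the pipeline, a lightweight stand-in along these lines can help; FakePipelineModule is a hypothetical test double, not the real allensdk class.

import argparse
import json


class FakePipelineModule:
    """Hypothetical stand-in mirroring how these examples use PipelineModule:
    parse input/output JSON paths, load the input JSON, write the output JSON."""

    def __init__(self, description=""):
        self.parser = argparse.ArgumentParser(description)
        self.parser.add_argument("input_json")
        self.parser.add_argument("output_json")
        self._args = None

    @property
    def args(self):
        # parse lazily so callers can add extra arguments first,
        # as Example #4 does with --num_frames and --threshold_factor
        if self._args is None:
            self._args = self.parser.parse_args()
        return self._args

    def input_data(self):
        with open(self.args.input_json) as f:
            return json.load(f)

    def write_output_data(self, data):
        with open(self.args.output_json, "w") as f:
            json.dump(data, f, indent=2)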
Example #4
def main():
    mod = PipelineModule()
    mod.parser.add_argument("--num_frames", type=int, default=None)
    mod.parser.add_argument("--threshold_factor",
                            type=float,
                            default=DEFAULT_THRESHOLD_FACTOR)

    data = mod.input_data()
    args = dict(movie_file=data['movie_file'],
                metadata_file=data['metadata_file'],
                output_directory=data['output_directory'],
                threshold_factor=data.get('threshold_factor',
                                          mod.args.threshold_factor),
                num_frames=mod.args.num_frames,
                auto=True,
                cache_input_frames=True,
                input_block_size=None,
                output_annotated_movie_block_size=None)

    if data.get('pupil_points', None):
        args['bbox_pupil'] = compute_bounding_box(data['pupil_points'])
    if data.get('corneal_reflection_points', None):
        args['bbox_cr'] = compute_bounding_box(
            data['corneal_reflection_points'])

    tracker = run_itracker(**args)

    logging.debug("finished running itracker")

    output_data = dict(pupil_file=tracker.pupil_file,
                       corneal_reflection_file=tracker.cr_file,
                       mean_frame_file=tracker.mean_frame_file)

    mod.write_output_data(output_data)
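compute_bounding_box is only referenced here, so its exact return layout is not shown. A plausible sketch, assuming the seed points are (x, y) pairs and that the tracker wants an [xmin, xmax, ymin, ymax] box (both assumptions), might be:

import numpy as np

def compute_bounding_box_sketch(points):
    # Hypothetical helper: reduce an iterable of (x, y) points to a rectangle.
    pts = np.asarray(points, dtype=float)
    return [pts[:, 0].min(), pts[:, 0].max(),
            pts[:, 1].min(), pts[:, 1].max()]

# compute_bounding_box_sketch([(10, 20), (30, 25), (15, 40)])
# -> [10.0, 30.0, 20.0, 40.0]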
Example #5
def main():
    mod = PipelineModule()
    jin = mod.input_data()

    results = {}

    for ident, experiment in iteritems(jin):
        nwb_file = experiment['nwb_file']
        output_file = experiment['output_file']

        if experiment["session_name"] not in si.SESSION_STIMULUS_MAP.keys():
            raise Exception("Could not run analysis for unknown session: %s" %
                            experiment["session_name"])

        logging.info("Running %s analysis", experiment["session_name"])
        logging.info("NWB file %s", nwb_file)
        logging.info("Output file %s", output_file)

        results[ident] = run_session_analysis(nwb_file,
                                              output_file,
                                              save_flag=True,
                                              plot_flag=False)

    logging.info("Generating output")

    jout = {}
    for session_name, data in results.items():
        # results for this session
        res = {}
        # metric fields
        names = {}
        roi_id = None
        for metric, values in data['cell'].items():
            if metric == "roi_id":
                roi_id = values
            else:
                # convert the pandas Series to a plain list of values
                names[metric] = [values[i] for i in range(len(values))]
        # make an output record for each roi_id
        if roi_id is not None:
            for i in range(len(roi_id)):
                name = roi_id[i]
                roi = {}
                for field, values in names.items():
                    roi[field] = values[i]
                res[name] = roi

        jout[session_name] = {'cell': res, 'experiment': data['experiment']}

    logging.info("Saving output")

    mod.write_output_data(jout)
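The inner loops above turn a dict of per-metric pandas Series into one record per roi_id. If data['cell'] really is a dict of identically indexed Series (suggested by the code, not guaranteed), the same reshaping can be done directly with a DataFrame; the values below are invented for illustration.

import pandas as pd

# Toy stand-in for data['cell']: equally indexed columns plus a roi_id column.
cell = pd.DataFrame({
    "roi_id": [101, 102],
    "dff_mean": [0.12, 0.34],
    "reliability": [0.8, 0.6],
})
res = cell.set_index("roi_id").to_dict(orient="index")
# -> {101: {'dff_mean': 0.12, 'reliability': 0.8},
#     102: {'dff_mean': 0.34, 'reliability': 0.6}}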
Example #6
def main():
    mod = PipelineModule()
    data = mod.input_data()

    h5_file = data["processed_h5"]
    altitude_phase, azimuth_phase = load_arrays(h5_file)
    del data["processed_h5"]

    output_data = get_metrics(altitude_phase, azimuth_phase, **data)

    mod.write_output_data(output_data)
Example #7
def main():

    module = PipelineModule()
    input_data = module.input_data()

    output_dir = os.path.dirname(module.args.output_json)

    logging.info('reading data volume from {0}'.format(input_data['volume_path']))
    volume = sitk.ReadImage(str(input_data['volume_path']))
    volume = sitk.PermuteAxes(volume, PERMUTATION)
    volume = sitk.Flip(volume, FLIP)

    logging.info('reading colormap from {0}'.format(input_data['colormap_path']))
    colormap = pd.read_csv(input_data['colormap_path'], header=None, 
                           names=['red', 'green', 'blue'], delim_whitespace=True)
    colormap = convert_discrete_colormap(colormap.values, 'projection')

    output_data = {'output_file_paths': []}
    for rot in input_data['rotations']:
    
        rot['write_depth_sheet'] = functools.partial(write_depth_image, 
                                                     path=str(os.path.join(output_dir, rot['depth_path'])))
        output_data['output_file_paths'].append(os.path.join(output_dir, rot['depth_path']))

        if isinstance(rot['window_size'], six.string_types):
            if rot['window_size'] == 'no_pad':
                rot['window_size'] = no_pad(volume)
            elif rot['window_size'] == 'pad':
                rot['window_size'] = pad(volume)
            else:
                raise ValueError('did not understand window size option {0}'.format(rot['window_size']))
        logging.info('window_size: {0}'.format(rot['window_size']))
        
        for out_image in rot['output_images']:
            out_image['write'] = functools.partial(imsave, os.path.join(output_dir, out_image['path']))
            output_data['output_file_paths'].append(os.path.join(output_dir, out_image['path']))            

            if 'background_path' in out_image:
                out_image['background'] = load_background_image(out_image['background_path'])
            else:
                out_image['background'] = None

    run(volume, input_data['min_threshold'], input_data['max_threshold'], 
        input_data['rotations'], colormap)
    module.write_output_data(output_data)
Example #8
def main():
    mod = PipelineModule()
    data = mod.input_data()
    calibrator, cr_params, pupil_params, outfile = parse_input_data(data)

    pupil_areas = calibrator.compute_area(pupil_params)
    pupil_on_monitor_deg = calibrator.pupil_position_on_monitor_in_degrees(
        pupil_params, cr_params)
    pupil_on_monitor_cm = calibrator.pupil_position_on_monitor_in_cm(
        pupil_params, cr_params)
    missing_index = np.isnan(pupil_areas) | np.isnan(pupil_on_monitor_deg.T[0])
    pupil_areas[missing_index] = np.nan
    pupil_on_monitor_deg[missing_index,:] = np.nan
    pupil_on_monitor_cm[missing_index,:] = np.nan
    write_output(outfile, pupil_on_monitor_deg, pupil_on_monitor_cm,
                 pupil_areas)

    mod.write_output_data({"screen_mapping_file": outfile})
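The masking step above invalidates a frame in every output whenever either the pupil area or the on-monitor position is missing. A toy illustration of that combined mask, with invented numbers:

import numpy as np

pupil_areas = np.array([3.1, np.nan, 2.9])
pupil_on_monitor_deg = np.array([[1.0, 2.0], [1.1, 2.1], [np.nan, 2.2]])

# a frame is missing if either quantity is NaN
missing_index = np.isnan(pupil_areas) | np.isnan(pupil_on_monitor_deg.T[0])
pupil_areas[missing_index] = np.nan
pupil_on_monitor_deg[missing_index, :] = np.nan   # frames 1 and 2 become NaN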
Example #9
def main():
    mod = PipelineModule()
    mod.parser.add_argument("--types", default=','.join(robsth.PLOT_TYPES))
    mod.parser.add_argument("--threads", default=4)

    data = mod.input_data()
    types = mod.args.types.split(',')

    for input_file in data:
        exp_input_json = input_file['input_json']
        exp_output_json = input_file['output_json']

        exp_input_data = ju.read(exp_input_json)

        nwb_file, analysis_file, output_directory = robsth.parse_input(
            exp_input_data)

        robsth.build_experiment_thumbnails(nwb_file, analysis_file,
                                           output_directory, types,
                                           mod.args.threads)
Example #10
def main():
    parser = argparse.ArgumentParser("Generate brain observatory alignment.")
    parser.add_argument("input_json", type=str, 
        help="path to input json"
    )
    parser.add_argument("output_json", type=str, nargs="?",
        help="path to which output json will be written"
    )
    parser.add_argument("--log-level", default=logging.DEBUG)
    parser.add_argument("--min-stimulus-delay", type=float, default=0.0, 
        help="reject results if monitor delay less than this value (s)"
    )
    parser.add_argument("--max-stimulus-delay", type=float, default=0.07, 
        help="reject results if monitor delay greater than this value (s)"
    )
    mod = PipelineModule("Generate brain observatory alignment.", parser)

    input_data = mod.input_data()

    writer = TimeSyncWriter(input_data.get("output_file"), mod.args.output_json)
    writer.validate_paths()

    aligner = ts.OphysTimeAligner(
        input_data.get("sync_file"), 
        scanner=input_data.get("scanner", None),
        dff_file=input_data.get("dff_file", None),
        stimulus_pkl=input_data.get("stimulus_pkl", None),
        eye_video=input_data.get("eye_video", None),
        behavior_video=input_data.get("behavior_video", None),
        long_stim_threshold=input_data.get(
            "long_stim_threshold", ts.LONG_STIM_THRESHOLD
        )
    )

    outputs = run_ophys_time_sync(
        aligner, 
        input_data.get("ophys_experiment_id"), 
        mod.args.min_stimulus_delay, 
        mod.args.max_stimulus_delay
    )
    writer.write(outputs)
Example #11
def main():
    mod = PipelineModule("Decompose ophys session into individual planes.")
    mod.parser.add_argument("-t", "--threads", type=int, default=4)

    input_data = mod.input_data()
    conversion_definitions = parse_input(input_data)

    if mod.args.threads > 1:
        pool = Pool(processes=mod.args.threads)
        output = pool.map(convert_frame, conversion_definitions)
    else:
        output = []
        for definition in conversion_definitions:
            output.append(convert_frame(definition))

    output_data = {}
    for eid, ophys_file, auxiliary_file in output:
        output_data[eid] = {
            "ophys_data": ophys_file,
            "auxiliary_data": auxiliary_file
        }

    mod.write_output_data(output_data)
Example #12
def main():
    mod = PipelineModule()
    mod.parser.add_argument("--exclude-labels",
                            nargs="*",
                            default=EXCLUDE_LABELS)

    data = mod.input_data()
    logging.debug("reading input")

    traces, masks, valid, trace_ids, movie_h5, output_h5 = parse_input(
        data, mod.args.exclude_labels)

    logging.debug("excluded masks: %s",
                  str(list(zip(np.where(~valid)[0], trace_ids[~valid]))))
    output_dir = os.path.dirname(output_h5)
    plot_dir = os.path.join(output_dir, "demix_plots")
    if os.path.exists(plot_dir):
        shutil.rmtree(plot_dir)
    Manifest.safe_mkdir(plot_dir)

    logging.debug("reading movie")
    with h5py.File(movie_h5, 'r') as f:
        movie = f['data'][()]

    # only demix non-union, non-duplicate ROIs
    valid_idxs = np.where(valid)
    demix_traces = traces[valid_idxs]
    demix_masks = masks[valid_idxs]

    logging.debug("demixing")
    demixed_traces, drop_frames = demixer.demix_time_dep_masks(
        demix_traces, movie, demix_masks)

    nt_inds = demixer.plot_negative_transients(demix_traces, demixed_traces,
                                               valid[valid_idxs], demix_masks,
                                               trace_ids[valid_idxs], plot_dir)

    logging.debug("rois with negative transients: %s",
                  str(trace_ids[valid_idxs][nt_inds]))

    nb_inds = demixer.plot_negative_baselines(demix_traces, demixed_traces,
                                              demix_masks,
                                              trace_ids[valid_idxs], plot_dir)

    # negative baseline rois (and those that overlap with them) become nans
    logging.debug("rois with negative baselines (or overlap with them): %s",
                  str(trace_ids[valid_idxs][nb_inds]))
    demixed_traces[nb_inds, :] = np.nan

    logging.info("Saving output")
    out_traces = np.zeros(traces.shape, dtype=demix_traces.dtype)
    out_traces[:] = np.nan
    out_traces[valid_idxs] = demixed_traces

    with h5py.File(output_h5, 'w') as f:
        f.create_dataset("data", data=out_traces, compression="gzip")
        roi_names = np.array([str(rn) for rn in trace_ids]).astype(np.string_)
        f.create_dataset("roi_names", data=roi_names)

    mod.write_output_data(
        dict(negative_transient_roi_ids=trace_ids[valid_idxs][nt_inds],
             negative_baseline_roi_ids=trace_ids[valid_idxs][nb_inds]))
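The write-back at the end follows a pattern several of these modules share: compute only on the valid subset, then scatter the results into a full-size array that stays NaN for everything excluded. A toy version with invented values:

import numpy as np

traces = np.arange(12, dtype=float).reshape(4, 3)   # 4 ROIs x 3 frames
valid = np.array([True, False, True, False])
valid_idxs = np.where(valid)

demixed = traces[valid_idxs] * 0.5                   # stand-in for demixing
out_traces = np.full(traces.shape, np.nan)
out_traces[valid_idxs] = demixed                     # rows 1 and 3 stay NaN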
Example #13
def main():
    module = PipelineModule()
    args = module.args

    jin = module.input_data()

    ########################################################################
    # prelude -- get processing metadata

    trace_file = jin["roi_trace_file"]
    neuropil_file = jin["neuropil_trace_file"]
    storage_dir = jin["storage_directory"]

    plot_dir = os.path.join(storage_dir, "neuropil_subtraction_plots")
    if os.path.exists(plot_dir):
        shutil.rmtree(plot_dir)

    try:
        os.makedirs(plot_dir)
    except OSError:
        # tolerate a pre-existing plot directory
        pass

    logging.info("Neuropil correcting '%s'", trace_file)

    ########################################################################
    # process data

    try:
        roi_traces = h5py.File(trace_file, "r")
    except:
        logging.error("Error: unable to open ROI trace file '%s'", trace_file)
        raise

    try:
        neuropil_traces = h5py.File(neuropil_file, "r")
    except:
        logging.error("Error: unable to open neuropil trace file '%s'",
                      neuropil_file)
        raise
    # get number of traces, length, etc.
    num_traces, T = roi_traces['data'].shape
    T_orig = T
    T_cross_val = int(T / 2)
    if (T - T_cross_val > T_cross_val):
        T = T - 1

    # make sure that ROI and neuropil trace files are organized the same
    n_id = neuropil_traces["roi_names"][:].astype(str)
    r_id = roi_traces["roi_names"][:].astype(str)
    logging.info("Processing %d traces", len(n_id))
    assert len(n_id) == len(
        r_id), "Input trace files are not aligned (ROI count)"
    for i in range(len(n_id)):
        assert n_id[i] == r_id[
            i], "Input trace files are not aligned (ROI IDs)"
    # initialize storage variables and analysis routine
    r_list = [None] * num_traces
    RMSE_list = [-1] * num_traces
    roi_names = n_id
    corrected = np.zeros((num_traces, T_orig))
    r_vals = [None] * num_traces

    for n in range(num_traces):
        roi = roi_traces['data'][n]
        neuropil = neuropil_traces['data'][n]

        if np.any(np.isnan(neuropil)):
            logging.warning(
                "neuropil trace for roi %d contains NaNs, skipping", n)
            continue

        if np.any(np.isnan(roi)):
            logging.warning("roi trace for roi %d contains NaNs, skipping", n)
            continue

        r = None

        logging.info("Correcting trace %d (roi %s)", n, str(n_id[n]))
        results = estimate_contamination_ratios(roi, neuropil)
        logging.info("r=%f err=%f it=%d", results["r"], results["err"],
                     results["it"])

        r = results["r"]
        fc = roi - r * neuropil
        RMSE_list[n] = results["err"]
        r_vals[n] = results["r_vals"]

        debug_plot(os.path.join(plot_dir, "initial_%04d.png" % n), roi,
                   neuropil, fc, r, results["r_vals"], results["err_vals"])

        # mean of the corrected trace must be positive
        if fc.mean() > 0:
            r_list[n] = r
            corrected[n, :] = fc
        else:
            logging.warning("fc has negative baseline, skipping this r value")

    # compute mean valid r value
    r_mean = np.array([r for r in r_list if r is not None]).mean()

    # fill in empty r values
    for n in range(num_traces):
        roi = roi_traces['data'][n]
        neuropil = neuropil_traces['data'][n]

        if r_list[n] is None:
            logging.warning("Error estimated r for trace %d. Setting to zero.",
                            n)
            r_list[n] = 0
            corrected[n, :] = roi

        # save a debug plot
        debug_plot(os.path.join(plot_dir, "final_%04d.png" % n), roi, neuropil,
                   corrected[n, :], r_list[n])

        # one last sanity check
        eps = -0.0001
        if np.mean(corrected[n, :]) < eps:
            raise Exception(
                "Trace %d baseline is still negative after correction" % n)

        if r_list[n] < 0.0:
            raise Exception("Trace %d ended with negative r" % n)

    ########################################################################
    # write out processed data

    try:
        savefile = os.path.join(storage_dir, "neuropil_correction.h5")
        hf = h5py.File(savefile, 'w')
        hf.create_dataset("r", data=r_list)
        hf.create_dataset("RMSE", data=RMSE_list)
        hf.create_dataset("FC", data=corrected, compression="gzip")
        hf.create_dataset("roi_names", data=roi_names.astype(np.string_))

        for n in range(num_traces):
            r = r_vals[n]
            if r is not None:
                hf.create_dataset("r_vals/%d" % n, data=r)
        hf.close()
    except:
        logging.error("Error creating output h5 file")
        raise

    roi_traces.close()
    neuropil_traces.close()

    jout = copy.copy(jin)
    jout["neuropil_correction"] = savefile
    module.write_output_data(jout)

    logging.info("finished")
Example #14
        alignment["scale_z"] = 1.0
        alignment["skew_x"] = 0.0
        alignment["skew_y"] = 0.0
        alignment["skew_z"] = 0.0
        jout["alignment"] = alignment
    except:
        print("** Internal error **")
        raise

    return jout

    #
    # test transform -- bar.swc should match the source file
    #print("source swc: " + jin["swc_file"])
    #print tr_rot
    #morph2 = swc.read_swc(jin["swc_file"])
    #morph2.apply_affine(tr_rot)
    #morph2.save("foo.swc")
    #morph3 = swc.read_swc("foo.swc")
    #morph3.apply_affine(inv_tr_rot)
    #morph3.save("bar.swc")


if __name__ == "__main__":
    # read module input. PipelineModule object automatically parses the
    #   command line to pull out input.json and output.json file names
    module = PipelineModule()
    jin = module.input_data()  # loads input.json
    jout = main(jin)
    module.write_output_data(jout)  # writes output.json
Example #15
    feat["average_diameter"] = data["dendrite"]["average_diameter"]
    feat["total_length"] = data["dendrite"]["total_length"]
    feat["nodes_over_branches"] = data["dendrite"]["neurites_over_branches"]
    feat["overall_width"] = data["dendrite"]["width"]
    feat["number_of_nodes"] = data["dendrite"]["num_nodes"]
    feat["average_bifurcation_angle_local"] = data["dendrite"][
        "bifurcation_angle_local"]
    feat["number_of_bifurcations"] = data["dendrite"]["num_bifurcations"]
    feat["average_fragmentation"] = data["dendrite"]["mean_fragmentation"]
    feat["number_of_tips"] = data["dendrite"]["num_tips"]
    feat["average_contraction"] = data["dendrite"]["contraction"]
    feat["average_bifuraction_angle_remote"] = data["dendrite"][
        "bifurcation_angle_remote"]
    feat["number_of_branches"] = data["dendrite"]["num_branches"]
    feat["total_surface"] = data["dendrite"]["total_surface"]
    feat["max_branch_order"] = data["dendrite"]["max_branch_order"]
    feat["soma_surface"] = data["dendrite"]["soma_surface"]
    feat["overall_height"] = data["dendrite"]["height"]

    md["features"] = feat
    data["morphology_data"] = md

    return data


if __name__ == '__main__':
    module = PipelineModule()
    jin = module.input_data()
    jout = main(jin)
    module.write_output_data(jout)
Example #16
def main():
    module = PipelineModule()
    jin = module.input_data()

    infile = jin["input_nwb"]
    outfile = jin["output_nwb"]

    # a temporary nwb file must be created. this is that file's name
    tmpfile = outfile + ".tmp"

    # create temp file and make modifications to it using h5py
    shutil.copy2(infile, tmpfile)
    f = h5py.File(tmpfile, "a")
    # change dataset names in acquisition time series to match that
    #   of existing ephys NWB files
    # also rescale the contents of 'data' fields to match the scaling
    #   in original files
    acq = f["acquisition/timeseries"]
    sweep_nums = []
    for k, v in iteritems(acq):
        # parse out sweep number
        try:
            num = int(k[5:10])
        except:
            print("Error - unexpected sweep name encountered in IGOR nwb file")
            print("Sweep called: '%s'" % k)
            print("Expecting 5-digit sweep number between chars 5 and 9")
            sys.exit(1)
        swp = "Sweep_%d" % num
        # rename objects
        try:
            acq.move(k, swp)
            ts = acq[swp]
            ts.move("stimulus_description", "aibs_stimulus_description")
        except:
            print("*** Error renaming HDF5 object in %s" % swp)
            type_, value_, traceback_ = sys.exc_info()
            print(traceback.print_tb(traceback_))
            sys.exit(1)
        # rescale contents of data so conversion is 1.0
        try:
            data = ts["data"]
            scale = float(data.attrs["conversion"])
            data[...] = data.value * scale
            data.attrs["conversion"] = 1.0
        except:
            print("*** Error rescaling data in %s" % swp)
            type_, value_, traceback_ = sys.exc_info()
            print(traceback.print_tb(traceback_))
            sys.exit(1)
        # keep track of sweep numbers
        sweep_nums.append("%d" % num)

    ###################################
    #... ditto for stimulus time series
    stim = f["stimulus/presentation"]
    for k, v in iteritems(stim):
        # parse out sweep number
        try:
            num = int(k[5:10])
        except:
            print("Error - unexpected sweep name encountered in IGOR nwb file")
            print("Sweep called: '%s'" % k)
            print("Expecting 5-digit sweep number between chars 5 and 9")
            sys.exit(1)
        swp = "Sweep_%d" % num
        try:
            stim.move(k, swp)
        except:
            print("Error renaming HDF5 group from %s to %s" % (k, swp))
            sys.exit(1)
        # rescale contents of data so conversion is 1.0
        try:
            ts = stim[swp]
            data = ts["data"]
            scale = float(data.attrs["conversion"])
            data[...] = data.value * scale
            data.attrs["conversion"] = 1.0
        except:
            print("*** Error rescaling data in %s" % swp)
            type_, value_, traceback_ = sys.exc_info()
            print(traceback.print_tb(traceback_))
            sys.exit(1)

    f.close()

    ####################################################################
    # re-open file w/ nwb library and add indexing (epochs)
    nd = nwb.NWB(filename=tmpfile, modify=True)
    for num in sweep_nums:
        ts = nd.file_pointer["acquisition/timeseries/Sweep_" + num]
        # sweep epoch
        t0 = ts["starting_time"].value
        rate = float(ts["starting_time"].attrs["rate"])
        n = float(ts["num_samples"].value)
        t1 = t0 + (n - 1) * rate
        ep = nd.create_epoch("Sweep_" + num, t0, t1)
        ep.add_timeseries("stimulus", "stimulus/presentation/Sweep_" + num)
        ep.add_timeseries("response", "acquisition/timeseries/Sweep_" + num)
        ep.finalize()
        if "CurrentClampSeries" in ts.attrs["ancestry"]:
            # test pulse epoch
            t0 = ts["starting_time"].value
            t1 = t0 + PULSE_LEN
            ep = nd.create_epoch("TestPulse_" + num, t0, t1)
            ep.add_timeseries("stimulus", "stimulus/presentation/Sweep_" + num)
            ep.add_timeseries("response",
                              "acquisition/timeseries/Sweep_" + num)
            ep.finalize()
            # experiment epoch
            t0 = ts["starting_time"].value
            t1 = t0 + (n - 1) * rate
            t0 += EXPERIMENT_START_TIME
            ep = nd.create_epoch("Experiment_" + num, t0, t1)
            ep.add_timeseries("stimulus", "stimulus/presentation/Sweep_" + num)
            ep.add_timeseries("response",
                              "acquisition/timeseries/Sweep_" + num)
            ep.finalize()
    nd.close()

    # rescaling the contents of the data arrays causes the file to grow
    # execute hdf5-repack to get it back to its original size
    try:
        print("Repacking hdf5 file with compression")
        process = subprocess.Popen(
            ["h5repack", "-f", "GZIP=4", tmpfile, outfile],
            stdout=subprocess.PIPE)
        process.wait()
    except:
        print("Unable to run h5repack on temporary nwb file")
        print("--------------------------------------------")
        raise

    try:
        print("Removing temporary file")
        os.remove(tmpfile)
    except:
        print("Unable to delete temporary file ('%s')" % tmpfile)
        raise

    # done (nothing to return)
    module.write_output_data({})
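The repack step above shells out to h5repack but never checks its exit status (Popen followed by wait, return code ignored). An alternative sketch using subprocess.run, assuming h5repack is on the PATH, fails loudly instead:

import subprocess

def repack(tmpfile, outfile):
    # Same h5repack invocation as above, but raise if it exits non-zero.
    subprocess.run(["h5repack", "-f", "GZIP=4", tmpfile, outfile], check=True)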