Example #1
    def train(self, epoch=10):
        """Train model with early stop
            epoch (int, optional): Defaults to 10. Number of epochs
        """
        from datetime import datetime
        from keras.callbacks import ModelCheckpoint, EarlyStopping
        model_name = f"./model/{datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.h5"

        checkpoint = ModelCheckpoint(filepath=model_name,
                                     monitor="loss",
                                     verbose=1,
                                     save_best_only=True)
        early_stop = EarlyStopping(monitor="loss")

        makepath('./model')
        self._save_label(model_name)

        if not self.pretrained:
            dataset = self._data_generator()
            model = self.new_model()

        else:
            dataset = self._data_generator(224)
            model = self.pretrained_model()

        result = model.fit_generator(dataset,
                                     epochs=epoch,
                                     callbacks=[checkpoint, early_stop])
Example #2
    def __init__(self, path=os.path.join(os.getcwd(), "record")):
        """Set saving path and if path not exist, make directory.

        Args:
            path (string): Defaults to os.path.join(os.getcwd(), "record"). Path for saving record.
        """
        self.path = path
        makepath(self.path)
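# Note: the `makepath` helper these examples rely on is project-specific and
# never shown on this page. In the trainer/recorder examples above it is used
# purely for its side effect of creating a directory. A minimal sketch under
# that assumption (not the project's actual implementation):
import os

def makepath(path):
    """Create directory `path` (and any missing parents) if it does not exist."""
    os.makedirs(path, exist_ok=True)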
def get_gaba_data_dict(filelist):
    """Returns a mapping from GABA conductance to TraubData objects"""
    ret = defaultdict(list)
    for fname in get_filenames(filelist):
        print(fname, makepath(fname))
        data = TraubData(makepath(fname))
        gaba = dict(data.fdata['/runconfig/GABA'])
        ret[float(gaba['conductance_scale'])].append(data)
        
    return ret
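# In the TraubData examples, by contrast, `makepath(fname)` evidently resolves
# a bare data filename to a full path under some data root. A hypothetical
# sketch, assuming a TRAUB_DATA_DIR environment variable (the real lookup rule
# is not shown here):
import os

def makepath(fname):
    """Resolve a simulation data filename against the data directory."""
    return os.path.join(os.environ.get('TRAUB_DATA_DIR', '.'), fname)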
Example #5
    def __init__(self, model, path=os.path.join(os.getcwd(), "result")):
        """Initialize options and create the result directory.

        Args:
            model (string): Path of the model used for classification
            path (string): Directory for saving results. Defaults to os.path.join(os.getcwd(), "result").
        """
        self.path = path
        makepath(self.path)
        self.model = os.path.join(os.getcwd(), "model", model)
def get_psth_with_GABA_scale(window, binwidth):
    psthdict = defaultdict(dict)
    bins = np.arange(-window / 2, window / 2 + 0.5 * binwidth, binwidth)
    with open('gaba_scaling.csv', 'r') as fd:
        reader = csv.DictReader(fd, delimiter='\t')
        for row in reader:
            fname = row['filename']
            if (not fname) or fname.strip().startswith('#'):
                continue
            try:
                data = TraubData(makepath(fname))
                bgtimes, probetimes = get_stim_times(data)
                stim_times = np.concatenate((bgtimes, probetimes))
                stim_times.sort()
                gaba = dict(data.fdata['/runconfig/GABA'])
                gaba_scale = float(gaba['conductance_scale'])
                pop_spike_times = []
                for cell, spikes in data.spikes.items():
                    if not cell.startswith('SpinyStellate'):
                        continue
                    pop_spike_times.append(spikes)
                pop_spike_times = np.concatenate(pop_spike_times)
                pop_spike_times.sort()
                psth_, b = psth(pop_spike_times,
                                stim_times,
                                window=window,
                                bins=bins)
                psthdict[gaba_scale][fname] = psth_
            except IOError as e:
                print(fname, e)
    return psthdict, bins
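# The `psth` function called above is also external to this page. A minimal
# peristimulus-time-histogram sketch consistent with the call signature
# psth(spike_times, stim_times, window=..., bins=...) -- an assumption, not
# the project's actual implementation:
import numpy as np

def psth(spike_times, stim_times, window, bins):
    """Count spikes in `bins` around each stimulus, pooled over all stimuli."""
    counts = np.zeros(len(bins) - 1)
    for t in stim_times:
        # spike times relative to this stimulus, restricted to the window
        rel = spike_times[(spike_times >= t - window / 2.0) &
                          (spike_times < t + window / 2.0)] - t
        counts += np.histogram(rel, bins=bins)[0]
    return counts, bins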
def dump_pre_post_stim_spike_count(ffname, outprefix, celltype, window=10e-3):
    """Dump the standard deviation in population spike before and after stimulus.
    """
    with open('{}_prepost_spikes_{}_{}ms_window.csv'.format(outprefix, celltype, window*1e3), 'w', newline='') as fd:
        writer = csv.writer(fd, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
        norm_files = get_dbcnt_dict(ffname)
        writer.writerow(['dbcount', 'filename', 'premean', 'premedian', 'prestd', 'postmean', 'postmedian', 'poststd'])
        for dbcnt, flist in norm_files.items():
            for fname in flist:
                data = TraubData(makepath(fname))
                pop_train_list = []
                bgtimes, probetimes = get_stim_times(data, correct_tcr=False)
                times = np.concatenate((bgtimes, probetimes))
                times.sort()
                for cell, train in data.spikes.items():
                    if cell.startswith(celltype):
                        pop_train_list.append(train)
                pop_train = np.concatenate(pop_train_list)
                pop_train.sort()
                pre = []
                post = []
                for t in times:
                    pre.append(np.flatnonzero((pop_train < t) & (pop_train > t - window/2)).shape[0]*1.0 / data.cellcounts._asdict()[celltype])
                    post.append(np.flatnonzero((pop_train > t) & (pop_train < t + window/2)).shape[0]*1.0 / data.cellcounts._asdict()[celltype])
                writer.writerow([dbcnt, fname] + [np.mean(pre), np.median(pre), np.std(pre, ddof=1), np.mean(post), np.median(post), np.std(post, ddof=1)])
def collect_feedback(data):
    userid = data['userid']
    feedback = data['feedback']
    fblist = json.loads(feedback)
    if isinstance(fblist, list):
        feedback = '\n\n'.join(fblist)
    else:
        feedback = str(feedback)
    fileappend(makepath('feedback.txt'), feedback)
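# `fileappend` is another small utility assumed by these feedback handlers; a
# plausible sketch of what it does:
def fileappend(path, text):
    """Append a line of text to the file at `path`, creating it if needed."""
    with open(path, 'a') as fd:
        fd.write(text + '\n')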
Example #9
    def __init__(self, label, videoname):
        """Init label, and video and make label dir if not exist.

        Args:
            label (string): The name of class you wanted to train your classifier 
            videoname (string): The path for video to labeling 
        """
        # Set label and video name
        self.label = label
        self.videoname = os.path.join(os.getcwd(), "record", videoname)

        # Set path for dataset
        trainpath = os.path.join(os.getcwd(), "train")
        self.labelpath = os.path.join(trainpath, label)

        # Create dir if not exist
        makepath(trainpath)
        makepath(self.labelpath)
        self._check_video()
def dump_ss_fraction_peaks(flistfilename,
                           trange=(2, 20),
                           cutoff=0.2,
                           binsize=5e-3,
                           lookahead=10):
    """Plot the peaks in fraction of spiny stellate cells over multiple
    simulations."""
    #data_dict = get_gaba_data_dict(flistfilename)
    peak_frac_med = defaultdict(list)
    peak_frac_mean = defaultdict(list)
    iqr_dict = defaultdict(list)
    with open(
            'gaba_scale_ss_frac_cutoff_{}_binwidth_{}ms_lookahead_{}.csv'.
            format(cutoff, binsize * 1000, lookahead), 'w', newline='') as fd:
        writer = csv.writer(fd,
                            delimiter=',',
                            quotechar='"',
                            quoting=csv.QUOTE_MINIMAL)
        writer.writerow(
            ('filename', 'gabascale', 'frac_mean', 'frac_med', 'frac_iqr'))
        for fname in get_filenames(flistfilename):
            data = TraubData(makepath(fname))
            gaba = dict(data.fdata['/runconfig/GABA'])
            scale = gaba['conductance_scale']
            print(fname, gaba)
            hist, bins = data.get_spiking_cell_hist('SpinyStellate',
                                                    timerange=trange,
                                                    binsize=binsize,
                                                    frac=True)
            peaks, troughs = peakdetect(hist, bins[:-1], lookahead=lookahead)
            if len(peaks) == 0:
                print('No peaks for', data.fdata.filename)
                writer.writerow((fname, scale, '', '', ''))
                continue
            x, y = zip(*peaks)
            x = np.asarray(x)
            y = np.asarray(y)
            idx = np.flatnonzero(y > cutoff)
            frac_med = ''
            frac_mean = ''
            iqr = ''
            if len(idx) > 0:
                frac_med = np.median(y[idx])
                frac_mean = np.mean(y[idx])
                iqr = np.diff(np.percentile(y[idx], [25, 75]))
                if len(iqr) > 0:
                    iqr = iqr[0]
                else:
                    iqr = ''
            peak_frac_med[scale].append(frac_med)
            peak_frac_mean[scale].append(frac_mean)
            iqr_dict[scale].append(iqr)
            writer.writerow((fname, scale, frac_mean, frac_med, iqr))
    return peak_frac_mean, peak_frac_med, iqr_dict
Example #11
def report_feedback(userid):
    """
    """
    print "[statistics] Starting feedback send"
    fbpath = makepath('feedback.json')
    if not path_exists(fbpath):
        feedback = []
    else:
        feedback = jsonload(fbpath)
    if len(feedback) > 0:
        data = {'feedback' : feedback, 'userid' : userid}
        network_POST('/stats/report_feedback', data)
        jsondump(makepath('feedback.json'), [])
    print('[statistics] Finished feedback send')

    altpath = makepath('feedback.txt')
    if path_exists(altpath):
        feedback = fileload(altpath)
        data = {'feedback' : feedback, 'userid' : userid}
        network_POST('/stats/report_legacy_feedback', data)
        command('rm', altpath)
def old_main():
    figfilename = 'Figure_2B.svg'
    trange = (2, 20)
    filename = 'data_20140423_101740_1735.h5'
    data = TraubData(makepath(filename))
    rates = defaultdict(list)
    
    counts = data.cellcounts._asdict()
    for celltype in list(counts.keys()):  # copy keys; the dict is mutated below
        if counts[celltype] == 0:
            counts.pop(celltype)
            continue
        for cell, spiketrain in data.spikes.items():
            if cell.startswith(celltype):
                rate = 1.0 * np.count_nonzero((spiketrain > trange[0]) & \
                                              (spiketrain < trange[1])) \
                    / (trange[1] - trange[0])
                rates[celltype].append(rate)
    rates.pop('TCR')
    bins = np.arange(0, 61.0, 5.0)
    print('bins:', bins)
    hists = {}
    prev = np.zeros(len(bins)-1)
    ax = None
    # plt.axis('off')
    for ii, celltype in enumerate(reversed(list(rates.keys()))):
        ctype_rates = rates[celltype]
        ax = plt.subplot(len(rates), 1, ii+1, sharex=ax, sharey=ax)
        h, b = np.histogram(ctype_rates, bins=bins)
        h = np.asarray(h, dtype='float64') / counts[celltype]
        x = bins[:-1]
        plt.bar(x,
                h,
                color=cellcolor[celltype],
                width=(bins[1]-bins[0]))
                # bottom = prev, color=cellcolor[celltype], label=celltype)
        prev += h        
        ax.tick_params(axis='y', right=False, left=False)
        # plt.setp(ax, frame_on=False)
        ax.tick_params(axis='x', top=False, bottom=True)
        ax.spines['top'].set_visible(False)
        ax.spines['right'].set_visible(False)
        # ax.spines['bottom'].set_color((0, 0, 0, 0))
        # ax.xaxis.set_visible(False)
    # ax.xaxis.set_visible(True)
    ax.set_xticks(bins[::2])
    ax.set_yticks([0, 1.0])
    ax.tick_params(axis='y', left=True)
    plt.xlabel('Firing rate (Hz)')
    plt.tight_layout()
    plt.savefig(figfilename)
    plt.show()
def dump_pre_post_stim_firing_rate(ffname, outprefix, window=10e-3):
    """Dump mean, median and standard deviation in population spike before and after stimulus.
    """
    dbcnt_flist_dict = get_dbcnt_dict(ffname)
    celltype_data_dict = defaultdict(list)
    for dbcnt, flist in dbcnt_flist_dict.items():
        for fname in flist:
            data = TraubData(makepath(fname))
            bgtimes, probetimes = get_stim_times(data, correct_tcr=True)
            times = np.concatenate((bgtimes, probetimes))
            times.sort()
            spiketrains = defaultdict(list)
            for cell, train in data.spikes.items():
                celltype = cell.partition('_')[0]
                spiketrains[celltype].append(train)
            for celltype, trains in spiketrains.items():
                popspikes = np.concatenate(trains)
                popspikes.sort()
                pre = []
                post = []
                for t in times:
                    npre = np.flatnonzero((popspikes <= t) & (popspikes > (t - window/2))).shape[0]
                    pre.append(npre / (data.cellcounts._asdict()[celltype] * window / 2.0))
                    npost = np.flatnonzero((popspikes > t) & (popspikes < (t + window/2))).shape[0]
                    post.append(npost / (data.cellcounts._asdict()[celltype] * window / 2.0))
                dstats = {
                    'filename': fname,
                    'dbcount': dbcnt,
                    'premean': np.mean(pre),
                    'premedian': np.median(pre),
                    'prestd': np.std(pre),
                    'presem': stats.sem(pre),
                    'postmean': np.mean(post),
                    'postmedian': np.median(post),
                    'poststd': np.std(post),
                    'postsem': stats.sem(post),
                    'nstim': len(times)}
                celltype_data_dict[celltype].append(dstats)
    for celltype, datalist in celltype_data_dict.items():
        df = pd.DataFrame(datalist, columns=['filename',
                    'dbcount',
                    'premean',
                    'premedian',
                    'prestd',
                    'presem',
                    'postmean',
                    'postmedian',
                    'poststd',
                    'postsem',
                    'nstim'])
        outfile = '{}_prepost_rates_{}_{}ms_window.csv'.format(outprefix, celltype, window*1e3)
        df.to_csv(outfile)
def systemRefresh():
    """
    Pull the system spices from github and create a new system.tar
    for all the clients to download.
    """
    log('UPDATING SYSTEM SPICE')

    goto_dir(makepath('spicerackclient'))
    command('git', 'pull')
    command('tar', '-cvf', makepath('system.tar'), 'system')
    goto_dir(makepath(''))
    
    currentversion = dget('systemversion', 0)
    currentversion = 1 + int(currentversion)
    put('systemversion', currentversion)

    filewrite('systemversion.txt', str(currentversion))

    command('tar', '--append', '--file=system.tar', 'systemversion.txt')

    log('UPDATED SYSTEM SPICE TO VERSION: %s' % currentversion)
    return 'success'
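# `command`, `goto_dir` and `log` in these server-side snippets look like thin
# wrappers over the standard library. A hedged sketch of what they might do
# (the real helpers are not shown on this page):
import os
import subprocess

def command(*args):
    """Run an external command, raising on a non-zero exit status."""
    subprocess.check_call(list(args))

def goto_dir(path):
    """Change the current working directory."""
    os.chdir(path)

def log(msg, *args):
    """Minimal stand-in for the project's logger."""
    print(msg % args if args else msg)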
Example #15
def set_logging(file = None, level = None, echo = True, logger = None):
    if logger is None:
        logger = logging.getLogger()
    logger.setLevel(logging.DEBUG)
    logger.handlers = []   # Clear any existing handlers

    if echo:
        # The stdout handler should only print what the user has requested
        shandler = logging.StreamHandler()
        shandler.setFormatter(logging.Formatter("%(message)s"))
        if   level == 0: shandler.setLevel(logging.ERROR)
        elif level == 1: shandler.setLevel(logging.INFO)
        elif level == 2: shandler.setLevel(logging.DEBUG)
        logger.addHandler(shandler)

    if file is not None:
        # The file handler should always write full debug logging
        util.makepath(os.path.dirname(file))
        fhandler = logging.FileHandler(file,'w')
        fhandler.setFormatter(logging.Formatter("%(message)s"))
        fhandler.setLevel(logging.DEBUG)
        logger.addHandler(fhandler)
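# Usage sketch for `set_logging`: the stdout handler shows only what the chosen
# verbosity requests, while the file always receives full debug output. The
# path 'logs/run.log' is an arbitrary example; util.makepath creates the
# directory on demand.
import logging

set_logging(file='logs/run.log', level=1, echo=True)
logging.info('visible on stdout and in the file')  # level 1 passes INFO
logging.debug('written only to logs/run.log')      # stdout handler filters DEBUG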
def multifile_firing_rate_distribution(flist='unconnected_network.csv',
                                                   figfilename='Figure_3B.svg', trange=(2,20)):
    """Plots histograms showing distribution of firing rates among cells
    of each type collected from multiple simulations.

    """
    start = trange[0]
    end = trange[1]
    rates = defaultdict(list)    
    for fname in get_filenames(flist):
        data = TraubData(makepath(fname))
        if data.simtime < end:
            end = data.simtime
        for celltype in CELLTYPES:
            for cell, spiketrain in data.spikes.items():
                if cell.startswith(celltype):
                    rate = 1.0 * np.count_nonzero((spiketrain > start) & (spiketrain < end)) / (end - start)
                    rates[celltype].append(rate)
    bins = np.arange(0, 61.0, 5.0)
    hists = {}
    prev = np.zeros(len(bins) - 1)
    ax = None
    for ii, celltype in enumerate(CELLTYPES):
        ctype_rates = rates[celltype]
        ax = plt.subplot(len(rates), 1, ii+1, sharex=ax, sharey=ax)
        h, b = np.histogram(ctype_rates, bins=bins)
        h = np.asarray(h, dtype='float64') / len(ctype_rates)
        x = bins[:-1]
        plt.bar(x,
                h,
                color=cellcolor[celltype],
                width=(bins[1]-bins[0]))
                # bottom = prev, color=cellcolor[celltype], label=celltype)
        prev += h        
        ax.tick_params(axis='y', right=False, left=False)
        # plt.setp(ax, frame_on=False)
        ax.tick_params(axis='x', top=False, bottom=True)        
        ax.spines['bottom'].set_color((0, 0, 0, 0))
        ax.spines['top'].set_visible(False)
        ax.spines['right'].set_visible(False)
        # ax.xaxis.set_visible(False)
    ax.xaxis.set_visible(True)
    ax.set_xticks(bins[::2])
    ax.set_yticks([0, 1.0])
    ax.tick_params(axis='y', left=True)
    plt.xlabel('Firing rate (Hz)')
    plt.tight_layout()
    plt.savefig(figfilename)
    plt.show()
Example #22
def dump_syncspike_stats(outfile,
                         dbcnt_file_dict,
                         trange=(2, 20),
                         cutoff=0.2,
                         binsize=5e-3,
                         lookahead=3):
    """Combined statistics for synchrounous fractions and inter burst intervals"""
    datalist = []
    for dbcnt, flist in dbcnt_file_dict.items():
        for fname in flist:
            data = TraubData(makepath(fname))
            hist, bins = data.get_spiking_cell_hist('SpinyStellate',
                                                    timerange=trange,
                                                    binsize=binsize,
                                                    frac=True)
            peaks, troughs = peakdetect(hist, bins[:-1], lookahead=lookahead)
            time, frac = zip(*peaks)
            frac = np.array(frac)
            time = np.array(time)
            idx = np.flatnonzero(frac > cutoff)
            frac = frac[idx].copy()
            ibi = np.diff(time[idx])
            data_stats = {
                'filename': fname,
                'dbcount': dbcnt,
                'frac_mean': np.mean(frac),
                'frac_median': np.median(frac),
                'frac_iqr': np.diff(np.percentile(frac, [25, 75]))[0],
                'frac_sem': stats.sem(frac),
                'ibi_mean': np.mean(ibi),
                'ibi_median': np.median(ibi),
                'ibi_iqr': np.diff(np.percentile(ibi, [25, 75]))[0],
                'ibi_sem': stats.sem(ibi)
            }
            datalist.append(data_stats)
    dataframe = pd.DataFrame(datalist,
                             columns=[
                                 'filename', 'dbcount', 'frac_mean',
                                 'frac_median', 'frac_iqr', 'frac_sem',
                                 'ibi_mean', 'ibi_median', 'ibi_iqr', 'ibi_sem'
                             ])
    dataframe.to_csv(outfile)
    print('Saved data in', outfile)
Example #24
def population_vm_spectrum(filename, trange=(0, 1e9)):
    result = {}
    data = TraubData(makepath(filename))
    npts = int(data.simtime / data.simdt)
    t = np.arange(0, npts, 1.0) * data.simdt
    for celltype in data.cellcounts._fields:
        count = data.cellcounts._asdict()[celltype]
        if count == 0:
            continue
        vm_sum = np.zeros(npts)    
        for cellname in data.fdata['Vm']:
            if cellname.startswith(celltype):
                cellvm = data.fdata['Vm'][cellname]
                assert len(cellvm) == len(vm_sum)                                          
                vm_sum = vm_sum + cellvm
        vm_sum = vm_sum[(t >= trange[0]) & (t < trange[1])] / count
        ps = np.abs(np.fft.fft(vm_sum))**2
        freq = np.fft.fftfreq(ps.size, data.simdt)
        idx = np.argsort(freq)
        result[celltype] = (freq[idx].copy(), ps[idx].copy())
    return result
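# `population_vm_spectrum` returns a {celltype: (freq, power)} mapping. A short
# plotting sketch for the result ('data_sample.h5' is a placeholder filename):
import pylab as plt

spectra = population_vm_spectrum('data_sample.h5', trange=(2, 20))
for celltype, (freq, power) in spectra.items():
    mask = freq > 0  # positive frequencies; a real signal's spectrum is symmetric
    plt.semilogy(freq[mask], power[mask], label=celltype)
plt.xlabel('Frequency (Hz)')
plt.ylabel('Power')
plt.legend()
plt.show()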
def dump_psth_peaks(ffname, outprefix, celltype, window=100e-3, binwidth=5e-3):
    """Dump the population spike histogram values."""
    with open(
            '{}_psth_{}_{}ms_window_{}ms_bins.csv'.format(
                outprefix, celltype, window * 1e3, binwidth * 1e3),
            'w', newline='') as fd:
        writer = csv.writer(fd,
                            delimiter=',',
                            quotechar='"',
                            quoting=csv.QUOTE_MINIMAL)
        dbcnt_flist = get_dbcnt_dict(ffname)
        bins = np.arange(-window / 2.0, window / 2.0 + 0.5 * binwidth,
                         binwidth)
        writer.writerow(['dbcount', 'filename'] +
                        list(np.asarray(np.round(bins[1:] * 1e3), dtype=int)))
        for dbcnt, flist in dbcnt_flist.items():
            for fname in flist:
                data = TraubData(makepath(fname))
                pop_train_list = []
                bgtimes, probetimes = get_stim_times(data, correct_tcr=True)
                if (len(bgtimes) == 0) and (len(probetimes) == 0):
                    print('EE: {} has no TCR spiking on stimulus.'.format(
                        fname))
                    continue
                stim_times = np.concatenate((bgtimes, probetimes))
                stim_times.sort()
                # print '###', stim_times
                for cell, train in data.spikes.items():
                    if cell.startswith(celltype):
                        pop_train_list.append(train)
                pop_train = np.concatenate(pop_train_list)
                pop_train.sort()

                bgpsth, b = psth(pop_train,
                                 stim_times,
                                 window=window,
                                 bins=bins)
                bgpsth /= (data.cellcounts._asdict()[celltype] * binwidth)
                writer.writerow([dbcnt, fname] + list(bgpsth))
Example #26
def main():
	parser = optparse.OptionParser()

	parser.add_option("-g", "--generation", dest="generation", type="int", 
			default=0, help="generation to spawn (generation 0 is fully random)")

	parser.add_option("-f", "--folder", dest="folder", type="string",
			default="run", help="folder to create generation in")
	
	parser.add_option("-n", "--population", dest="population", type="int",
			default=20, help="population size")

	parser.add_option("-p", "--processes", dest="processes", type="int",
			default=1, help="number of processes to use")

	parser.add_option("-d", "--dry", dest="dry", action="store_true",
			help="spawn generation without programming/testing")

	options, args = parser.parse_args(sys.argv)

	# make base folder for this generation
	initial = os.getcwd()
	base = ("%s/%s/" + GENERATION) % (initial, options.folder, options.generation)
	util.makepath(base, delete=True)
	os.chdir(base)

	# get or set environment variable for the scripts path
	scripts = util.setenv("EVOLUTION_SCRIPTS", initial)
	
	# get or set environment variable for verilog folder path
	verilog = util.setenv("EVOLUTION_VERILOG", initial + "/verilog")

	print "generation %d:" % options.generation, options.population, "individuals"
	
	# create seeds from previous generation
	if options.generation > 0:
		prev = options.generation - 1
		print "creating seeds from generation", prev
		inputs = glob.glob("../%s/%s" % (GENERATION % prev, re.sub("%.*", "*.csv", INDIVIDUAL)))
		
		if len(inputs) == 0:
			print "ERROR: no seeds available in generation", prev
			sys.exit(1)

		for i in range(options.population):
			in1 = random.choice(inputs)
			in2 = random.choice(inputs)
			out = (INDIVIDUAL + ".seed") % i
			util.execute("%s/merge_individuals.py %s %s %s" % \
					(scripts, in1, in2, out), redirect="merge.log")

	# don't test individuals in the generate script
	os.unsetenv("EVOLUTION_RUN")

	# run processes
	if options.processes > 1:
		# create that many processes
		print "creating", options.processes, "parallel processes"
		pids = list()
		for i in range(options.processes):
			pid = os.fork()
			if pid > 0:
				pids.append(pid)
			else:
				run_process(i, options.processes, options.population, base, scripts)
				sys.exit(0)

		# wait for all processes to return
		for pid in pids:
			os.waitpid(pid, 0)

	else:
		# just run it directly if there's only one thread
		run_process(0, 1, options.population, base, scripts)
	
	outputs = glob.glob(re.sub("%.*", "*.sof", INDIVIDUAL))

	print "%d/%d individuals were generated sucessfully" % \
			(len(outputs), options.population)

	if options.dry:
		return
	
	for sof in outputs:
		test_individual(sof, 1, "/dev/ttyUSB0", scripts)
from matplotlib import pyplot as plt
from traubdata import TraubData
from trbhist import get_spiketime_hist
from util import makepath, smooth_gaussian
from config import cellcolor

plt.rc('font', size=12)
#plt.rc('figure', figsize=(3, 2))  # ineffective

from fig_2_b_spike_raster import datafile_figure_2b #= 'data_20140724_125424_236053_compute-0-2.local.h5' # normally distributed synaptic conductance

if __name__ == '__main__':
    figfilename = 'figures/Figure_2D_population_firing_rates.svg'
    binsize = 5e-3
    trange = (2, 20)
    data = TraubData(makepath(datafile_figure_2b))
    celltypes = ['SpinyStellate', 'DeepBasket', 'DeepLTS']
    fig = plt.figure(figsize=(3, 2))
    ax = fig.add_subplot(111)
    for celltype in celltypes:
        y, bins = get_spiketime_hist(data, celltype,
                                        timerange=trange, binsize=binsize,
                                        percell=True, pertime=True, density=False)
        y = smooth_gaussian(y, binsize=binsize, twindow=50e-3, std=1.0)
        ax.plot((bins[1:] + bins[:-1]) * 0.5, y, color=cellcolor[celltype])
    ax.set_xlabel('Time (s)')
    ax.set_xlim((5, 8))
    ax.set_ylabel('Firing rate (Hz)')
    ax.set_yticks((0, 100, 200))
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
# Code:
"""Plots spike rasters for populations when simulated without synaptic
connections.

"""

from collections import defaultdict
import numpy as np
import pylab as plt
from util import makepath
from traubdata import TraubData
from trbhist import get_spiketime_hist
from util import makepath, smooth_gaussian
from config import cellcolor
from plotutil import plot_spike_raster

plt.rc('font', size=12)
plt.rc('figure', figsize=(3, 4))

unconnected_filename = 'data_20140423_101740_1735.h5'

if __name__ == '__main__':
    figfilename = 'figures/Figure_3C_spike_raster_disconnected.png'
    trange = (5, 6)
    fig, ax = plot_spike_raster(makepath(unconnected_filename), trange)
    plt.savefig(figfilename, transparent=True)
    plt.show()

#
# figure_2_c_nosynapse_spikeraster.py ends here
Example #29
+30 deep LTS

"""
import os
import sys
import numpy as np
from matplotlib import pyplot as plt
from util import makepath
from config import cellcolor, mdict
from plotutil import plot_spike_raster
from traubdata import TraubData, cellcount_tuple

# datafile_figure_1b = 'data_20120922_195344_13808.h5' # This is a simulation with 30 deep basket, fixed synaptic conductance

datafile_figure_2b = 'data_20140724_125424_236053_compute-0-2.local.h5'  # normally distributed synaptic conductance

from matplotlib import rc
rc('font', size=12)
rc('figure', figsize=(3, 4))

if __name__ == '__main__':
    figfilename = 'figures/Figure_2B_spike_raster.png'
    fig, ax = plot_spike_raster(makepath(datafile_figure_2b), (5, 8))
    fig.tight_layout()
    fig.savefig(figfilename, transparent=True)
    print('Saved figure in %s' % figfilename)
    plt.show()

#
# figure_1_b.py ends here
def spiceMerge(spices, username):
    """
    Manages the merger of a spice into the development repository.

    @param spices: list of spice directory names
    """
    log('[spicemanager] In func:spiceMerge')
    goto_dir(makepath('spicerackdev'))
    command('git', 'pull')
    goto_dir(makepath(''))

    spice_to_metadata = {}
    for dirname in spices:
        data = jsonload(makepath('spicerackdev', dirname, 'metadata.json'))
        spicename = data['name']
        if 'spicename' not in data:
            data['spicename'] = spicename
        if 'width' not in data:
            data['width'] = 0
            data['height'] = 0
            data['dependencies'] = []

        Statsd.increment('spices.spicerackdev.%s' % spicename.replace(' ', '-'))

        mid = get('spice-id:%s' % spicename.replace(' ', '-'))
        if mid is None:
            mid = str(uuid.uuid4())
            put('spice-id:%s' % spicename.replace(' ', '-'), mid)

        log('[spicemanager] Processing %s %s' % (spicename, mid))

        version = 1 + int(dget('version:%s' % mid, 0))
        put('version:%s' % mid, version)
        log('[spicemanager] Version: %s' % version)

        spice_to_metadata[mid] = {
            'spicename' : data['spicename'],
            'gituser' : username,
            'author' : data['author'],
            'tagline' : data['description'],
            'description' : data['description'],
            'version' : version,
            'hotkey' : False,
            'main' : data['main'],
            'width' : data['width'],
            'height' : data['height'],
            'dependencies' : data['dependencies']
            }

        put('metadata:%s' % mid, spice_to_metadata[mid])
        
        jsondump(makepath('spicerackdev', dirname, 'metadata.json'), spice_to_metadata[mid])
                   
        # Tar the spice directory into /spices/{mid}.tar.gz
        command('tar', '-cvf', makepath('spices', '%s.tar.gz' % mid), makepath('spicerackdev', dirname))

        # Copy the icon file into /icons
        iconpath = makepath('spicerackdev', dirname, 'icon.png')
        command('cp', '-f', iconpath, makepath('icons', '%s.png' % mid))
        
        iconpath = makepath(dirname, 'icon.png')
        if not os.path.exists(iconpath):
            command('tar', '-cvf', makepath('spices', '%s.tar.gz' % mid), makepath('spicerackdev', dirname))
        else:
            iconpath = os.path.dirname(iconpath)  # drop the trailing 'icon.png'
            command('tar',
                    '-cvf',
                    makepath('spices', '%s.tar.gz' % mid),
                    makepath('spicerackdev', dirname),
                    '--directory=%s' % iconpath,
                    'icon.png')

        goto_dir(makepath('spicerackdev'))
        command('git', 'stash')
        goto_dir(makepath(''))

        heroku_metadata = {
            'spicename' : data['spicename'],
            'gituser' : username,
            'author' : data['author'],
            'tagline' : data['description'],
            'description' : data['description'],
            'version' : version,
            'hotkey' : False,
            'main' : data['main']
            }

        log('HEROKU: %s %s', type(heroku_metadata), heroku_metadata)
        for key, val in heroku_metadata.items():
            log('KEY %s VAL %s %s', key, type(val), val)

        resp, content = send_post({str(mid) : heroku_metadata}, '/spices/refresh')
        log('[spicemanager] Heroku response: %s' % str(resp))

    return 'success'
Example #32

# Code:

from collections import defaultdict
import numpy as np
import pylab as plt
from util import makepath
from traubdata import TraubData
from trbhist import get_spiketime_hist
from util import makepath, smooth_gaussian
from config import cellcolor
from plotutil import plot_spike_raster

plt.rc('font', size=12)
plt.rc('figure', figsize=(3, 4))

data_filename = 'data_20140724_125424_3938_compute-0-1.local.h5'

if __name__ == '__main__':
    figfilename = 'figures/Figure_5A_spikeraster_30DB.png'
    trange = (5, 8)
    fig, ax = plot_spike_raster(makepath(data_filename), trange)
    plt.savefig(figfilename, transparent=True)
    plt.show()

#
# fig_5_b_spikeraster_30DB.py ends here
"""
import os
import sys
import numpy as np
from matplotlib import pyplot as plt
from util import makepath
from config import cellcolor, mdict
from plotutil import plot_spike_raster
from traubdata import TraubData, cellcount_tuple

# datafile_figure_1b = 'data_20120922_195344_13808.h5' # This is a simulation with 30 deep basket, fixed snaptic conductance

datafile_figure_2b = 'data_20140724_125424_236053_compute-0-2.local.h5' # normally distributed synaptic conductance

from matplotlib import rc
rc('font', size=12)
rc('figure', figsize=(3, 4))


if __name__ == '__main__':
    figfilename='figures/Figure_2B_spike_raster.png'
    fig, ax = plot_spike_raster(makepath(datafile_figure_2b), (5, 8))
    fig.tight_layout()
    fig.savefig(figfilename, transparent=True)
    print 'Saved figure in %s' % (figfilename)
    plt.show()

# 
# figure_1_b.py ends here
Example #34
#
#

# Code:

import os
import sys
import numpy as np
from matplotlib import pyplot as plt
from util import makepath
from config import cellcolor, mdict
from plotutil import plot_spike_raster
from traubdata import TraubData, cellcount_tuple
from matplotlib import rc

rc('font', size=12)
rc('figure', figsize=(3, 4))

if __name__ == '__main__':
    datafile = 'data_20141026_001859_109751_compute-0-15.local.h5'  # 1.2x

    figfilename = 'figures/Figure_4B_spike_raster_high_GABA.png'
    fig, ax = plot_spike_raster(makepath(datafile), (5, 8))
    # fig.tight_layout()
    fig.savefig(figfilename, transparent=True)
    print('Saved figure in %s' % figfilename)
    plt.show()

#
# fig_4_b_high_gaba_spike_raster.py ends here
Example #35
def collect_legacy_feedback(data):
    userid = data['userid']
    feedback = data['feedback']
    fileappend(makepath('feedback.txt'), feedback)
"""Plots spike rasters for populations when simulated without synaptic
connections.

"""

from collections import defaultdict
import numpy as np
import pylab as plt
from util import makepath
from traubdata import TraubData
from trbhist import get_spiketime_hist
from util import makepath, smooth_gaussian
from config import cellcolor
from plotutil import plot_spike_raster

plt.rc('font', size=12)
plt.rc('figure', figsize=(3, 4))

unconnected_filename = 'data_20140423_101740_1735.h5'

if __name__ == '__main__':
    figfilename = 'figures/Figure_3C_spike_raster_disconnected.png'
    trange = (5, 6)
    fig, ax = plot_spike_raster(makepath(unconnected_filename), trange)
    plt.savefig(figfilename, transparent=True)
    plt.show()
    

# 
# figure_2_c_nosynapse_spikeraster.py ends here
Example #37
from matplotlib import pyplot as plt
from traubdata import TraubData
from trbhist import get_spiketime_hist
from util import makepath, smooth_gaussian
from config import cellcolor

plt.rc('font', size=12)
plt.rc('figure', figsize=(3, 2))

uninhibited_filename = 'data_20130107_175609_8221.h5'

if __name__ == '__main__':
    figfilename = 'figures/Figure_3A_uninhibited_firing_rate.svg'
    binsize = 5e-3
    trange = (2, 20)
    data = TraubData(makepath(uninhibited_filename))
    celltype = 'SpinyStellate'
    y, bins = get_spiketime_hist(data, celltype,
                                    timerange=trange, binsize=binsize,
                                    percell=True, pertime=True, density=False)
    y = smooth_gaussian(y, binsize=binsize, twindow=50e-3, std=1.0)
    plt.plot((bins[1:] + bins[:-1]) * 0.5, y, color=cellcolor[celltype])
    plt.xlabel('Time (s)')
    plt.xlim((5, 7))
    plt.ylabel('Firing rate (Hz)')
    plt.yticks((100, 200, 300))
    plt.gca().spines['top'].set_visible(False)
    plt.gca().spines['right'].set_visible(False)
    plt.gca().get_xaxis().tick_bottom()
    plt.gca().get_yaxis().tick_left()
    plt.xticks([5.0, 6.0, 7.0])
Example #39
import sys
import numpy as np
from matplotlib import pyplot as plt
from util import makepath
from config import cellcolor, mdict, CELLTYPES

from traubdata import TraubData, cellcount_tuple
from matplotlib import rc
from fig_2_b_spike_raster import datafile_figure_2b
rc('font', size=12)
rc('figure', figsize=(4, 3))
start = 5.5
end = 6.0
if __name__ == '__main__':
    data = TraubData(makepath(datafile_figure_2b))
    ax = None
    for ii, celltype in enumerate(CELLTYPES):
        for node in data.fdata['Vm']:
            if node.startswith(celltype):
                ax = plt.subplot(len(CELLTYPES), 1, ii+1, sharex=ax, sharey=ax)
                # plt.title(celltype)
                Vm = data.fdata['Vm'][node][:]
                ts = np.linspace(0, data.simtime, Vm.shape[0])
                idx = np.flatnonzero((ts > start) & (ts < end))
                ts = ts[idx].copy()
                Vm = Vm[idx].copy()
                plt.plot(ts, Vm*1e3, color=cellcolor[celltype])
                plt.gca().spines['top'].set_visible(False)
                plt.gca().spines['right'].set_visible(False)
                plt.gca().get_xaxis().tick_bottom()
# 

# Code:

from collections import defaultdict
import numpy as np
import pylab as plt
from util import makepath
from traubdata import TraubData
from trbhist import get_spiketime_hist
from util import makepath, smooth_gaussian
from config import cellcolor
from plotutil import plot_spike_raster

plt.rc('font', size=12)
plt.rc('figure', figsize=(3, 4))

data_filename = 'data_20140526_110240_20695.h5'  #'data_20140525_130513_20265.h5'  #'data_20140825_113047_105368_compute-0-2.local.h5'

if __name__ == '__main__':
    figfilename = 'figures/Figure_5B_spikeraster_80DB.png'
    trange = (5, 8)
    fig, ax = plot_spike_raster(makepath(data_filename), trange)
    plt.savefig(figfilename, transparent=True)
    plt.show()



# 
# fig_5_b_spikeraster_30DB.py ends here