Example #1
def main():
    cfg = tools.load_config("rep_graph_mc")
    db = tools.get_db(cfg["db"], 'r')

    for ns in range(1, 4):
        ups_key = "ups%ds" % ns
        axis = cfg[ups_key]['axis']
        binning = tools.axis2bins(axis)

        hists_years = {}
        for data_key in ["mc2011", "mc2012"]:
            print(data_key)
            db_ups = db[data_key][ups_key]
            hists = [pyroot.h1_axis(axis), pyroot.h1_axis(axis), pyroot.h1_axis(axis)]
            for h in hists:
                if data_key == "mc2011":
                    h.blue()
                else:
                    h.red()

            for i, bin in enumerate(binning, start=1):
                sigmas = []
                for np in pdg.VALID_UPS_DECAYS[ns]:
                    chib_key = 'chib1%dp' % np
                    sigmas.append(db_ups[bin][chib_key]['sigma'])
                sigma = VE(str(sigmas[0]))
                hists[0][i] = sigma
                for j, s in enumerate(sigmas[1:], start=1):
                    hists[j][i] = VE(str(s)) / sigma
            hists_years[data_key] = hists
        print("%s - %s" % (data_key, ups_key))
        shell()
Example #2
def main():
    cfg = tools.load_config("rep_width_data_mc")

    axis = cfg["axis"]
    binning = tools.axis2bins(axis)
    db_mc = tools.get_db(cfg["mc"], 'r')
    db_data = tools.get_db(cfg["data"], 'r')

    for year in ["2011", "2012"]:
        h_mc = pyroot.h1_axis(axis)
        h_mc.red()
        h_data = pyroot.h1_axis(axis)
        h_data.blue()

        db_mc_year = db_mc["mc%s" % year]["ups1s"]
        db_data_year = db_data[year]

        for i, bin in enumerate(binning, start=1):
            h_mc[i] = VE(str(db_mc_year[bin]['chib11p']['sigma']))
            h_data[i] = VE(str(db_data_year[bin]['sigma_b1_1p']))

        h_ratio = h_data / h_mc
        h_ratio.Draw()
        h_ratio.Fit("pol0")
        tools.save_figure("%s/sigma_data_mc_%s" % (cfg["output_dir"], year))
        shell()
Example #3
def main():
    cli_args = docopt(__doc__, version="1.0")

    cfg = tools.load_config("rep_graph_fits")
    cfg_profile = cfg['profiles'][cli_args['--profile']]

    db = tools.get_db(cfg_profile['db'])
    binning = tools.axis2bins(cfg_profile['axis'])

    for plot in cfg_profile['plots']:
        key = plot['key']
        graphs = []
        for data_key in ['2011', '2012']:
            values = []
            for bin in binning:
                ve = pyroot.VE(str(db[data_key][bin][key]))
                values.append((bin, ve))
            graphs.append(graph.Graph(color=data_key, values=values))

        ymin = plot.get("ymin", None)
        ymax = plot.get("ymax", None)
        mg = graph.MultiGraph(graphs=graphs, ymin=ymin, ymax=ymax)

        mg.draw()

        level = plot.get("level", None)
        if level:
            graphs[0].h.level(level)

        filename = "%s/%s" % (cfg_profile["output_dir"], key)
        tools.save_figure(filename)
    shell()
Example #4
def process(data_key, tree, models, ns, nb, np, cut, pt_axis,
            is_unbinned, binning, is_save, output_db, output_figs):
    def fit():
        model.fitData()
        print(model)
        if is_save:
            save(
                output_db=output_db, output_figs=output_figs,
                data_key=data_key, params=model.params(), ns=ns,
                nb=nb, np=np, pt_bin=pt_bin
            )
        if model.status:
            log.info("OK")
        else:
            log.err("BAD: type 'fit()'")

    new_cut = dict(cut)
    field = "dmplusm%ds" % ns
    new_cut[field] = tuple(binning[1:])
    new_cut["np"] = np
    new_cut["nb"] = nb

    log = Logger()
    list_id = pyroot.rootID()
    cut_str = tools.cut_dict2str(new_cut)

    log.info("cut:" + cut_str)
    tree.Draw(">>%s" % list_id, cut_str, "entrylist")
    elist = ROOT.gROOT.FindObject(list_id)
    tree.SetEntryList(elist)

    model = get_model(models[ns - 1], np, binning)
    for pt_bin in tools.axis2bins(pt_axis):
        bin_cut = {"pt_ups": pt_bin}
        if is_unbinned:
            data = source.dataset(tree=tree, cut=bin_cut, field=field,
                                  has_splot=True)
        else:
            data = source.histogram(tree=tree,
                                    cut=bin_cut,
                                    field=field,
                                    nbins=binning[0])
        model.data = data
        canvas.SetTitle(
            "%s: chib%d%dp to Y(%dS) (%d, %d) " % (data_key, nb, np, ns,
                                                   pt_bin[0], pt_bin[1]))

        fit()
        if not model.status:
            shell()
    tree.SetEntryList(0)
Example #5
def reprocess_storm(atcf_stormfilename):

    from IPython import embed as shell; shell()

    datelist = [startdt.strftime('%Y%m%d')]
    for nn in range((enddt - startdt).days + 2):
        datelist += [(startdt + timedelta(nn)).strftime('%Y%m%d')]

    hourlist = []
    for ii in range(24):
        hourlist += [(enddt-timedelta(hours=ii)).strftime('%H')]
    hourlist.sort()
    # Do newest first
    datelist.sort(reverse=True)

    for sat,sensor in [('gcom-w1','amsr2'),
                       ('gpm','gmi'),
                       ('npp','viirs'),
                       ('aqua','modis'),
                       ('terra','modis'),
                       ('himawari8','ahi'),
                       ('goesE','gvar'),
                       ('goesW','gvar')
                       ]:
        for datestr in datelist:
            process_overpass(sat,sensor,
                productlist=None,
                sectorlist=[startstormsect.name],
                sectorfiles=None,
                extra_dirs=None,
                sector_file=sector_file,
                datelist=[datestr],
                hourlist=hourlist,
                queue=os.getenv('DEFAULT_QUEUE'),
                mp_max_cpus=3,
                allstatic=False,
                alldynamic=True,
                # list=True will just list files and not actually run
                #list=True,
                list=False,
                quiet=True,
                start_datetime = startdt,
                end_datetime = enddt,
                )
Example #6
def shell():
    """Initialize an interactive shell for Duende.

    When a config file is not given as argument duende.ini will
    be used as default.

    Example:
        # paster shell duende.ini

    """
    if len(sys.argv) < 2:
        config_file = 'duende.ini'
    else:
        config_file = sys.argv[1]

    config_path = os.path.abspath(config_file)
    if not os.path.isfile(config_path):
        raise Exception(u'Config file %s not found' % config_path)

    cur_dir = os.path.dirname(config_path)
    if cur_dir not in sys.path:
        sys.path.insert(0, cur_dir)

    print("Loading duende app config from {0}".format(config_path))
    config_key = 'config:%s' % config_path
    wsgiapp = loadapp(config_key)

    try:
        import IPython

        # NOTE: lexicographic string compare; adequate here because any modern
        # IPython takes the embed() branch below
        if IPython.__version__ < '0.11':
            from IPython.Shell import IPShellEmbed

            shell = IPShellEmbed(['-quick'])
        else:
            from IPython import embed as shell
    except ImportError:
        # when ipython is not installed use normal console
        shell = SimpleConsole()
        
    shell(header=BANNER)
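
For reference, a minimal sketch of the same fallback idea using only the standard library (SimpleConsole is project-specific; code.interact is a hypothetical stand-in, not the duende implementation):

def get_shell():
    """Return an interactive shell callable: IPython's embed if available,
    otherwise a stdlib console that accepts the same header keyword."""
    try:
        from IPython import embed  # modern IPython exposes embed() directly
        return embed
    except ImportError:
        import code

        def fallback(header=''):
            # code.interact prints the banner and opens a REPL in this scope
            code.interact(banner=header, local={})
        return fallback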
Example #7
    def trial_params(self):
        blinks_nr = np.zeros(self.nr_trials)
        number_blinks = np.zeros(self.nr_trials)
        for t in range(self.nr_trials):
            try:
                blinks_nr[t] = sum(
                    (self.blink_start_times > self.sound_times[t]) *
                    (self.blink_start_times < self.response_times[t] + 1000))
            except:
                shell()
        sacs_nr = np.zeros(self.nr_trials)
        sacs_dur = np.zeros(self.nr_trials)
        sacs_vel = np.zeros(self.nr_trials)
        for t in range(self.nr_trials):
            saccades_in_trial_indices = (
                self.saccade_start_times > self.sound_times[t] - 500) * (
                    self.saccade_start_times < self.response_times[t] + 1500)
            sacs_nr[t] = sum(saccades_in_trial_indices)
            sacs_dur[t] = sum(self.saccade_durs[saccades_in_trial_indices])
            if sacs_nr[t] != 0:
                sacs_vel[t] = max(
                    self.saccade_peak_velocities[saccades_in_trial_indices])

        run_nr = int(self.alias.split('_')[-2])
        session_nr = int(self.alias.split('_')[-1])
        self.parameters['omissions'] = self.omission_indices
        self.parameters['omissions_sac'] = self.omission_indices_sac
        self.parameters['omissions_blinks'] = self.omission_indices_blinks
        self.parameters['blinks_nr'] = blinks_nr
        self.parameters['sacs_nr'] = sacs_nr
        self.parameters['sacs_dur'] = sacs_dur
        self.parameters['sacs_vel'] = sacs_vel
        self.parameters['trial'] = np.arange(self.nr_trials)
        self.parameters['run'] = run_nr
        self.parameters['session'] = session_nr
        self.ho.data_frame_to_hdf(self.alias, 'parameters2', self.parameters)

        print('{} total trials'.format(self.nr_trials))
        print('{} omissions'.format(sum(self.omission_indices)))
        print('')
Example #8
    def __init__(self,
                 subjects,
                 experiment_name,
                 project_directory,
                 sample_rate_new=50):

        self.subjects = subjects
        self.nr_subjects = len(self.subjects)
        self.experiment_name = experiment_name
        self.project_directory = project_directory
        self.sample_rate_new = int(sample_rate_new)
        self.downsample_rate = int(1000 / sample_rate_new)

        parameters = []
        for s in self.subjects:
            self.base_directory = os.path.join(self.project_directory,
                                               self.experiment_name, s)
            self.hdf5_filename = os.path.join(self.base_directory, 'processed',
                                              s + '.hdf5')
            self.ho = hedfpy.HDFEyeOperator(self.hdf5_filename)

            try:
                parameters.append(
                    self.ho.read_session_data('', 'parameters_joined'))
            except:
                shell()
        self.parameters_joined = pd.concat(parameters)
        self.omissions = np.array(self.parameters_joined['omissions'])
        self.omissions = self.omissions + (np.array(
            self.parameters_joined['correct']) == -1)
        self.omissions = self.omissions + (np.array(
            self.parameters_joined['trial']) == 0)
        # self.omissions = self.omissions + (np.array(self.parameters_joined['missing_data']) > 0.25)

        # regress out RT per session:
        for subj in np.unique(self.parameters_joined.subject):
            for s in np.unique(self.parameters_joined.session[
                    self.parameters_joined.subject == subj]):
                ind = (self.parameters_joined.subject
                       == subj) * (self.parameters_joined.session == s)
                rt = np.array(self.parameters_joined['rt'][ind]) / 1000.0
                pupil_d = np.array(self.parameters_joined['pupil_d'][ind])
                pupil_d = myfuncs.lin_regress_resid(pupil_d,
                                                    [rt]) + pupil_d.mean()
                self.parameters_joined['pupil_d'][ind] = pupil_d

        # boolean masks: use ~ (unary minus on bool arrays is unsupported in modern numpy)
        self.parameters_joined = self.parameters_joined[~self.omissions]
        self.rt = np.array(self.parameters_joined['rt'])
        self.hit = np.array(self.parameters_joined['hit'], dtype=bool)
        self.fa = np.array(self.parameters_joined['fa'], dtype=bool)
        self.miss = np.array(self.parameters_joined['miss'], dtype=bool)
        self.cr = np.array(self.parameters_joined['cr'], dtype=bool)
        self.yes = np.array(self.parameters_joined['yes'], dtype=bool)
        self.no = ~np.array(self.parameters_joined['yes'], dtype=bool)
        self.run = np.array(self.parameters_joined['run'], dtype=int)
        self.session = np.array(self.parameters_joined['session'], dtype=int)
        try:
            self.present = np.array(self.parameters_joined['signal_present'],
                                    dtype=bool)
        except:
            self.present = np.array(
                self.parameters_joined['target_present_in_stimulus'],
                dtype=bool)
        self.absent = ~self.present
        self.correct = np.array(self.parameters_joined['correct'], dtype=bool)
        self.error = ~np.array(self.parameters_joined['correct'], dtype=bool)
        self.pupil_b = np.array(self.parameters_joined['pupil_b'])
        self.pupil_d = np.array(self.parameters_joined['pupil_d'])
        self.pupil_t = np.array(self.parameters_joined['pupil_t'])
        self.subj_idx = np.concatenate(
            np.array([
                np.repeat(
                    i,
                    sum(self.parameters_joined['subject'] == self.subjects[i]))
                for i in range(len(self.subjects))
            ]))
        self.criterion = np.array([
            np.array(self.parameters_joined[self.parameters_joined['subject']
                                            == subj]['criterion'])[0]
            for subj in self.subjects
        ])

        #########

        # pupil split:
        self.pupil_l_ind = []
        self.pupil_h_ind = []
        for subj_idx in self.subjects:
            d = self.parameters_joined[self.parameters_joined.subject ==
                                       subj_idx]
            p_h = []
            p_l = []
            for s in np.array(np.unique(d['session']), dtype=int):
                pupil = np.array(d['pupil_d'])[np.array(d.session) == s]
                p_l.append(pupil <= np.percentile(pupil, 40))
                p_h.append(pupil >= np.percentile(pupil, 60))
            self.pupil_l_ind.append(np.concatenate(p_l))
            self.pupil_h_ind.append(np.concatenate(p_h))
        self.pupil_l_ind = np.concatenate(self.pupil_l_ind)
        self.pupil_h_ind = np.concatenate(self.pupil_h_ind)
        self.pupil_rest_ind = ~(self.pupil_h_ind + self.pupil_l_ind)

        # self.pupil_l_ind = []
        # self.pupil_h_ind = []
        # for subj_idx in self.subjects:
        #     d = self.parameters_joined[self.parameters_joined.subject == subj_idx]
        #     p_h = []
        #     p_l = []
        #     for r in np.array(np.unique(d['run']), dtype=int):
        #         pupil = np.array(d['pupil_b_lp'])[np.array(d.run) ==r]
        #         p_l.append( (pupil <= np.percentile(pupil, 25)) + (pupil >= np.percentile(pupil, 75)) )
        #         p_h.append( (pupil > np.percentile(pupil, 25)) & (pupil < np.percentile(pupil, 75)) )
        #     self.pupil_l_ind.append(np.concatenate(p_l))
        #     self.pupil_h_ind.append(np.concatenate(p_h))
        # self.pupil_l_ind = np.concatenate(self.pupil_l_ind)
        # self.pupil_h_ind = np.concatenate(self.pupil_h_ind)
        # self.pupil_rest_ind = -(self.pupil_h_ind + self.pupil_l_ind)

        # initialize behavior operator:
        d = {
            'subj_idx': pd.Series(self.subj_idx),
            'choice_a': pd.Series(np.array(self.yes, dtype=int)),
            'stimulus': pd.Series(np.array(self.present, dtype=int)),
            'rt': pd.Series(np.array(self.rt)) / 1000.0,
            'pupil_b': pd.Series(np.array(self.pupil_b)),
            'pupil_d': pd.Series(np.array(self.pupil_d)),
            'pupil_t': pd.Series(np.array(self.pupil_t)),
            'pupil_high': pd.Series(self.pupil_h_ind),
            'run': pd.Series(np.array(self.run, dtype=int)),
            'session': pd.Series(np.array(self.session, dtype=int)),
        }
        self.df = pd.DataFrame(d)
        self.behavior = myfuncs.behavior(self.df)
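
The "regress out RT per session" step above goes through the project's myfuncs.lin_regress_resid; a minimal numpy-only sketch of the same residualization (hypothetical data, mirroring the residual-plus-mean convention used above):

import numpy as np

rt = np.random.rand(100)                       # hypothetical regressor
pupil = 0.5 * rt + np.random.randn(100) * 0.1  # hypothetical signal
slope, intercept = np.polyfit(rt, pupil, 1)
resid = pupil - (slope * rt + intercept)       # what lin_regress_resid presumably returns
pupil_cleaned = resid + pupil.mean()           # add the mean back, as above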
Example #9
    def compute_omission_indices(self):
        """
        Here we're going to determine which trials should be counted as omissions due to
        (i) fixation errors (in decision interval):
                ->gaze of 150px or more away from fixation
                ->10 percent of time gaze of 75px or more away from fixation.
        (ii) blinks (in window 0.5s before decision interval till 0.5s after decision interval)
        (iii) too long (>3000ms, >2500ms) or too short (<250ms) RT
        (iv) first two trials
        (v) indicated by subject (only in exp 1)
        """

        self.omission_indices_answer = (self.rt > 4000.0)
        # self.omission_indices_answer = (self.rt > 4000.0) * (np.array(self.parameters.answer == 0))

        self.omission_indices_sac = np.zeros(self.nr_trials, dtype=bool)
        self.omission_indices_blinks = np.zeros(self.nr_trials, dtype=bool)
        if self.artifact_rejection == 'strict':
            # based on sacs:
            middle_x = 0
            middle_y = 0
            cut_off = 75
            x_matrix = []
            y_matrix = []
            for t in range(self.nr_trials):
                try:
                    indices = (self.time > self.cue_times[t]) * (
                        self.time < self.choice_times[t])
                except:
                    shell()
                x = self.gaze_x[indices]
                x = x - bn.nanmean(x)
                y = self.gaze_y[indices]
                y = y - bn.nanmean(y)
                if (x < -175).sum() > 0 or (x > 175).sum() > 0:
                    self.omission_indices_sac[t] = True
                if (y < -175).sum() > 0 or (y > 175).sum() > 0:
                    self.omission_indices_sac[t] = True
                if ((x > middle_x + cut_off).sum() +
                    (x < middle_x - cut_off).sum()) / float(
                        self.rt[t]) * 100 > 10:
                    self.omission_indices_sac[t] = True
                if ((y > middle_y + cut_off).sum() +
                    (y < middle_y - cut_off).sum()) / float(
                        self.rt[t]) * 100 > 10:
                    self.omission_indices_sac[t] = True
            # based on blinks:
            for t in range(self.nr_trials):
                if sum((self.blink_start_times > self.cue_times[t]) *
                       (self.blink_end_times < self.choice_times[t])) > 0:
                    self.omission_indices_blinks[t] = True

        self.omission_indices_rt = np.zeros(self.nr_trials, dtype=bool)
        for t in range(self.nr_trials):
            if self.rt[t] < 250:
                self.omission_indices_rt[t] = True

        self.omission_indices_first = np.zeros(self.nr_trials, dtype=bool)
        # self.omission_indices_first[0] = True
        if self.experiment == 1:
            self.omission_indices_subject = np.array(
                self.parameters['confidence'] == -1)
            # NOTE: the next assignment overwrites the mask computed just above
            self.omission_indices_subject = np.array(
                self.parameters['correct'] == -1)
        else:
            self.omission_indices_subject = np.zeros(self.nr_trials,
                                                     dtype=bool)
        self.omission_indices = (
            self.omission_indices_answer + self.omission_indices_sac +
            self.omission_indices_blinks + self.omission_indices_rt +
            self.omission_indices_first + self.omission_indices_subject)
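
Summing boolean arrays, as in the final line above, behaves as an element-wise OR in numpy; a standalone sketch with hypothetical masks:

import numpy as np

nr_trials = 10  # hypothetical
mask_rt = np.zeros(nr_trials, dtype=bool)
mask_blinks = np.zeros(nr_trials, dtype=bool)
mask_rt[3] = mask_blinks[7] = True
omissions = mask_rt + mask_blinks  # bool addition saturates, i.e. logical OR
assert (omissions == np.logical_or.reduce([mask_rt, mask_blinks])).all()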
Example #10
                ecolor='k')

ax.set_ylabel('RT improvement (ms)', fontsize='12')
ax.set_xlabel('Reward Location', fontsize='12')
ax.yaxis.set_label_coords(-0.07,
                          0.5)  # small x offset, .5 = halfway along y-axis
ax.xaxis.set_label_coords(0.45, -0.07)
ax.axhline(y=0, color='k')
ax.set_ylim(0, 65)
ax.set_xlim(0, 1.5)
ax.set_xticks([0.2, 0.65, 1.1])
ax.set_xticklabels(('High', 'Low', 'No'))
ax.legend((rects1[0], rects2[0]), ('90 degrees', '180 degrees'))
# Use the following axhline/text pairs to put significance markers between reward locations
ax.axhline(y=60, color='k', xmin=0.13, xmax=0.42)
ax.text(0.39, 60.5, '**', fontweight='bold')
ax.axhline(y=56, color='k', xmin=0.17, xmax=0.46)
ax.text(0.44, 56.5, '***', fontweight='bold')
ax.axhline(y=52, color='k', xmin=0.17, xmax=0.76)
ax.text(0.68, 52.5, '**', fontweight='bold')

os.chdir(
    '/Users/bronagh/Documents/Spatial_Reward_Toolbox/experiment_1/results/figs'
)
plt.savefig('Test_gain_groups.pdf')
plt.savefig('Test_gain_groups.png')
plt.show()
plt.close()

shell()
Example #11
    def interpolate_blinks(self,
                           method='linear',
                           lin_interpolation_points=[[-200], [200]],
                           spline_interpolation_points=[[-0.15, -0.075],
                                                        [0.075, 0.15]],
                           coalesce_period=500):
        """
		interpolate_blinks interpolates blink periods with method, which can be spline or linear.
		Use after self.blink_detection_pupil().
		spline_interpolation_points is a 2 by X list detailing the data points around the blinks
		(in s offset from blink start and end) that should be used for fitting the interpolation spline.

		The results are stored in self.interpolated_pupil, self.interpolated_x and self.interpolated_y
		without affecting the self.raw_... variables

		After calling this method, additional interpolation may be performed by calling self.interpolate_blinks2()
		"""
        self.logger.info('Interpolating blinks using interpolate_blinks')
        # set all missing data to 0:
        self.raw_pupil[self.raw_pupil < 1] = 0

        # blinks to work with -- preferably eyelink!
        if hasattr(self, 'eyelink_blink_data'):
            for i in range(len(self.blink_starts_EL)):
                self.raw_pupil[self.blink_starts_EL[i]:self.blink_ends_EL[
                    i]] = 0  # set all eyelink-identified blinks to 0:
        else:
            # NOTE: pd.rolling_mean was removed in newer pandas; the modern
            # equivalent is pd.Series(x).rolling(window).mean(). Also,
            # threshold_level is assumed to be defined at module scope.
            self.blinks_indices = pd.rolling_mean(
                np.array(self.raw_pupil < threshold_level, dtype=float),
                int(coalesce_period)) > 0
            self.blinks_indices = np.array(self.blinks_indices, dtype=int)
            self.blink_starts = self.timepoints[:-1][np.diff(
                self.blinks_indices) == 1]
            self.blink_ends = self.timepoints[:-1][np.diff(self.blinks_indices)
                                                   == -1]
            # now make sure we're only looking at the blinks that fall fully inside the data stream
            try:
                if self.blink_starts[0] > self.blink_ends[0]:
                    self.blink_ends = self.blink_ends[1:]
                if self.blink_starts[-1] > self.blink_ends[-1]:
                    self.blink_starts = self.blink_starts[:-1]
            except:
                shell()

        # we do not want to start or end with a 0:
        import copy
        self.interpolated_pupil = copy.copy(self.raw_pupil[:])
        self.interpolated_x = copy.copy(self.raw_gaze_X)
        self.interpolated_y = copy.copy(self.raw_gaze_Y)
        self.interpolated_pupil[:coalesce_period * 2] = max(
            np.percentile(
                self.interpolated_pupil[:int(self.sample_rate * 2.5)], 90),
            np.percentile(self.interpolated_pupil, 50))
        self.interpolated_pupil[-coalesce_period:] = max(
            np.percentile(
                self.interpolated_pupil[-int(self.sample_rate * 2.5):], 90),
            np.percentile(self.interpolated_pupil, 50))
        self.interpolated_x[:coalesce_period * 2] = np.percentile(
            self.interpolated_x[:int(self.sample_rate * 2.5)], 50)
        self.interpolated_x[-coalesce_period:] = np.percentile(
            self.interpolated_x[-int(self.sample_rate * 2.5):], 50)
        self.interpolated_y[:coalesce_period * 2] = np.percentile(
            self.interpolated_y[:int(self.sample_rate * 2.5)], 50)
        self.interpolated_y[-coalesce_period:] = np.percentile(
            self.interpolated_y[-int(self.sample_rate * 2.5):], 50)

        # detect zero edges (we just created from blinks, plus missing data):
        zero_edges = np.arange(self.interpolated_pupil.shape[0])[np.diff(
            (self.interpolated_pupil < 1))]
        if zero_edges.shape[0] == 0:
            pass
        else:
            zero_edges = zero_edges[:int(2 * np.floor(zero_edges.shape[0] /
                                                      2.0))].reshape(-1, 2)

        self.blink_starts = zero_edges[:, 0]
        self.blink_ends = zero_edges[:, 1]

        # check for neighbouring blinks (coalesce_period, default is 500ms), and string them together:
        start_indices = np.ones(self.blink_starts.shape[0], dtype=bool)
        end_indices = np.ones(self.blink_ends.shape[0], dtype=bool)
        for i in range(self.blink_starts.shape[0]):
            try:
                if self.blink_starts[
                        i + 1] - self.blink_ends[i] <= coalesce_period:
                    start_indices[i + 1] = False
                    end_indices[i] = False
            except IndexError:
                pass

        # these are the blink start and end samples to work with:
        if sum(start_indices) > 0:
            self.blink_starts = self.blink_starts[start_indices]
            self.blink_ends = self.blink_ends[end_indices]
        else:
            self.blink_starts = None
            self.blink_ends = None
        self.blink_starts = self.blink_starts[
            self.blink_starts > coalesce_period]
        self.blink_ends = self.blink_ends[self.blink_starts > coalesce_period]

        # do actual interpolation:
        if method == 'spline':
            points_for_interpolation = np.array(
                np.array(spline_interpolation_points) * self.sample_rate,
                dtype=int)
            for bs, be in zip(self.blink_starts, self.blink_ends):
                samples = np.ravel(
                    np.array([
                        bs + points_for_interpolation[0],
                        be + points_for_interpolation[1]
                    ]))
                sample_indices = np.arange(self.raw_pupil.shape[0])[np.sum(
                    np.array([self.timepoints == s for s in samples]), axis=0)]
                spline = interpolate.InterpolatedUnivariateSpline(
                    sample_indices, self.raw_pupil[sample_indices])
                self.interpolated_pupil[
                    sample_indices[0]:sample_indices[-1]] = spline(
                        np.arange(sample_indices[1], sample_indices[-2]))
                spline = interpolate.InterpolatedUnivariateSpline(
                    sample_indices, self.raw_gaze_X[sample_indices])
                self.interpolated_x[
                    sample_indices[0]:sample_indices[-1]] = spline(
                        np.arange(sample_indices[1], sample_indices[-2]))
                spline = interpolate.InterpolatedUnivariateSpline(
                    sample_indices, self.raw_gaze_Y[sample_indices])
                self.interpolated_y[
                    sample_indices[0]:sample_indices[-1]] = spline(
                        np.arange(sample_indices[1], sample_indices[-2]))
        elif method == 'linear':
            if self.blink_starts is not None:
                points_for_interpolation = np.array(
                    [self.blink_starts, self.blink_ends],
                    dtype=int).T + np.array(lin_interpolation_points).T
                for itp in points_for_interpolation:
                    self.interpolated_pupil[itp[0]:itp[-1]] = np.linspace(
                        self.interpolated_pupil[itp[0]],
                        self.interpolated_pupil[itp[-1]], itp[-1] - itp[0])
                    self.interpolated_x[itp[0]:itp[-1]] = np.linspace(
                        self.interpolated_x[itp[0]],
                        self.interpolated_x[itp[-1]], itp[-1] - itp[0])
                    self.interpolated_y[itp[0]:itp[-1]] = np.linspace(
                        self.interpolated_y[itp[0]],
                        self.interpolated_y[itp[-1]], itp[-1] - itp[0])
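
At its core, the 'linear' branch above bridges each blink window with a straight line; a self-contained sketch of that single step (hypothetical trace and window, plain numpy):

import numpy as np

pupil = np.sin(np.linspace(0, 3, 1000))  # hypothetical pupil trace
start, end = 400, 520                    # hypothetical blink window (samples)
pupil[start:end] = np.linspace(pupil[start], pupil[end], end - start)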
Example #12
def main():
    cli_args = docopt(__doc__, version="1.0")
    args = dict(cli_args)

    log = Logger()
    models = [
        chib1s_mc_model.ChibMCModel,
        chib2s_mc_model.ChibMCModel,
        chib3s_mc_model.ChibMCModel,
    ]
    tuples_cfg = tools.load_config("tuples")
    mc_cfg = tools.load_config("mc")
    mcfits_cfg = tools.load_config("mcfits")  # binning

    tree = ROOT.TChain("ChibAlg/Chib")
    utree = ROOT.TChain("UpsilonAlg/Upsilon")

    for filename in tuples_cfg[args["--data"]]:
        tree.Add(filename)
        utree.Add(filename)

    # TODO: create arrays with respect to ns
    if not args["--ns"]:
        ns_arr = [1, 2, 3]
    else:
        ns_arr = [int(args["--ns"])]

    if not args["--np"]:
        np_arr = [1, 2, 3]
    else:
        np_arr = [int(args["--np"])]
    if not args["--nb"]:
        nb_arr = [1, 2]
    else:
        nb_arr = [int(args["--nb"])]

    for ns in ns_arr:
        decay_cfg = mc_cfg["decays"]["ups%ds" % ns]
        for np in np_arr:
            for nb in nb_arr:
                if (ns == 2 and np == 1) or (ns == 3 and np < 3):
                    continue
                axis = mcfits_cfg["axis"]["ups%ds" % ns]
                process(
                    output_db=mcfits_cfg["output_db"],
                    output_figs=mcfits_cfg["output_figs"],
                    data_key=args["--data"],
                    tree=tree,
                    models=models,
                    ns=ns,
                    np=np,
                    nb=nb,
                    cut=decay_cfg["cut"],
                    pt_axis=axis,
                    is_unbinned=mc_cfg["unbinned?"],
                    binning=decay_cfg["binning"]["%d" % np],
                    is_save=args["-s"]
                )
                if args["-u"]:
                    print("Count upsilons")
                    count_upsilons(
                        name=mc_cfg["name"], data_key=args["--data"],
                        tree=utree, ns=ns, nb=nb, np=np,
                        pt_axis=axis,
                        cut=decay_cfg["ucut"], is_save=args["-s"])

    if args['-i']:
        db = tools.get_db(mcfits_cfg["output_db"])
        print(db.keys())
        shell()
Example #13
def thin_arrays(num_points,
                max_points=None,
                arrs=[],
                maskInds=False,
                sector=None):

    if sector is not None:
        from IPython import embed as shell
        shell()

#    if not max_points:
#        thinvalue = 5
#        #conussizex = 1500.0
#        globalrez = 10.0
#        #Size Test determines the percentage of the sector pixel size to the CONUS pixel size
#        sizetest = float((resolution)/(globalrez))
#        percenttest = sizetest*100
#        #shell()
#
#
#        if percenttest <= 75:
#            thinvalue = int((thinvalue*sizetest)+2)
#            #sizez = (sizez*sizetest)+6
#        if percenttest > 105:
#            #thinvalue = 9
#            thinvalue = int((thinvalue*sizetest)*2)
#            #sizez = 4
#            #sizez = float((sizez)/(sizetest*9))
#            #sizez = (sizez*sizetest)
#            #bold = 1
#            #bold = float((bold)/(sizetest*3))
#
#    #shell()

    if max_points is None:
        return arrs

    thinval = 1
    if num_points > max_points:
        thinval = int(num_points / max_points)
    retarrs = []

    # If we are masking the supplied indices in place within the passed arrs,
    # thin as requested and mask the thinned values.
    if maskInds is not False:
        log.info(
            'Masking values to thin: orig {0} points, by thin value {1} to new {2} points'
            .format(num_points, thinval, max_points))
        maskInds = (maskInds[0][0:num_points:thinval],
                    maskInds[1][0:num_points:thinval])
        for arr in arrs:
            log.info('        Number unmasked before thinning {1}: {0}'.format(
                np.ma.count(arr), arr.name))
            arr.mask = True
            arr.mask[maskInds] = False
            log.info('        Number unmasked after thinning {1}: {0}'.format(
                np.ma.count(arr), arr.name))
        return arrs

    # Only want to return original array if maskInds was not passed
    if thinval == 1:
        return arrs

    # If we are returning a smaller array, thin and return smaller arrays
    try:
        for arr in arrs:
            newthinval = int(math.sqrt(thinval))
            retarrs += [arr[::newthinval, ::newthinval]]
            log.info(
                'Thinned 2D array {3}: orig {0} points, by thin value {1} to new {2} points'
                .format(num_points, newthinval, max_points, arr.name))

    except IndexError:
        for arr in arrs:
            log.info(
                'Thinning 2D array {3}: orig {0} points, by thin value {1} to new {2} points'
                .format(num_points, thinval, max_points, arr.name))
            retarrs += [arr[::thinval]]

    return retarrs
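
A hedged usage sketch for thin_arrays (arrays and sizes are hypothetical; a .name attribute is attached because the log messages above read arr.name, and the module-level log is assumed to be configured):

import numpy as np

lats = np.ma.masked_array(np.linspace(-90, 90, 10000))
lons = np.ma.masked_array(np.linspace(-180, 180, 10000))
lats.name, lons.name = 'lats', 'lons'
# 1D arrays raise IndexError on 2D slicing, so they take the 1D thinning branch
thinned_lats, thinned_lons = thin_arrays(lats.size, max_points=1000,
                                         arrs=[lats, lons])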
Example #14
def main():
    cli_args = docopt(__doc__, version='v1.0')

    def fit(niters=1):
        for iter in range(niters):
            model.fitData()
            print(model)

            if cfg['save?']:
                save(cfg['name'], model, cli_args["--year"],
                     (int(cli_args["--ptbegin"]), int(cli_args["--ptend"])))

            if model.status:
                log.info("OK")
                break
            else:
                log.err("BAD")

    log = Logger()
    # cli_args = get_cli_args()
    if cli_args["--complete"]:
        complete()
        exit(0)
    tuples_cfg = tools.load_config("tuples")

    if cli_args["--year"] != "all":
        tuples = [tuples_cfg[cli_args["--year"]]]
    else:  # all
        tuples = [tuples_cfg[year] for year in ['2011', '2012']]
    log.info("Tuples: " + str(tuples))

    cfg = tools.load_config(cli_args["--decay"])
    cfg.update(cfg['profiles'].get(cli_args["--profile"], {}))
    del cfg["profiles"]

    tree = ROOT.TChain(cfg["tree"])
    for file_name in tuples:
        tree.Add(file_name)

    fitter = get_fitter(cli_args["--decay"])
    canvas = ROOT.TCanvas("c_fit",
                          "{year} {start}-{end} {name}".format(
                              year=cli_args["--year"],
                              start=cli_args["--ptbegin"],
                              end=cli_args["--ptend"],
                              name=cfg["name"])
                          )
    cut = cfg['cut']
    cut["pt_ups"] = (int(cli_args["--ptbegin"]), int(cli_args["--ptend"]))

    log.info("Cut: %s" % tools.cut_dict2str(cfg['cut']))

    # is_unbinned = (
    #     True if cfg["unbinned?"] and int(
    #         cli_args["--ptbegin"]) >= 10 else False
    # )
    
    is_unbinned = cfg["unbinned?"]
    if is_unbinned:
        data = source.dataset(tree=tree,
                              cut=cut,
                              field=cfg['field'],
                              has_splot=cfg['splot?'])
    else:
        data = source.histogram(tree=tree,
                                cut=cut,
                                field=cfg['field'],
                                nbins=cfg['nbins'])

    # mc = None

    log.info("Profile:" + pprint.pformat(cfg, indent=2))
    model = fitter.prepare_model(
        canvas=canvas,
        data=data,
        year=cli_args['--year'],
        interval=cfg['cut'][cfg['field']],
        nbins=cfg['nbins'],
        name=cfg['name'],
        has_splot=cfg['splot?'],
        profile=cfg,
        pt_ups=cut["pt_ups"]
    )

    fit()

    if not model.status:
        fit()

    if cli_args["--interactive"] or not model.status:
        shell()
Example #15
                            )
        # model.chib.sigma.fix(0.0205)
        cut = {"np": p, "nb": b, "dmplusm1s": [x1, x2]}
        for bin in binning:
            # model.user_labels = user_labels(bin[0], bin[1])
            cut["pt_ups"] = bin
            f = fit.Fit(model=model,
                        tuples=tuples,
                        cut=cut,
                        field="dm",
                        has_splot=True,
                        is_unbinned=cfg["is_unbinned"],
                        nbins=nbins
                        )
            db_key = "cb%d%d" % (b, p)
            f.process()
            # shell()
            is_good = f.run()
            print(f.model)
            if not is_good:
                print(t.red("Bad fit:"), name)
                shell()

            result[bin][db_key] = model.params()

            # model.save_image("figs/mc/fits/%s.pdf" % image_name)
            image_name = "%s_%d_%s.pdf" % (db_key, bin[0], str(bin[1]))
            utils.savemcfit(result, model.canvas, cfg["name"], image_name)

print(result)
Example #16
def run(subj):
    # set paths
    folder = os.getcwd()
    filepath = os.path.join(folder, subj)  # path to ascii file
    # include also csv file, for color info
    out_path = '/home/ede/projects/TwoDimensionsTrace/data/eye_csv'
    csv_path = '/home/ede/projects/TwoDimensionsTrace/data/os_csv/'+subj[:-3] + 'csv'
    # create the output folder if necessary
    if not os.path.exists(out_path):
        os.makedirs(out_path)  
    writepath = os.path.join(out_path,subj[:-4]+'_edf.csv')

    print("start processing", subj)
    shell()
    # read out asc file
    eye_data = []
    f = open(filepath, 'r')
    for line in f:
        values = str2list(line)
        eye_data.append(values)
    shell()

    include_col = ['block_no', 'correct','order','stim_type','stim2_x',\
        'stim2_y','stim2_col','df','fixated_target',\
        'practice','stim1_x','stim1_y','stim1_col','stim2_col','stim3_col',\
        'stim3_x','stim3_y','stim3_ori','stim2_ori','stim1_ori','trial_duration','trial_no',\
        'subject_nr','mode','stim_on','prep','miss','target1','target2','sleep',\
        's1_color','s1_ori','s2_color','s2_ori','s3_color','s3_ori','s4_color','s4_ori']
    
    # read in the csv file, keeping only the relevant columns
    csv_df = pd.read_csv(csv_path, usecols=include_col)

    # initialize variables necessary to keep track of trials/fixations/etc
    # while looping over eye data
    inTrial = False
    start_trial = ['Begin','search','canvas']# trigger of trial start
    stop_trial = ['circle' ,'fixated'] # message OpenSesame sent to eyetracker to indicate stop recording
    start_block = ['start_trial','start_block']
    trial_count = -1 
    line_count  = 0
    trials = []
    #shell()
    try:
        for idx,line in enumerate(eye_data):
            # skip not informative lines in ascii file
        
            if len(line)== 0:
                continue
    
            elif line[0]=='MSG':
                # first learn about the start and end of a trial
            
                if line[-1] in start_block:
                    previousTarget = None
                    switch_time = -9999999
                    no_rep = 1 
            
                elif line[-3:] == start_trial:
                    fix_count = 1
                    switch_interval = None
                    trial_count += 1
                    inTrial = True
                    multiple_sac = False
                    trial_start_time = line[1] # used to calculate eff.fix.dur
                    
                # end of trial
                elif line[-1] == stop_trial[-1]:
                    inTrial = False
    
        
            elif line[0]=='EFIX' and inTrial:
                fixatedTarget = csv_df['fixated_target'][trial_count]
                
                if fixatedTarget is None or previousTarget is None:
                    color_switch = None
                elif fixatedTarget == previousTarget:
                    color_switch = 0
                    no_rep += 1
                elif fixatedTarget != previousTarget:
                    color_switch = 1
                    no_rep = 0 
                    switch_interval = csv_df['stim_on'][trial_count]- switch_time
                    switch_time = csv_df['stim_on'][trial_count]
                    
                if fix_count >1:
                    multiple_sac = True
    
                trials.append({'subj':subj,'line_count':line_count,\
                'fix_no':fix_count,'begin_fix':line[2],'end_fix':line[3],\
                'fix_duration':line[4],'fix_x':line[5],'fix_y':line[6],
                'mult_sac':multiple_sac})
    
                if multiple_sac:
                    trials[-2]['mult_sac'] = True
    
                trials[-1]['target_category']= fixatedTarget
                trials[-1]['color_switch']= color_switch
                trials[-1]['no_rep']= no_rep
                trials[-1]['switch_interval']= switch_interval
                
                #add trial parameters to trial container, too
                columns = csv_df[:1].columns.values
                for key in columns:
                    try:
                        trials[-1][key]=csv_df[key][trial_count].replace(',',';')     
                    except:
                        trials[-1][key]=csv_df[key][trial_count] 
                # correct duration of first fixation in a trial
                if fix_count == 1:
                    trials[-1]["effective_fix_dur"]=line[3]-trial_start_time
                else:
                    trials[-1]["effective_fix_dur"]=line[4]
                 
                previousTarget = fixatedTarget
                line_count += 1
                fix_count += 1
                                
    except:
        pass
        #shell()
    
    # Write to file
    f = open(writepath, 'w')
    
    write2CSV(f, trials[0].keys())
    for trial in trials:
        write2CSV(f, trial.values())
        #print trial.values()
        #shell()
    f.close()
    print('subject completed successfully')
Example #17
def run_archer(xarray_obj, varname):
    ''' Run archer on the variable varname found in the xarray_obj'''
    KtoC_conversion = -273.15
    if varname in ['tb89h', 'tb89v', 'tb89hA', 'tb89hB', 'tb89vA', 'tb89vB', 'tb85h', 'tb85v', 'tb91h', 'tb91v']:
        archer_channel_type = '89GHz'
    elif varname in ['tb37h', 'tb37v', 'tb36h', 'tb36v']:
        archer_channel_type = '37GHz'
    elif 'BT' in varname:
        archer_channel_type = 'IR'
    elif 'Ref' in varname:
        archer_channel_type = 'Vis'
    else:
        LOG.warning('Unsupported sensor %s / channel %s type for ARCHER, returning without recentering',
                    xarray_obj.source_name, varname)
        return {}, {}, {}
    image = {}
    attrib = {}
    first_guess = {}
    attrib['archer_channel_type'] = archer_channel_type
    # This is currently unused in ARCHER, so just use GeoIPS platform names for attrib['sat']
    attrib['sat'] = xarray_obj.platform_name
    out_fname = 'archer_test_{0}_{1}_{2}.png'.format(xarray_obj.platform_name, xarray_obj.source_name,
                                                     archer_channel_type)
    image['lat_grid'] = xarray_obj['latitude'].to_masked_array()
    image['lon_grid'] = xarray_obj['longitude'].to_masked_array()
    image['data_grid'] = xarray_obj[varname].to_masked_array()
    import numpy
    num_masked = numpy.ma.count_masked(image['data_grid'])
    if num_masked > 0:
        LOG.warning('There are %s masked values in array of size %s, not attempting to run ARCHER', num_masked,
                    image['data_grid'].size)
        from IPython import embed as shell; shell()
        return {}, {}, {}
    # image['time_arr'] = Does not exist

    if 'BT' in varname:
        if xarray_obj[varname].units == 'celsius':
            image['data_grid'] = xarray_obj[varname].to_masked_array() - KtoC_conversion

    if xarray_obj.platform_name == 'himawari8':
        attrib['sensor'] = 'Imager'
        attrib['scan_type'] = 'Geo'
        attrib['nadir_lon'] = 140.7
    if xarray_obj.source_name == 'ssmis':
        attrib['sensor'] = 'SSMIS'
        attrib['scan_type'] = 'Conical'
    if xarray_obj.source_name == 'ssmi':
        attrib['sensor'] = 'SSMI'
        attrib['scan_type'] = 'Conical'
    if xarray_obj.source_name == 'tmi':
        attrib['sensor'] = 'TMI'
        attrib['scan_type'] = 'Conical'
    if xarray_obj.source_name == 'amsre':
        attrib['sensor'] = 'AMSRE'
        attrib['scan_type'] = 'Conical'
    if xarray_obj.source_name == 'amsr2':
        attrib['sensor'] = 'AMSR2'
        attrib['scan_type'] = 'Conical'
    if xarray_obj.source_name == 'gmi':
        attrib['sensor'] = 'GMI'
        attrib['scan_type'] = 'Conical'
    if xarray_obj.source_name == 'amsub':
        attrib['sensor'] = 'AMSUB'
        attrib['scan_type'] = 'Crosstrack'
    if xarray_obj.source_name == 'mhs':
        attrib['sensor'] = 'MHS'
        attrib['scan_type'] = 'Crosstrack'
    if xarray_obj.source_name == 'atms':
        attrib['sensor'] = 'ATMS'
        attrib['scan_type'] = 'Crosstrack'

    import calendar
    # This is currently unused by ARCHER - but it should probably be best track, not fx if using deck files ?
    first_guess['source'] = 'fx'
    first_guess['time'] = calendar.timegm(xarray_obj.start_datetime.timetuple())
    first_guess['vmax'] = xarray_obj.area_def.sector_info['wind_speed']
    first_guess['lat'] = xarray_obj.area_def.sector_info['clat']
    first_guess['lon'] = xarray_obj.area_def.sector_info['clon']

    from archer.archer4 import archer4
    in_dict, out_dict, score_dict = \
        archer4(image, attrib, first_guess, para_fix=True, display_filename=out_fname)
    return in_dict, out_dict, score_dict
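
A hedged call sketch for run_archer (sect_xarray is a hypothetical sectored Dataset carrying the attributes read above: platform_name, source_name, start_datetime and area_def.sector_info):

in_dict, out_dict, score_dict = run_archer(sect_xarray, 'tb89h')
if out_dict:
    LOG.info('ARCHER recenter output: %s', out_dict)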
Example #18
def pmw_mint(xarray_datasets, area_def, arg_dict=None):
    ''' Process xarray_dataset (xarray_datasets expected to be length 1 list) over area_def, with optional arg_dict.
    input xarray-based variables are defined in the readers with the GEOIPS2 framework

    Args:
        xarray_datasets (list) : list of xarray Dataset objects - for pmw_mint products, expect a length one list.
        area_def (AreaDefinition) : pyresample AreaDefinition specifying initial region to process.
        arg_dict (dict) : Dictionary of optional arguments (command_line_args are passed through)
    Returns:
        (list) : List of full paths to all products produced through pmw_mint processing
    '''

    LOG.info(arg_dict)
    final_products = []

    full_xarray = xarray_datasets[0]

    # DATASET_INFO is imported from readers.mint_ncdf - contains list of possible variables for each dataset
    for varname in DATASET_INFO[full_xarray.dataset_name]:

        if varname not in full_xarray.variables.keys():
            LOG.info('SKIPPING variable %s, not in current xarray object',
                     varname)
            continue

        # Interpolation radius of influence is set for each dataset separately in the mint_ncdf reader - adjust
        # in readers/mint_ncdf.py ROI_INFO dictionary
        # set_roi(full_xarray, varname)

        if area_def.sector_start_datetime:
            LOG.info('Trying to sector %s with dynamic time %s, %s points',
                     area_def.area_id, area_def.sector_start_datetime,
                     full_xarray['latitude'].size)
        else:
            LOG.info('Trying to sector %s, %s points', area_def.area_id,
                     full_xarray['latitude'].size)

        # Compile a list of variables that will be used to sector - the current data variable, and we will add in
        # the appropriate latitude and longitude variables (of the same shape as data), and if it exists the
        # appropriately shaped timestamp array
        vars_to_sect = [varname]  # start from the data variable itself

        # we have to have 'latitude','longitude" in the full_xarray, and 'timestamp' if we want temporal sectoring
        if 'latitude' in full_xarray.variables.keys():
            vars_to_sect += ['latitude']
        if 'longitude' in full_xarray.variables.keys():
            vars_to_sect += ['longitude']
        if 'timestamp' in full_xarray.variables.keys():
            vars_to_sect += ['timestamp']

        # I believe ARCHER can not have any masked data within the data grid, so create a separate smaller sector for
        # running archer.  The size of the "new" ARCHER sector could probably use some tweaking, though this worked
        # "out of the box" for my test case.
        # Probably in the end want to just run ARCHER first, get the new center, then create a new area_def with
        # the ARCHER center. and sector / register based on the ARCHER centered area_def. Ok, I'll just do that
        # really quickly.
        archer_area_def = set_atcf_area_def(area_def.sector_info,
                                            num_lines=500,
                                            num_samples=500,
                                            pixel_width=10000,
                                            pixel_height=10000)
        archer_xarray = sector_xarray_dataset(full_xarray, archer_area_def,
                                              vars_to_sect)

        try:
            from geoips2.sector_utils.atcf_tracks import run_archer
            in_dict, out_dict, score_dict = run_archer(archer_xarray, varname)
        except ValueError:
            from IPython import embed as shell
            shell()
            continue

        recentered_area_def = recenter_area_def(area_def, out_dict)

        # The list of variables in vars_to_sect must ALL be the same shape
        sect_xarray = sector_xarray_dataset(full_xarray, recentered_area_def,
                                            vars_to_sect)

        # numpy arrays fail if numpy_array is None, and xarrays fail if x_array == None
        if sect_xarray is None:
            LOG.info('No coverage - skipping')
            return final_products

        sect_xarray.attrs[
            'area_def'] = recentered_area_def  # add name of this sector to sector attribute
        if hasattr(sect_xarray, 'timestamp'):
            from geoips2.xarray_utils.timestamp import get_min_from_xarray_timestamp
            from geoips2.xarray_utils.timestamp import get_max_from_xarray_timestamp
            sect_xarray.attrs[
                'start_datetime'] = get_min_from_xarray_timestamp(
                    sect_xarray, 'timestamp')
            sect_xarray.attrs['end_datetime'] = get_max_from_xarray_timestamp(
                sect_xarray, 'timestamp')
            # Note:  need to test whether above two lines can reselect min and max time_info for this sector

        LOG.info('Sectored data start/end datetime: %s %s, %s points',
                 sect_xarray.start_datetime, sect_xarray.end_datetime,
                 numpy.ma.count(sect_xarray[varname].to_masked_array()))

        array_nums = [None]  # default: one 2D array, no channel dimension
        if len(sect_xarray[varname].shape) == 3:
            array_nums = range(0, sect_xarray[varname].shape[2])

        for array_num in array_nums:
            # selection of an interpolation scheme

            from geoips2.xarray_utils.interpolation import interp_nearest
            try:
                [interp_data] = interp_nearest(recentered_area_def,
                                               sect_xarray,
                                               varlist=[varname],
                                               array_num=array_num)
            except ValueError:
                from IPython import embed as shell
                shell()
            final_products += plot_interp_data(interp_data, sect_xarray,
                                               recentered_area_def, varname)

            from geoips2.xarray_utils.interpolation import interp_scipy_grid
            interp_data = interp_scipy_grid(recentered_area_def,
                                            sect_xarray,
                                            varname,
                                            array_num=array_num,
                                            method='linear')
            prodname = '{0}_{1}_GriddataLinear'.format(sect_xarray.source_name,
                                                       varname)
            final_products += plot_interp_data(interp_data,
                                               sect_xarray,
                                               recentered_area_def,
                                               varname,
                                               product_name=prodname)

    return final_products
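
A hedged driver sketch matching the docstring (the reader call and area_def construction are assumptions, not shown in the source):

# Hypothetical: one Dataset from a MINT reader plus a pyresample AreaDefinition.
products = pmw_mint([full_xarray], area_def)
for path in products:
    LOG.info('Produced %s', path)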
Example #19
def main():
    cli_args = docopt(__doc__, version='v1.0')

    cfg_tuples = tools.load_config("tuples")
    cfg_pol = tools.load_config("polarization")
    chib_chain = ROOT.TChain("ChibAlg/Chib")
    ups_chain = ROOT.TChain("UpsilonAlg/Upsilon")
    cfg_decays = tools.load_config("mc")["decays"]
    # ups_chain = ROOT.TChain("UpsilonAlg/Upsilon")

    name = cli_args["--name"]
    for data_key in cfg_pol["data_keys"]:
        save_to = "{name}/{data_key}/".format(name=name, data_key=data_key)
        chib_chain.Reset()
        ups_chain.Reset()
        for ntuple_file in cfg_tuples[data_key]:
            print("NTuple", ntuple_file)
            chib_chain.Add(ntuple_file)
            ups_chain.Add(ntuple_file)

        for ns in cfg_pol["ns"]:
            cfg_cuts = cfg_decays["ups%ds" % ns]
            chib_cut = cfg_cuts["cut"]
            ups_cut = cfg_cuts["ucut"]
            # tools.tree_preselect(chib_chain, chib_cut)
            # tools.tree_preselect(ups_chain, ups_cut)
            for np in cfg_pol["np"]:
                if np not in pdg.VALID_UPS_DECAYS[ns]:
                    continue

                axis = cfg_pol["axis"]["ups%ds" % ns][str(np)]

                for nb in cfg_pol["nb"]:

                    d, dangles = process(ns=ns, nb=nb, np=np, chain=chib_chain,
                                         cut=chib_cut, axis=axis)
                    n, nangles = process(ns=ns, nb=nb, np=np, chain=ups_chain,
                                         cut=ups_cut, axis=axis)
                    # ref = d[3] // n[3]
                    res = []

                    for i in range(3):
                        if nb == 1 and i > 1:
                            continue
                        # h_old = (d[i] // n[i]) / ref
                        h = d[3].Clone(pyroot.hID())
                        for j in h:
                            r1 = (d[i][j] / d[3][j]).value()
                            r2 = (n[i][j] / n[3][j]).value()
                            s2d = ((d[i][j].error() ** 2 -
                                    d[i][j].value() ** 2 / d[3][j].value()) /
                                   (d[3][j] - 1) ** 2)
                            s2n = ((n[i][j].error() ** 2 -
                                    n[i][j].value() ** 2 / n[3][j].value()) /
                                   (n[3][j] - 1) ** 2)
                            h[j] = (
                                pyroot.VE(r1, s2d) /
                                pyroot.VE(r2, s2n)
                            )
                        h.red()
                        # h_old.blue()
                        res.append(h)

                        h.Draw()
                        h.level(1)
                        if cli_args['--save']:
                            tools.save_figure(
                                name=(save_to +
                                      "chib{nb}{np}p_ups{ns}s_w{w}_ratio"
                                      .format(nb=nb, np=np, ns=ns, w=i))
                            )
                    for angle in dangles:
                        hunpol = dangles[angle][3]
                        hunpol.scale()

                        for i in range(0, 3):
                            h = dangles[angle][i]
                            if not h.GetEntries():
                                continue
                            h.scale()
                            wname = "w%d" % i
                            tools.draw_hists([h, hunpol], minimum=0)
                            if cli_args['--save']:
                                tools.save_figure(
                                    save_to +
                                    "/angles/{wname}_{angle}_chib{nb}{np}p_ups{ns}s".format(
                                        wname=wname, angle=angle, nb=nb, np=np, ns=ns)
                                )
                    if cli_args['--save']:
                        save(data_key, ns, np, nb, res,  d, n)
            # chib_chain.SetEntryList(0)
            # ups_chain.SetEntryList(0)
    shell()
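
The per-bin variance terms in the ratio loop above implement a weighted-efficiency error estimate; a minimal numeric sketch with plain floats standing in for pyroot.VE (hypothetical numbers):

# Hypothetical: d_i = count in category i with error e_i, d_tot = total count.
d_i, e_i, d_tot = 40.0, 6.5, 100.0
r1 = d_i / d_tot                                        # as in d[i][j] / d[3][j]
s2d = (e_i ** 2 - d_i ** 2 / d_tot) / (d_tot - 1) ** 2  # its variance estimate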
Example #20
def drift_diffusion_hddm(data,
                         samples=10000,
                         n_jobs=6,
                         run=True,
                         parallel=True,
                         model_name='model',
                         model_dir='.',
                         accuracy_coding=False):

    import hddm
    import os

    # run the model:
    if run:
        if parallel:
            job_server = pp.Server(ppservers=(), ncpus=n_jobs)
            start_time = time.time()
            jobs = [(trace_id,
                     job_server.submit(run_model,
                                       (trace_id, data, model_dir, model_name,
                                        samples, accuracy_coding), (),
                                       ('hddm', )))
                    for trace_id in range(n_jobs)]
            results = []
            shell()
            for trace_id, job in jobs:
                results.append(job())
            print("Time elapsed: ", time.time() - start_time, "s")
            job_server.print_stats()

            # save:
            for i in range(n_jobs):
                model = results[i]
                model.save(
                    os.path.join(model_dir, '{}_{}'.format(model_name, i)))
        else:
            start_time = time.time()
            model = run_model(3, data, model_dir, model_name, samples,
                              accuracy_coding)
            model.save(os.path.join(model_dir, '{}_md{}'.format(model_name,
                                                                3)))

            # print point estimates
            results = model.gen_stats()
            results.to_csv(os.path.join(fig_dir, 'diagnostics',
                                        'results3.csv'))

            # dic:
            text_file = open(os.path.join(fig_dir, 'diagnostics', 'DIC3.txt'),
                             'w')
            text_file.write("Model {}: {}\n".format(model_name, model.dic))
            text_file.close()
            print("Time elapsed: ", time.time() - start_time, "s")

    # load the models:
    else:
        print('loading existing model(s)')
        if parallel:
            model = []
            for i in range(n_jobs):
                model.append(
                    hddm.load(
                        os.path.join(model_dir, '{}_{}'.format(model_name,
                                                               i))))
        else:
            model = hddm.load(
                os.path.join(model_dir, '{}_md{}'.format(model_name, 1)))
    return model
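
# A minimal usage sketch (an assumption, not from the original source): `data`
# is a DataFrame in the column format hddm expects ('rt', 'response',
# 'subj_idx'); the sample count, model name and directory are hypothetical.
#
# model = drift_diffusion_hddm(data=df, samples=5000, n_jobs=4,
#                              run=True, parallel=True,
#                              model_name='ddm_v1', model_dir='./models')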
Example #21
0
def do_simulations(params):
    rt = []
    response = []
    stimulus = []
    traces = []
    for stim in [1, 0]:

        # get traces:
        x = get_DDM_traces(
            v=params['v'],
            z=params['z'],
            dc=params['dc'],
            dc_slope=params['dc_slope'],
            sv=params['sv'],
            stim=stim,
            nr_trials=params['nr_trials'],
            tmax=tmax,
            dt=dt,
        )

        # get bounds:
        if params['bound'] == 'default':
            b1, b0 = _bounds(a=params['a'], tmax=tmax, dt=dt)
        elif params['bound'] == 'collapse_linear':
            b1, b0 = _bounds_collapse_linear(a=params['a'],
                                             c1=params['c1'],
                                             c0=params['c0'],
                                             tmax=tmax,
                                             dt=dt)
        elif params['bound'] == 'collapse_hyperbolic':
            b1, b0 = _bounds_collapse_hyperbolic(a=params['a'],
                                                 c=params['c'],
                                                 tmax=tmax,
                                                 dt=dt)
        else:
            raise ValueError("unknown bound type: %s" % params['bound'])

        # apply bounds:
        rt_dum, response_dum = apply_bounds_diff_trace(x=x, b1=b1, b0=b0)

        # store results:
        rt.append((rt_dum * dt) + ndt)
        response.append(response_dum)
        stimulus.append(np.ones(params['nr_trials']) * stim)
        traces.append(x)

    df = pd.DataFrame()
    df.loc[:, 'rt'] = np.concatenate(rt)
    df.loc[:, 'response'] = np.concatenate(response)
    df.loc[:, 'stimulus'] = np.concatenate(stimulus)
    df.loc[:, 'correct'] = np.array(
        np.concatenate(stimulus) == np.concatenate(response), dtype=int)
    df.loc[:, 'subj_idx'] = params['subj_idx']
    df.to_csv(os.path.join(data_folder,
                           'df_{}.csv'.format(params['subj_idx'])))

    # clamp each trace at its first bound crossing, so that the plotted
    # traces stay flat once a decision has been reached:
    traces = np.vstack(traces)
    for i in range(traces.shape[0]):
        if np.any(traces[i, :] > params['a']):
            traces[i,
                   np.where(traces[i, :] > params['a'])[0][0]:] = params['a']
        if np.any(traces[i, :] < 0):
            traces[i, np.where(traces[i, :] < 0)[0][0]:] = 0

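    # signal-detection outcome masks: hit / false alarm / miss / correct reject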
    hit = np.array((df['stimulus'] == 1) & (df['response'] == 1))
    fa = np.array((df['stimulus'] == 0) & (df['response'] == 1))
    miss = np.array((df['stimulus'] == 1) & (df['response'] == 0))
    cr = np.array((df['stimulus'] == 0) & (df['response'] == 0))

    shell()

    fig = plt.figure(figsize=(2, 2))

    for t in traces[hit, :][0:5000]:
        plt.plot(t, alpha=0.005, lw=1, color='black')
    # for t in traces[fa,:][0:500]:
    #     plt.plot(t, alpha=0.02, color='orange')
    # for t in traces[miss,:][0:500]:
    #     plt.plot(t, alpha=0.02, color='green')
    # for t in traces[cr,:][0:500]:
    #     plt.plot(t, alpha=0.02, color='green')

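    # mean trace per outcome, with a dashed regression line fitted to the
    # first 20 timesteps; the printed value is the fitted slope: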
    for trial, color, alpha in zip([hit, fa, miss, cr],
                                   ['orange', 'orange', 'green', 'green'],
                                   [1, 0.5, 0.5, 1]):
        y = np.nanmean(traces[trial, :], axis=0)
        x = np.arange(y.shape[0])
        ind = np.zeros(y.shape[0], dtype=bool)
        ind[0:20] = True
        (m, b) = sp.polyfit(x[ind], y[ind], 1)
        regression_line = sp.polyval([m, b], x)
        plt.plot(y, color=color, lw=2, alpha=alpha)
        plt.plot(x[ind], regression_line[ind], color='black', lw=1, ls='--')
        print(m)
    plt.axhline(0, color='k', lw=0.5)
    plt.axhline(params['a'], color='k', lw=0.5)
    plt.axhline(params['z'], color='k', lw=0.5)
    plt.xlim(0, 30)
    plt.ylim(-0.1, params['a'] + 0.1)
    plt.xlabel('Timesteps')
    plt.ylabel('Decision variable')
    sns.despine()
    plt.tight_layout()
    fig.savefig(
        os.path.join(fig_folder,
                     'simulate_slopes_{}.pdf'.format(params['subj_idx'])))
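
# A sketch of the `params` dict this function expects, inferred from the
# accesses above; the numeric values are hypothetical, and the module-level
# globals tmax, dt, ndt, data_folder and fig_folder must be defined elsewhere.
#
# params = dict(v=1.0, z=0.5, dc=0.0, dc_slope=0.0, sv=0.5, a=1.0,
#               bound='default',   # or 'collapse_linear' (needs c1, c0)
#                                  # or 'collapse_hyperbolic' (needs c)
#               nr_trials=1000, subj_idx=0)
# do_simulations(params)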
Example #22
0
    def process(self, sdoc):
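        # Import one M-Files search document (`sdoc`) into the local Django
        # models: reuse an existing dm.Document matched by UN number when
        # possible, otherwise create it and attach its terms, programmes,
        # chemicals and document types.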
        un_number = sdoc.property(UN_NUMBER)
        doc = None
        if un_number:
            try:
                doc = dm.Document.objects.get(un_number=un_number)
            except ObjectDoesNotExist:
                pass

        if not doc:
            doc = dm.Document(mfiles_id=sdoc.mfiles_id)
            doc.un_number = un_number
            doc.date_publication = sdoc.created
            doc.date_last_update = sdoc.modified
            doc.vault = sdoc.vault.name
            doc.period_start = sdoc.created
            doc.period_end = sdoc.modified

            players = sdoc.property(PLAYER)
            if players:
                country_names = pycountry.countries.indices['name']
                countries = ', '.join(
                    [pycountry.countries.get(name=p).alpha2
                     for p in players if p in country_names])
                authors = ', '.join(
                    [p for p in players if p not in country_names])

                doc.country = countries
                if authors:
                    doc.author = authors
            try:
                doc.save()
            except Exception:
                self.stderr.write("->" + str(doc) + str(sdoc))
                shell()
                return

            dm.Term.objects.bulk_create(
                [dm.Term(doc=doc, value=term)
                 for term in sdoc.property(TERMS, as_list=True)
                 ]
            )

            dm.Program.objects.bulk_create(
                [dm.Program(doc=doc, value=term)
                 for term in sdoc.property(PROGRAMMES, as_list=True)
                 ]
            )

            dm.Chemical.objects.bulk_create(
                [dm.Chemical(doc=doc, value=term)
                 for term in sdoc.property(CHEMICALS, as_list=True)
                 ]
            )

            classes = set()
            for cl in CLASSES:
                classes.update(sdoc.property(cl, as_list=True))

            dm.DocType.objects.bulk_create(
                [dm.DocType(doc=doc, value=term)
                 for term in classes
                 ]
            )

        self.process_lang_and_files(sdoc=sdoc, doc=doc)