Example #1
def split_files(files, n):
    """Split `files` into chunks of n items; any remainder becomes a final, shorter chunk."""
    from pylab import split, array  # pylab re-exports numpy's split and array
    rest = len(files) % n
    if rest:
        super_list = split(array(files)[:-rest], len(files) // n)
        super_list.append(files[-rest:])
    else:
        super_list = split(array(files), len(files) // n)
    return super_list
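A quick usage sketch with made-up file names; note the remainder chunk stays a plain list while the full chunks come back as numpy arrays:

files = ['a.txt', 'b.txt', 'c.txt', 'd.txt', 'e.txt']
chunks = split_files(files, 2)
for c in chunks:
    print(list(c))   # ['a.txt', 'b.txt'], ['c.txt', 'd.txt'], ['e.txt']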
Example #2
def split_files(files, n):
    from pylab import split, array
    rest = len(files) % n
    if rest:
        # `//` replaces the original Python 2 `/`, which floored for ints there
        # but yields a float (and breaks split) under Python 3
        super_list = split(array(files)[:-rest], len(files) // n)
        super_list.append(files[-rest:])
    else:
        super_list = split(array(files), len(files) // n)
    return super_list
Example #3
def getLowDimensionalSegments(highDimensionalData, n_components=2, plt=False,
                              title="Latent space segments"):
    (lowDimensionalData, explainedVariance) = pca.pca(highDimensionalData, n_components)
    (mins, maxs) = segment.segmentationPoints(lowDimensionalData[:, 0])
    segments = pl.split(lowDimensionalData, maxs)[1:-1]
    if plt:
        plot.plotGridOf2Ds(segments, title)
    return (segments, explainedVariance)
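Here `pca`, `segment`, and `plot` are project-local modules. In isolation, the splitting step works like this minimal numpy sketch, where `maxs` stands in for the detected boundary indices and the `[1:-1]` slice drops the partial segments before the first and after the last boundary:

import numpy as np

data = np.arange(20).reshape(10, 2)    # stand-in for lowDimensionalData
maxs = [3, 6, 8]                       # hypothetical segmentation indices
segments = np.split(data, maxs)[1:-1]  # keep only the interior segments
print([s.shape for s in segments])     # [(3, 2), (2, 2)]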
Example #4
import vtk  # required below; `renderer` is assumed to be a module-level vtkRenderer

def normal_drawer(plane_normal, plane_seed):
    print("Drawing plane normals")
    for plane in range(len(plane_normal)):

        # Extract the "[x, y, z]" seed coordinates from the seed string.
        pl = plane_seed[plane]
        pl = pl[pl.find('('):]
        pl = pl[pl.find('[') + 1:]
        pl = pl[:pl.find(']')]
        pt = pl.split(',')
        p0x = float(pt[0])
        p0y = float(pt[1])
        p0z = float(pt[2])

        # Line from the seed point to the seed point plus the normal vector.
        pn = plane_normal[plane]
        p0 = [p0x, p0y, p0z]
        p1 = [p0x + pn[0], p0y + pn[1], p0z + pn[2]]

        # Create a vtkPoints object and store the two endpoints in it
        points = vtk.vtkPoints()
        points.InsertNextPoint(p0)
        points.InsertNextPoint(p1)

        # Create a cell array to store the lines in and add the line to it
        lines = vtk.vtkCellArray()
        line = vtk.vtkLine()
        line.GetPointIds().SetId(0, 0)
        line.GetPointIds().SetId(1, 1)
        lines.InsertNextCell(line)

        # Create a polydata to store everything in
        linesPolyData = vtk.vtkPolyData()

        # Add the points and the lines to the dataset
        linesPolyData.SetPoints(points)
        linesPolyData.SetLines(lines)

        # Setup actor and mapper
        mapper = vtk.vtkPolyDataMapper()
        if vtk.VTK_MAJOR_VERSION <= 5:
            mapper.SetInput(linesPolyData)
        else:
            mapper.SetInputData(linesPolyData)

        actor = vtk.vtkActor()
        actor.SetMapper(mapper)
        actor.GetProperty().SetColor(1, 0, 0)  # red
        renderer.AddActor(actor)
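The string slicing at the top of the loop pulls the bracketed seed coordinates out of the seed string; a standalone sketch with a made-up seed string of that shape:

seed = "Plane(origin=[1.0, 2.0, 3.0])"  # hypothetical seed-string format
s = seed[seed.find('('):]               # "(origin=[1.0, 2.0, 3.0])"
s = s[s.find('[') + 1:]                 # "1.0, 2.0, 3.0])"
s = s[:s.find(']')]                     # "1.0, 2.0, 3.0"
x, y, z = (float(v) for v in s.split(','))
print(x, y, z)                          # 1.0 2.0 3.0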
Example #6
import numpy as np
try:
    from pylab import find, split   # as in the original (pre-matplotlib-3.1 pylab)
except ImportError:
    find, split = np.flatnonzero, np.split   # modern equivalents

def maxima(data, local_max=True):
    """
    :param data: finite difference data in which to find local maxima or minima
    :param local_max: set to True for local maxima, False for local minima
    :return: index and amplitude lists giving the locations of the max/min
    """
    thres = max(data) / 2.0

    if local_max:
        I = find(data > thres)
    else:
        I = find(data < -thres)     # local minima

    # Peak detection: gaps in I separate distinct runs of threshold crossings
    J = find(np.diff(I) > 1)

    xpeak_index = []
    ypeak_loc = []

    for K in split(I, J + 1):       # one K per contiguous run of indices
        ytag = data[K]
        if local_max:
            peak = find(ytag == max(ytag))
        else:
            peak = find(ytag == min(ytag))
        # add 2 to the index to correct for the offset introduced by the second diff
        xpeak_index.append(peak[0] + K[0] + 2)
        ypeak_loc.append(ytag[peak[0]])

    return xpeak_index, ypeak_loc
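A usage sketch on a synthetic second-difference signal, assuming maxima and its imports above are in scope:

import numpy as np

t = np.linspace(0, 4 * np.pi, 400)
sig = np.diff(np.sin(t), n=2)   # second-difference signal, matching the +2 offset correction
idx, amp = maxima(sig, local_max=True)
print(idx, amp)                 # indices and amplitudes of the detected extrema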
Example #7
File: first.py Project: ttm/ipb
import networkx as x, pylab as p

with open('../data/first.txt', 'r') as f:
    ppl = f.read().split('\n')[:-1]

nodes = {
    'teachers': [],
    'who': '',
    'students': []
}

for line in ppl:  # renamed from `p`, which shadowed the pylab module imported above
    p_ = line.split(' ')
    quality = int(p_[0])
    name = ' '.join(p_[1:])
    if quality > 0:
        nodes['teachers'].append(name)
    elif quality == 0:
        nodes['who'] = name
    else:
        nodes['students'].append(name)

g = x.DiGraph()

for t in nodes['teachers']:
    g.add_edge(t, nodes['who'])

for s in nodes['students']:
    g.add_edge(nodes['who'], s)

x.draw_networkx(g)
p.show()  # display the figure; the pylab import was otherwise unused
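The parsing above implies a first.txt layout like the following (names are hypothetical): each line starts with an integer quality, positive for teachers, zero for the single central person, negative for students, followed by the person's name:

2 Alice Smith
1 Bob Jones
0 Carol Center
-1 Dan Doe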
Example #8
import argparse

import pylab as pl
parser = argparse.ArgumentParser(
    description='Plot orbit data on x-y, y-z, x-z plane')
parser.add_argument('filename', type=str)
parser.add_argument('-s',
                    '--scale',
                    type=float,
                    help='scale length',
                    default=1.)
args = parser.parse_args()

d = pl.loadtxt(args.filename).T

fig, axs = pl.subplots(2, 2)
# each 9-row block holds x, y, z, vx, vy, vz, ax, ay, az for one orbit;
# `//` replaces the original `/`, which yields a float under Python 3
for p in pl.split(d, d.shape[0] // 9):
    x, y, z, vx, vy, vz, ax, ay, az = p
    x /= args.scale
    y /= args.scale
    z /= args.scale
    axs[0, 0].plot(x, y, '.')
    axs[0, 1].plot(x, z, ',-')
    axs[1, 0].plot(y, z, ',-')

axs[0, 0].set_xlabel('x')
axs[0, 0].set_ylabel('y')

axs[0, 1].set_xlabel('x')
axs[0, 1].set_ylabel('z')

axs[1, 0].set_xlabel('y')
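The split call above relies on the row count being an exact multiple of 9; a minimal sketch with synthetic data (shapes are illustrative only):

import numpy as np

d = np.arange(18 * 5).reshape(18, 5)    # two orbits, 9 rows each
for p in np.split(d, d.shape[0] // 9):  # -> two (9, 5) blocks
    x, y, z, vx, vy, vz, ax, ay, az = p
    print(x.shape)                      # (5,)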
Example #9
    def get_kin_from_idict(self, sid, tid, idict, cachedir='./cache'):
        """
        This function is a wrapper for get_kin_at_phase, to compute the
        kinematic state at the apices which are mentioned in "idict" (idict
        stems from a stored SLIP data file).

        e.g.::

            import mutils.io
            import subjData 
            sd = subjData.sdata()
            _, _, _, _, idict = mutils.io.get_data(sid, tid, ... )
            kin = sd.get_kin_from_idict(sid, tid, idict)

        ''''''''''
        Parameter:
        ''''''''''
        sid : *integer* 
            subject id
        tid : *integer*
            trial-type id
        idict : *dictionary* (special format required, see description above)
            the 'information dictionary' that contains information about at
            which phases to get the data

        ''''''''
        Returns:
        ''''''''        
        dat_l : *list*
            a list of arrays that contain the states at a left apex, defined 
            by '*self*.selection', at the phases defined in idict.
        dat_r : *list*
            same as dat_l, only for right apices

        """

        # look for cached files
        cindexname = cachedir + os.sep + 'cachefiles.dict'
        try:
            mfile = gzip.open(cindexname, mode='rb')
            cfiles = pickle.load(mfile)
            mfile.close()
        except (IOError, EOFError, KeyError):
            print('no cache index found')
            cfiles = {}

        # check marker list and subject Id and trial ID
        cfilefound = False
        for fname, fcontent in cfiles.items():  # iteritems() in the original Python 2
            if (fcontent['selection'] == self.selection
                    and fcontent['sid'] == sid and fcontent['tid'] == tid
                    and fcontent['reps'] == idict['reps']):
                print('cached file (', fname, ') found in index')
                dat_l, dat_r = mload(cachedir + os.sep + fname)
                cfilefound = True
                break

        if not cfilefound:
            print('no cache file found - accessing database')
            # create list of phases
            splitpoints = cumsum(idict['n_in_ws'])[:-1]
            lop_l = split(idict['phases_l'], splitpoints)
            lop_r = split(idict['phases_r'], splitpoints)

            dat_l = self.get_kin_at_phase(sid,
                                          tid,
                                          lop_l,
                                          reps=idict['reps'],
                                          reloadData=True)
            dat_r = self.get_kin_at_phase(sid,
                                          tid,
                                          lop_r,
                                          reps=idict['reps'],
                                          reloadData=False)
            nfnames = os.listdir(cachedir)
            fid = uuid.uuid4().hex
            while fid in nfnames:
                fid = uuid.uuid4().hex
            print('storing data in cache')
            msave(cachedir + os.sep + fid, [dat_l, dat_r])
            cfiles.update({
                fid: {
                    'sid': sid,
                    'tid': tid,
                    'reps': idict['reps'],
                    'selection': self.selection
                }
            })
            msave(cindexname, cfiles)

        return dat_l, dat_r
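The splitpoints construction above turns per-window sample counts into offsets for split; a toy sketch (the counts are hypothetical):

import numpy as np

n_in_ws = [3, 2, 4]                    # hypothetical samples per window
phases = np.arange(9)
splitpoints = np.cumsum(n_in_ws)[:-1]  # [3, 5]
print(np.split(phases, splitpoints))   # [0 1 2], [3 4], [5 6 7 8]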
Example #10
def getQuaternionSegmentsByRawData(highDimensionalData, quaternionData):
    (lowDimensionalData, explainedVariance) = pca.pca(highDimensionalData, n_components=1)
    (mins, maxs) = segment.segmentationPoints(lowDimensionalData[:, 0])
    segments = pl.split(quaternionData, maxs)[1:-1]
    return segments
Example #11
def getHighAndLowDimSegments(highDimensionalData, n_components=3, smoothingWindow=100):
    (lowDimensionalData, explainedVariance) = pca.pca(highDimensionalData, n_components)
    (mins, maxs) = segment.segmentationPoints(lowDimensionalData[:, 0], windowSize=smoothingWindow)
    HDsegments = pl.split(highDimensionalData, maxs)[1:-1]
    LDsegments = pl.split(lowDimensionalData, maxs)[1:-1]
    return (HDsegments, LDsegments, explainedVariance)
Example #12
import pylab as pl  # assumed; this fragment also expects xx and yy (phase and flux arrays) from upstream

arg = xx.argsort()
xx = xx[arg]
yy = yy[arg]

# limit orb phase to [0.8,1.2]
lt = xx < 1.2
gt = xx > 0.8
xx = xx[lt*gt]
yy = yy[lt*gt]

# average lightcurve in N bins
N = 40

# the except branch trims points from the end if the array cannot be split into N equal parts
try:
    xx = pl.average(pl.split(xx, N), 1)
    yy = pl.average(pl.split(yy, N), 1)
except ValueError:  # split raises ValueError when len(xx) is not divisible by N
    l = (len(xx) // N) * N
    xx = pl.average(pl.split(xx[0:l], N), 1)
    yy = pl.average(pl.split(yy[0:l], N), 1)

# save the lightcurve as a two-column text file
temp = [xx, yy]
pl.savetxt('ave_lightcurve.dat', pl.array(temp).transpose())  # savetxt replaces the long-removed pylab.save text writer

pl.plot(xx,yy,'.')
pl.show()
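The bin-averaging idiom used above, in isolation (toy numbers): split the sorted array into N equal chunks and average each one.

import numpy as np

xx = np.linspace(0.8, 1.2, 12)
N = 4
binned = np.average(np.split(xx, N), axis=1)  # one mean per bin of 3 points
print(binned)                                 # 4 bin averages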
Example #13
pp = pp[lt*gt]
aa = aa[lt*gt]
sa = sa[lt*gt]
sp = sp[lt*gt]


# average lightcurve in N bins
N = 20

# the except branch trims points from both ends if the array cannot be split into N equal parts
try:
    xx = pl.average(pl.split(xx, N), 1)
    pp = pl.average(pl.split(pp, N), 1)
    aa = pl.average(pl.split(aa, N), 1)
    sp = pl.average(pl.split(sp**2, N), 1)**0.5   # errors are RMS-combined
    sa = pl.average(pl.split(sa**2, N), 1)**0.5

except ValueError:  # split raises ValueError when len(xx) is not divisible by N

    l = (len(xx) // N) * N
    dl = len(xx) - l
    print('Dropped %s of %s points' % (dl, len(xx)))
    # dl//2:-dl//2 removes exactly dl points split between the two ends
    # (-dl//2 floors, so an odd dl still trims dl points in total)
    xx = pl.average(pl.split(xx[dl//2:-dl//2], N), 1)
    pp = pl.average(pl.split(pp[dl//2:-dl//2], N), 1)
    aa = pl.average(pl.split(aa[dl//2:-dl//2], N), 1)
    sp = pl.average(pl.split(sp[dl//2:-dl//2]**2, N), 1)**0.5
Example #15
    def _get_session_predictors(self, session):
        '''Calculate and return values of predictor variables for all trials in session.
        '''

        # Evaluate base (non-lagged) predictors from session events.

        choices, transitions_AB, second_steps, outcomes = ut.CTSO_unpack(session.CTSO, dtype=bool)
        trans_state = session.blocks['trial_trans_state']    # Trial-by-trial state of the transition matrix (A vs B).

        if self.mov_ave_CR:
            trans_mov_ave = np.zeros(len(choices))
            trans_mov_ave[1:] = (5./3.) * ut.exp_mov_ave(transitions_AB - 0.5, self.tau, 0.)[:-1] # Average of 0.5 for constant 0.8 transition prob.
            transitions_CR = 2 * (transitions_AB - 0.5) * trans_mov_ave
            transition_CR_x_outcome = 2. * transitions_CR * (outcomes - 0.5) 
            choices_0_mean = 2 * (choices - 0.5)
        else:  
            transitions_CR = transitions_AB == trans_state
            transition_CR_x_outcome = transitions_CR == outcomes 

        bp_values = {} 

        for p in self.base_predictors:

            if p == 'correct':  # 0.5, 0, -0.5 for high poke being correct, neutral, incorrect option.
                bp_values[p] = 0.5 * (session.blocks['trial_rew_state'] - 1) * \
                              (2 * session.blocks['trial_trans_state'] - 1)  
      
            elif p == 'side': # 0.5, -0.5 for left, right side reached at second step. 
                bp_values[p] = second_steps - 0.5

            elif p == 'side_x_out': # 0.5, -0.5.  Side predictor inverted by trial outcome.
                bp_values[p] = (second_steps == outcomes) - 0.5

            # The following predictors all predict stay probability rather than high vs low.
            # e.g. the outcome predictor represents the effect of outcome on stay probability.
            # This is implemented by inverting the predictor dependent on the choice made on the trial.

            elif p == 'choice': # 0.5, -0.5 for choices high, low.
                bp_values[p] = choices - 0.5

            elif p == 'good_side': # 0.5, 0, -0.5 for reaching good, neutral, bad second link state.
                bp_values[p] = 0.5 * (session.blocks['trial_rew_state'] - 1) * (2 * (second_steps == choices) - 1)
                    
            elif p == 'outcome': # 0.5, -0.5 for rewarded, not rewarded.
                bp_values[p] = (outcomes == choices) - 0.5

            elif p == 'block': # 0.5, -0.5 for A, B blocks.
                bp_values[p] = (trans_state == choices) - 0.5

            elif p == 'block_x_out': # 0.5, -0.5 for A, B blocks inverted by trial outcome.
                bp_values[p] = ((outcomes == trans_state) == choices) - 0.5

            elif p == 'trans_CR': # 0.5, -0.5 for common, rare transitions.
                if self.mov_ave_CR:
                    bp_values[p] = transitions_CR * choices_0_mean
                else:
                    bp_values[p] = (transitions_CR == choices) - 0.5

            elif p == 'trCR_x_out': # 0.5, -0.5 for common, rare transitions inverted by trial outcome.
                if self.mov_ave_CR:
                    bp_values[p] = transition_CR_x_outcome * choices_0_mean
                else:
                    bp_values[p] = (transition_CR_x_outcome == choices) - 0.5

            elif p == 'trans_CR_rew': # 0.5, -0.5 for common, rare transitions on rewarded trials, otherwise 0.
                if self.mov_ave_CR:
                    bp_values[p] = transitions_CR * choices_0_mean * outcomes
                else:
                    bp_values[p] = ((transitions_CR == choices) - 0.5) * outcomes

            elif p == 'trans_CR_non_rew': # 0.5, -0.5 for common, rare transitions on non-rewarded trials, otherwise 0.
                if self.mov_ave_CR:
                    bp_values[p] = transitions_CR * choices_0_mean * ~outcomes
                else:
                    bp_values[p] = ((transitions_CR == choices) - 0.5) * ~outcomes

        # predictor orthogonalization.

        if self.orth: 
            for A, B in self.orth: # Remove component of predictor A that is parallel to predictor B.
                bp_values[A] = bp_values[A] - ut.projection(bp_values[B], bp_values[A])

        # predictor normalization.
        if self.norm:
            for p in self.base_predictors:
                bp_values[p] = bp_values[p] * 0.5 / np.mean(np.abs(bp_values[p]))

        # Generate lagged predictors from base predictors.

        predictors = np.zeros([session.n_trials, self.n_predictors])

        for i,p in enumerate(self.predictors):  
            if '-' in p: # Get lag from predictor name.
                lag = int(p.split('-')[1]) 
                bp_name = p.split('-')[0]
            else:        # Use default lag.
                lag = 1
                bp_name = p
            predictors[lag:, i] = bp_values[bp_name][:-lag]

        return predictors
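The lag construction at the end shifts each base predictor forward in time, so trial t carries the predictor value from trial t - lag; a toy sketch with a made-up base predictor:

import numpy as np

bp = np.array([0.5, -0.5, 0.5, 0.5, -0.5])  # hypothetical base predictor
lag = 1
lagged = np.zeros_like(bp)
lagged[lag:] = bp[:-lag]    # trial t gets the value from trial t - lag
print(lagged)               # [ 0.   0.5 -0.5  0.5  0.5]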