Example #1
def test_parrec2nii_sets_qform_sform_code1(*args):
    # Check that set_sform(), set_qform() are called on the new header.
    parrec2nii.verbose.switch = False

    parrec2nii.io_orientation.return_value = [[0, 1], [1, 1], [2, 1]]  # LAS+

    nimg = Mock()
    nhdr = MagicMock()
    nimg.header = nhdr
    parrec2nii.nifti1.Nifti1Image.return_value = nimg

    pr_img = Mock()
    pr_hdr = Mock()
    pr_hdr.get_data_scaling.return_value = (npa([]), npa([]))
    pr_hdr.get_bvals_bvecs.return_value = (None, None)
    pr_hdr.get_affine.return_value = AN_OLD_AFFINE
    pr_img.header = pr_hdr
    parrec2nii.pr.load.return_value = pr_img

    opts = Mock()
    opts.outdir = None
    opts.scaling = 'off'
    opts.minmax = [1, 1]
    opts.store_header = False
    opts.bvs = False
    opts.vol_info = False
    opts.dwell_time = False

    infile = 'nonexistent.PAR'
    parrec2nii.proc_file(infile, opts)
    nhdr.set_qform.assert_called_with(AN_OLD_AFFINE, code=1)
    nhdr.set_sform.assert_called_with(AN_OLD_AFFINE, code=1)
Example #2
def cuatro_cps(espacio, velocidad):
    espacio = npa(espacio, dtype=np.float32)
    velocidad = npa(velocidad, dtype=np.float32)
    aceleracion = npa([0, 0], dtype=np.float32)
    pt = np.arange(0, time, h)  # time, h (and d, b, r, xlim, ylim below) are module-level globals
    pe = []
    pv = []
    pa = []
    for t in pt:
        x1 = espacio[0] + d
        x2 = espacio[0] - d
        x3 = espacio[0]
        y = espacio[1]
        y2 = y - b
        aceleracion[0] = (-x1 / (x1**2 + y**2)**1.5 - x2 / (x2**2 + y**2)**1.5
                          - x3 / (x3**2 + y2**2)**1.5)
        aceleracion[1] = (-y / (x1**2 + y**2)**1.5 - y / (x2**2 + y**2)**1.5
                          - y2 / (x3**2 + y2**2)**1.5)
        velocidad += aceleracion * h
        espacio += velocidad * h
        if abs(espacio[0]) > xlim: break
        if abs(espacio[1]) > ylim: break
        if (x1**2 + y**2)**0.5 <= r: break
        if (x2**2 + y**2)**0.5 <= r: break
        if (x3**2 + y2**2)**0.5 <= r: break
        pe.append(espacio.tolist())
        pv.append(velocidad.tolist())
        pa.append(aceleracion.tolist())
    return npa(pe)
Example #3
def get_evmax(xObs, yObs, domain, lenscale, sigvar=None, noisevar=None):
    """currently, nObs needs to be constant (i.e. xObs and yObs need to be
        rectangular 2-tensors of shape (nExp x nObs) )"""
    if not sigvar: sigvar=1.
    if not noisevar: noisevar=1e-7  # default noiseless
    assert len(xObs.shape) == 2, 'dim xObs must = 2 (nExp x nObs)'
    assert len(domain.shape) == 1, 'dim domain must = 1'

    nExp, nObs = xObs.shape  # unpack

    # get conditioned posteriors
    print('lenscale: ' + str(lenscale))
    postmus = (conditioned_mu(domain, xObs[iexp], yObs[iexp],
                              lenscale, sigvar, noisevar)
               for iexp in range(nExp))
    # get maxes of posteriors
    imaxes = []
    xmaxes = []
    fmaxes = []
    for postmu in postmus:
        imax = postmu.argmax()
        imaxes.append(imax)  # nExp x 1
        xmaxes.append(domain[imax])  # nExp x 1
        fmaxes.append(postmu[imax])  # nExp x 1
    # add experiment metadata for call to get_ranked_dists
    imaxes = npa(imaxes)
    fmaxes = npa(fmaxes)
    lenscales = [lenscale] * nExp
    iExps = arange(nExp)

    return {'xmax': xmaxes,
            'fmax': fmaxes,
            'imax': imaxes,
            'lenscale': lenscales,
            'iExp': iExps}
Example #4
def smooth(data, winwidth_in):
    """
    Note: only 'gauss' smoothing employed
    Original location: main/util/smooth.m
    """
    if winwidth_in < 1:
        return data
    if not isinstance(data, np.ndarray):
        data = npa(data)
    if not len(data.shape) == 1:
        raise ValueError('data is not a vector. Shape: ' + str(data.shape))
    paddata = npa(np.hstack(
        (data[0], data, data[-1])))  # pad to remove border errors
    winwidth = int(np.floor(winwidth_in / 2) *
                   2)  # force even winsize for odd window
    window = norm.pdf(np.arange(0, winwidth + 1), winwidth / 2 + 1,
                      winwidth / 8)
    window = window / np.sum(window)  # normalize window

    data_ext = np.hstack(
        ((np.zeros(winwidth // 2) + 1) * paddata[0], paddata,
         (np.zeros(winwidth // 2) + 1) * paddata[-1]
         ))  # extend data to reduce convolution error at beginning and end

    sdata_ext = convolve(data_ext, window)  # convolve with window
    sdata = sdata_ext[winwidth + 1:-winwidth - 1]  # cut to data length
    return sdata
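A minimal usage sketch with hypothetical data (assumes this module's imports: npa = numpy.array, norm from scipy.stats, and a full-mode convolve such as numpy.convolve):

noisy = np.linspace(0, 1, 100) + 0.05 * np.random.randn(100)
smoothed = smooth(noisy, 8)              # Gaussian smoothing, 8-sample window
assert smoothed.shape == noisy.shape     # output length equals input length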
Example #5
def Esigmoid2prod(signs1, signs2, mu, cov):
    r'''
    Computes
        E[prod_i s_i s'_i],  (x_i)_i ~ N(mu, cov)
    where
        s_i  = 1 if `signs1[i] == 0`, else sigmoid(`signs1[i]` * x_i)
        s'_i = 1 if `signs2[i] == 0`, else sigmoid(`signs2[i]` * x_i)
        sigmoid(x) = 0.5 * (1 + erf(x))
    Inputs:
        signs1: a vector with entries from {-1, 0, 1}
        signs2: a vector with entries from {-1, 0, 1}
    '''
    # the lazy way:
    # duplicate cov to reduce to the case with no repeat variables.
    signs1 = npa(signs1)
    signs2 = npa(signs2)
    mu = npa(mu)
    cov = npa(cov)
    n = cov.shape[0]
    newcov = np.zeros([2 * n , 2 * n])
    newcov[:n, :n] = newcov[:n, n:] = newcov[n:, :n] = newcov[n:, n:] = cov
    newmu = np.concatenate([mu, mu], axis=0)
    signs = np.concatenate([signs1, signs2], axis=0)
    zero_idx = set(np.argwhere(signs==0).reshape(-1))
    nonzero_idx = list(set(list(range(0, 2*n))) - zero_idx)
    signs = signs[nonzero_idx]
    newmu = newmu[nonzero_idx]
    newcov = newcov[nonzero_idx, :][:, nonzero_idx]
    return Esigmoidprod(signs, newmu, newcov)
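A hypothetical call sketch (Esigmoidprod from the same module must be in scope):

import numpy as np
# E[sigmoid(x0) * sigmoid(-x0) * sigmoid(x1)] for (x0, x1) ~ N(0, I)
val = Esigmoid2prod(signs1=[1, 1], signs2=[-1, 0], mu=np.zeros(2), cov=np.eye(2))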
Example #6
    def obj_func(self, cam):

        fx, fy = x0_int[0], x0_int[1]
        cx, cy = x0_int[2], x0_int[3]
        distCoeffs = (0.0, 0.0, 0.0, 0.0, 0.0)  ## hack no distortion
        tvec = cam[0:3]  # x,y,z
        rvec = cam[3:6]  # rodrigues

        # project
        #point2Ds_p = cv.project(point3Ds, cam)
        cameraMatrix = np.array([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])
        point2Ds_p, jacobian = cv2.projectPoints(
            npa(self.point3Ds, dtype=float), rvec, tvec, cameraMatrix,
            distCoeffs)
        #print point2Ds_p
        point2Ds_pp = [list(p[0]) for p in point2Ds_p]
        diff = npa(point2Ds_pp, dtype=float) - npa(self.point2Ds,
                                                   dtype=float)
        diff = diff.flatten('F')  # column-major flatten
        #import pdb;
        #pdb.set_trace()
        #print diff
        res = np.linalg.norm(diff)
        print(res / len(self.point3Ds))
        return diff
Example #7
def import_data(raw_data, srate, downsample=1):
    """
    Sets leda2 object to its appropriate values to allow analysis
    Adapted from main/import/import_data.m
    """
    if not isinstance(raw_data, np.ndarray):
        raw_data = npa(raw_data, dtype='float64')
    time_data = utils.genTimeVector(raw_data, srate)
    conductance_data = npa(raw_data, dtype='float64')
    if downsample > 1:
        (time_data, conductance_data) = utils.downsamp(time_data,
                                                       conductance_data,
                                                       downsample, 'mean')
    leda2.data.samplingrate = srate / downsample
    leda2.data.time_data = time_data
    leda2.data.conductance_data = conductance_data
    leda2.data.conductance_error = np.sqrt(
        np.mean(pow(np.diff(conductance_data), 2)) / 2)
    leda2.data.N = len(conductance_data)
    leda2.data.conductance_min = np.min(conductance_data)
    leda2.data.conductance_max = np.max(conductance_data)
    (leda2.data.conductance_smoothData,
     leda2.data.conductance_smoothData_win) = utils.smooth_adapt(
         conductance_data, srate, .00001)
    analyse.trough2peak_analysis()
Example #8
def cgd_get_gradient(x, error0, error_fcn, h):
    """
    Original location: analyze/cg/cgd_get_gradient.m
    """
    Npars = len(x)
    gradient = np.zeros(Npars)

    for i in range(Npars):
        xc = npa(x)
        xc[i] = xc[i] + h[i]

        (error1, _0) = error_fcn(xc)

        if error1 < error0:
            gradient[i] = (error1 - error0)

        else:  # try opposite direction
            xc = npa(x)
            xc[i] = xc[i] - h[i]
            (error1, _0) = error_fcn(xc)

            if error1 < error0:
                gradient[i] = -(error1 - error0)

            else:
                gradient[i] = 0
    return gradient
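A toy sketch of the calling convention (error_fcn returns an (error, extra) pair, as unpacked above; the quadratic here is purely illustrative):

import numpy as np

def error_fcn(x):
    return float(np.sum(x ** 2)), None  # (error, extra) pair

x0 = np.array([1.0, -2.0])
error0, _ = error_fcn(x0)
g = cgd_get_gradient(x0, error0, error_fcn, h=np.array([1e-4, 1e-4]))
# entries are nonzero only where a step of size h[i] improved the error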
Example #9
def get_experimentError(dfs, lenscale):
    # pdb.set_trace()  # debugging hook
    # lenscale = dfs.LENSCALE.iat[0]
    sigvar = dfs.SIGVAR.iat[0]
    KDOMAIN = K_se(DOMAIN, DOMAIN, lenscale, sigvar)

    out = dfs.groupby('itrial').apply(lambda df0:
            get_trialError(npa(df0.xObs.iat[0]),
                           npa(df0.yObs.iat[0]),
                           df0.xDrill.iat[0],
                           df0.yDrill.iat[0],
                           DOMAIN,
                           KDOMAIN,
                           lenscale))

    out = np.vstack(out.values)

    xerr = out[:, 0]
    yerr = out[:, 1]
    xhat = out[:, 2]
    yhat = out[:, 3]

    return {'xerr': xerr,
            'yerr': yerr,
            'xhat': xhat,
            'yhat': yhat}
Example #11
    def __init__(self, dt=0.1, map_size=(1, 1), minDist=0.1):
        """
        You can add or move attractors/repulsors and change the initial state after creating the instance:
        env = ForceField()

        # attractor: [x, y, force]
        # force > 0: attraction, force < 0: repulsion
        env.attractors = npa([[0.70, 0.70, -0.02],
                              [0.60, 0.60, -0.02],
                              [0.78, 0.58, -0.02],
                              [0.20, 0.25,  0.04]])

        # initial state: x, y, vx, vy
        env.initState = npa([25, 30, 0, 0])

        :param dt: simulation time step
        :param map_size: map size
        :param minDist: virtual minimal distance between agent and attractors
        """
        self.dt = dt
        self.map_size = map_size
        self.minDist = minDist

        self.attractors = npa([[0.70, 0.70, -0.02], [0.60, 0.60, -0.02],
                               [0.78, 0.58, -0.02], [0.20, 0.25, 0.04]])

        self.initState = npa([0.25, 0.30, 0, 0])  # initial state  x, y, vx, vy
        self.state = self.initState
Example #12
def topixel(a, offset):
    c = npa(a)
    b = npa(a)
    for i in range(b.shape[0]):
        b[i, 0] = (c[i, 0] - offset[0] - (-0.15)) * (1000.0 / (0.15 * 2))
        b[i, 1] = (-(c[i, 1] - offset[1]) - (-0.15)) * (1000.0 / (0.15 * 2))
    return b.astype(int).tolist()
Example #13
def symRange(v, centre=0):
    v -= centre
    if np.abs(np.max(v)) > np.abs(np.min(v)):
        lim = npa([-np.abs(np.max(v)), np.abs(np.max(v))])
    else:
        lim = npa([-np.abs(np.min(v)), np.abs(np.min(v))])

    lim += centre
    return lim
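A quick sketch (note that symRange modifies v in place through v -= centre):

import numpy as np
v = np.array([-0.2, 0.1, 0.7])
print(symRange(v))  # -> [-0.7  0.7]: limits symmetric about 0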
Example #14
File: em.py Project: zhmxu/main
def m_step(data, mean1, covar1, mean2, covar2, probs):
    new_mean1 = npa([0., 0.])
    new_mean2 = npa([0., 0.])
    for point, prob in zip(data, probs):
        new_mean1 += prob[0] * point
        new_mean2 += prob[1] * point
    new_mean1 /= sum(p[0] for p in probs)
    new_mean2 /= sum(p[1] for p in probs)
    return new_mean1, new_mean2
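A toy sketch of the responsibility-weighted mean update (the mean/covar arguments are unused by this m_step, so None placeholders suffice):

import numpy as np
data = [np.array([0.0, 0.0]), np.array([1.0, 1.0])]
probs = [np.array([0.9, 0.1]), np.array([0.2, 0.8])]  # soft assignments per point
m1, m2 = m_step(data, None, None, None, None, probs)
print(m1, m2)  # weighted means of the two components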
Example #15
    def find_uniqueGrid(self, hist_xy, nBin=(21, 21)):
        binx = np.linspace(0, self.map_size[0], nBin[0])
        biny = np.linspace(0, self.map_size[1], nBin[1])
        ix = np.digitize(hist_xy[:, 0], binx) - 1
        iy = np.digitize(hist_xy[:, 1], biny) - 1
        ixy = npa([ix, iy]).transpose()
        ixy_unique = np.unique(ixy, axis=0)  # unique occupied grid cells
        xy_unique = npa([binx[ixy_unique[:, 0]],
                         biny[ixy_unique[:, 1]]]).transpose()  # y column uses biny
        return ixy_unique, xy_unique
Example #16
def euler(espacio, velocidad, aceleracion, inicio=True):
    pe = []
    pv = []
    pa = []
    a = 1 if inicio else -1
    for t in pt:
        espacio += (velocidad * h * a)
        velocidad += (aceleracion * h * a)
        pe.append(espacio.tolist())
        pv.append(velocidad.tolist())
        pa.append(aceleracion.tolist())
    return npa(pe), npa(pv), npa(pa)
Example #17
def deconv_apply():
    """
    Original location: Ledalab/analyze/deconvolution/sdeco.m>deconv_apply
    """
    # Prepare target data for full resolution analysis

    leda2.analysis0.target.tonicDriver = npa(
        leda2.analysis0.target.poly.__call__(leda2.data.time_data))
    leda2.analysis0.target.t = npa(leda2.data.time_data)
    leda2.analysis0.target.d = npa(leda2.data.conductance_data)
    leda2.analysis0.target.sr = leda2.data.samplingrate

    sdeconv_analysis(leda2.analysis0.tau, 0)

    leda2.analysis = leda2.analysis0  # remember we are copying by reference, look out for issues

    # SCRs reconvolved from Driver-Peaks
    t = leda2.data.time_data
    driver = leda2.analysis.driver
    (minL, maxL) = utils.get_peaks(driver)
    minL = np.hstack((minL[:len(maxL)], len(t)))

    # Impulse data
    leda2.analysis.impulseOnset = t[minL[:-1]]
    leda2.analysis.impulsePeakTime = t[maxL]  # = effective peak-latency
    leda2.analysis.impulseAmp = driver[maxL]

    # SCR data
    leda2.analysis.onset = leda2.analysis.impulsePeakTime
    leda2.analysis.amp = np.zeros(len(maxL))
    leda2.analysis.peakTime = np.zeros(len(maxL))
    for iPeak in range(len(maxL)):
        driver_segment = leda2.analysis.driver[minL[iPeak]:minL[
            iPeak + 1]]  # + 1 seems like a wrong idea...
        sc_reconv = convolve(driver_segment, leda2.analysis.kernel)
        leda2.analysis.amp[iPeak] = np.max(sc_reconv)
        mx_idx = np.flatnonzero(sc_reconv == np.max(sc_reconv))
        leda2.analysis.peakTime[iPeak] = t[minL[iPeak]] + mx_idx[
            0] / leda2.data.samplingrate  # SCR peak could be outside of SC time range

    negamp_idx = np.flatnonzero(
        leda2.analysis.amp < .001
    )  # criterion removes peaks at end of sc_reconv due to large negative driver-segments
    leda2.analysis.impulseOnset = np.delete(leda2.analysis.impulseOnset,
                                            negamp_idx)
    leda2.analysis.impulsePeakTime = np.delete(leda2.analysis.impulsePeakTime,
                                               negamp_idx)
    leda2.analysis.impulseAmp = np.delete(leda2.analysis.impulseAmp,
                                          negamp_idx)
    leda2.analysis.onset = np.delete(leda2.analysis.onset, negamp_idx)
    leda2.analysis.amp = np.delete(leda2.analysis.amp, negamp_idx)
    leda2.analysis.peakTime = np.delete(leda2.analysis.peakTime, negamp_idx)
Example #18
def translate_ft(ft):
    f = npa(ft[0:3])  # force component of the wrench
    A = npa([[0, -ft[2], ft[1]], [ft[2], 0, -ft[0]], [-ft[1], ft[0], 0]]).T
    b = npa(ft[3:6])
    print("det", np.linalg.det(A.T), "multiply", ft[0] * ft[1] * ft[2])
    print(A)
    r = np.linalg.solve(A, b)

    # calculate new r
    print(r)
    newr = r - npa([0, 0, 0.14])

    newft = ft[0:3] + np.cross(f, r).tolist()
    return newft
Example #19
    def _compute_coords(self, mesh=None):
        coords = getattr(self, '_coords', None)
        if coords is None:
            xl, yl = mesh_bbox(mesh)
            y0 = npa(yl).mean()
            t = np.linspace(0, 1, self.resolution)
            coords = (np.outer(1 - t, npa((xl[0], y0))) +
                      np.outer(t, npa((xl[1], y0))))
            self._coords = coords
        df = self._df
        df['coord_x'] = coords.view()[:, 0]
        df['coord_y'] = coords.view()[:, 1]
        return coords
Example #20
def process_input_image(mask_t_1, jpeg_t, flow_t_1=None):
    '''
    mask_t_1: PIL Image, mask at time step t-1
    jpeg_t: PIL Image, image at time step t
    flow_t_1: optical flow field (.flo); unused in this snippet
    return: Jpeg PIL Image
    '''
    mask_t_1_dilated = npa(dilate_and_gaussian_mask(mask_t_1))
    jpeg_t = npa(jpeg_t)
    #jpeg_t = skimage.util.invert(jpeg_t) #INVERT
    jpeg_t = jpeg_t * np.expand_dims((mask_t_1_dilated / 255), 2)
    jpeg_t = jpeg_t.astype(np.uint8)
    return ifa(jpeg_t)
Example #21
def trial_wrapper(dft, ls):
    if dft.shape[0] != 1:
        # errpool.append(dft.copy())
        # ii = randint(dft.shape[0])
        # dft = dft.iloc[ii:ii+1]
        trialError = None
    else:
        KX = K_se(X, X, ls, SIGVAR)
        xObs = npa(dft.xObs.iat[0])
        yObs = npa(dft.yObs.iat[0])
        xDrill = dft.xDrill.iat[0]
        yDrill = dft.yDrill.iat[0]
        trialError = get_trialError(xObs, yObs, xDrill, yDrill, X, KX, ls)
    return trialError
Example #22
    def postrun(self):
        #get gradients and save them to the pickle files for ASE
        path = self.path
        for child in self.children:
            res = child.results
            name = child.name
            # don't rely on gradients being numpy arrays
            f = [npa(vec) for vec in self.get_grad(res, eUnit='eV', lUnit='Angstrom')]
            force = -1.0 * npa(self.reorder(res, f))
            filename = osPJ(path, name + '.pckl')
            with open(filename, 'wb') as fobj:  # avoid shadowing the gradient list f
                pickleDump(force, fobj, protocol=2)

        self.results._vib = self._vib
Example #23
def cgd_linesearch(x, error0, direction, error_fcn, h):
    """
    Original location: analyze/cg/cgd_linesearch
    """
    direction_n = direction / norm(direction, 2)
    error_list = npa(error0)
    stepsize = h
    maxSteps = 6
    count = 0
    factor = npa([0])
    for iStep in range(1, maxSteps):

        factor = np.hstack((factor, pow(2, (iStep - 1))))
        xc = x + (direction_n * stepsize) * factor[iStep]
        (catVal, xc) = error_fcn(xc)  # xc may be changed due to limits
        error_list = np.hstack((error_list, catVal))

        if error_list[-1] >= error_list[-2]:  # end of decline
            if iStep == 1:  # no success
                step = 0
                error1 = npa(error0)

            else:  # parabolic
                p = np.polyfit(factor, error_list, 2)
                fx = np.arange(factor[0], factor[-1] + .1, .1)
                fy = np.polyval(p, fx)
                idx = np.argmin(fy)
                fxm = fx[idx]
                xcm = x + (direction_n * stepsize) * fxm
                (error1,
                 xcm) = error_fcn(xcm)  # xc may be changed due to limits

                if error1 < error_list[iStep - 1]:
                    xc = xcm
                    step = fxm

                else:  # finding Minimum did not work
                    xc = x + (direction_n * stepsize
                              ) * factor[iStep - 1]  # before last point
                    (error1, xc) = error_fcn(
                        xc
                    )  # recalculate error in order to check for limits again
                    step = factor[iStep - 1]

            return (xc, error1, step)
        count = iStep
    step = factor[count]
    error1 = error_list[count]

    return (xc, error1, step)
Example #24
def euler(espacio, velocidad):
    espacio = npa(espacio, dtype=np.float32)
    velocidad = npa(velocidad, dtype=np.float32)
    pt = np.arange(0, time, h)
    pe = []
    for t in pt:
        aceleracion = -espacio / (sum(espacio**2))**1.5
        velocidad += aceleracion * h
        espacio += velocidad * h
        if abs(espacio[0]) > xlim: break
        if abs(espacio[1]) > ylim: break
        if (sum(espacio**2))**0.5 <= r: break
        pe.append(espacio.tolist())
    return npa(pe)
Example #25
def euler2(espacio, velocidad, aceleracion):
    pe = []
    pv = []
    pa = []
    for t in pt:
        # exact float equality: assumes the step h divides 3, 8 and 10 evenly
        if t == 3: velocidad += 3
        if t == 8: velocidad = 3
        if t == 10: velocidad += -5
        espacio += (velocidad * h)
        velocidad += (aceleracion * h)
        pe.append(espacio.tolist())
        pv.append(velocidad.tolist())
        pa.append(aceleracion.tolist())
    return npa(pe), npa(pv), npa(pa)
Example #26
def euler(espacio, velocidad, aceleracion, inicio=True):
    espacio = np.array(espacio, dtype=np.float32)
    velocidad = np.array(velocidad, dtype=np.float32)
    aceleracion = np.array(aceleracion, dtype=np.float32)
    pe = []
    pv = []
    pa = []
    a = 1 if inicio else -1
    for t in pt:
        espacio += velocidad * h * a
        velocidad += aceleracion * h * a
        pe.append(espacio.tolist())
        pv.append(velocidad.tolist())
        pa.append(aceleracion.tolist())
    return npa(pe), npa(pv), npa(pa)
Example #27
def img_subset(img):
    '''Transform a whole img to a set of patches.'''
    local_img = img
    img_set = []
    # 3x3 patches in total, each patch is 14px x 14px
    # rows,cols = local_img.shape
    subnum = npa([3, 3])
    subsize = npa([14, 14])
    subdist = (local_img.shape - subsize) // (subnum - 1)  # integer division so slice indices stay ints
    for srow in range(subnum[0]):
        for scol in range(subnum[1]):
            row, col = subdist * (srow, scol)
            sub_img = local_img[row : row + subsize[0], col : col + subsize[1]]
            img_set.append(sub_img.reshape(1,-1))
    return npa(img_set)
Example #28
File: ase.py Project: sroet/PLAMS
def toASE(molecule):
    """Convert a PLAMS |Molecule| to an ASE molecule (``ase.Atoms`` instance). Translate coordinates, atomic numbers, and lattice vectors (if present). The order of atoms is preserved."""
    aseMol = aseAtoms()

    #iterate over PLAMS atoms
    for atom in molecule:

        #check if coords only consists of floats or ints
        if not all(isinstance(x, (int,float)) for x in atom.coords):
            raise ValueError("Non-Number in Atomic Coordinates, not compatible with ASE")

        #append atom to aseMol
        aseMol.append(aseAtom(atom.atnum, atom.coords))

    #get lattice info if any
    lattice = npz((3,3))
    pbc = [False,False,False]
    for i,vec in enumerate(molecule.lattice):

        #check if lattice only consists of floats or ints
        if not all(isinstance(x, (int,float)) for x in vec):
            raise ValueError("Non-Number in Lattice Vectors, not compatible with ASE")

        pbc[i] = True
        lattice[i] = npa(vec)

    #save lattice info to aseMol
    if any(pbc):
        aseMol.set_pbc(pbc)
        aseMol.set_cell(lattice)

    return aseMol
Example #29
def signpeak(data, cccrimin, cccrimax, sigc):
    """
    Originally subfunction of segment_driver
    data, cccrimin, cccrimax must be row vectors of shape (x,)
    """
    if cccrimax is None:
        return (None, None)
    if not isinstance(data, np.ndarray):
        raise ValueError('data is not a numpy array')
    if not isinstance(cccrimin, np.ndarray):
        raise ValueError('cccrimin is not a numpy array')
    if not isinstance(cccrimax, np.ndarray):
        raise ValueError('cccrimax is not a numpy array')

    dmm = np.vstack((data[cccrimax] - data[cccrimin[:-1]],
                     data[cccrimax] - data[cccrimin[1:]]))
    maxL = npa(cccrimax[np.max(dmm, axis=0) > sigc])

    # keep only minima right before and after sign maxima
    minL = None
    for i in range(len(maxL)):
        minm1_idx = np.flatnonzero(cccrimin < maxL[i])
        before_smpl = cccrimin[minm1_idx[-1]]
        after_smpl = cccrimin[minm1_idx[-1] + 1]
        newStack = np.hstack((before_smpl, after_smpl))
        if minL is None:
            minL = newStack
        else:
            minL = np.vstack((minL, newStack))
    return (minL, maxL)
Example #30
def get_iea37_cost(n_wt=9):
    """Cost component that wraps the IEA 37 AEP calculator"""
    wd = npa(range(16)) * 22.5  # only 16 bins
    site = IEA37Site(n_wt)
    wind_turbines = IEA37_WindTurbines()
    wake_model = IEA37SimpleBastankhahGaussian(site, wind_turbines)
    return PyWakeAEPCostModelComponent(wake_model, n_wt, wd=wd)
Example #31
def parse_day(s):
    """
    >>> d = parse_day('Mon Apr 01 13:44:45 +0000 2011')
    >>> print(d.strftime('%Y-%m-%d'))
    2011-04-01
    """
    return datetime.strptime(' '.join(npa(s.split())[[1, 2, 5]]), '%b %d %Y')
Example #32
def rod_to_quad(r):
    # q = [qx, qy, qz, qw]
    rotmat, jacobian = cv2.Rodrigues(npa(r))
    rotmat = np.append(rotmat, [[0, 0, 0]], 0)
    rotmat = np.append(rotmat, [[0], [0], [0], [1]], 1)
    q = tfm.quaternion_from_matrix(rotmat)
    return q.tolist()
Example #33
def smooth_adapt(data, winwidth_max, err_crit):
    """
    Original location: main/util/smooth_adapt.m
    """
    success = 0
    ce = np.sqrt(np.mean(pow(np.diff(data), 2)) / 2)
    iterL = np.arange(0, winwidth_max + 4, 4)
    if len(iterL) < 2:
        iterL = npa([0, 2])
    count = 0
    for i in range(1, len(iterL)):
        count = i
        winwidth = iterL[i]
        scs = smooth(data, winwidth)
        scd = np.diff(scs)
        ce = np.hstack(
            (ce, np.sqrt(np.mean(pow(scd, 2)) / 2)))  # conductance_error
        if abs(ce[i] - ce[i - 1]) < err_crit:
            success = 1
            break

    if success:  # take before-last result
        if count > 1:
            scs = smooth(data, iterL[count - 1])
            winwidth = iterL[count - 1]
        else:  # data already satisfy smoothness criteria
            scs = data
            winwidth = 0
    return (scs, winwidth)
Example #34
def bateman(time, onset, amp, tau1, tau2):
    """
    original location: analyze/template/bateman.m
    time must be a numpy array
    """
    if not isinstance(time, np.ndarray):
        time = npa(time)
    if tau1 < 0 or tau2 < 0:
        raise ValueError('tau1 or tau2 < 0: ({:f}, {:f})'.format(tau1, tau2))

    if tau1 == tau2:
        raise ValueError('tau1 == tau2 == {:f}'.format(tau1))

    conductance = np.zeros(time.shape)
    rang = np.flatnonzero(time > onset)
    if len(rang) == 0:
        return None
    xr = time[rang] - onset

    if amp > 0:
        maxx = tau1 * tau2 * math.log(tau1 / tau2) / (tau1 - tau2)  # b' = 0
        maxamp = abs(math.exp(-maxx / tau2) - math.exp(-maxx / tau1))
        c = amp / maxamp
    else:  # amp == 0: normalized bateman, area(bateman) = 1/sr
        sr = round(1 / np.mean(np.diff(time)))
        c = 1 / ((tau2 - tau1) * sr)

    if tau1 > 0:
        conductance[rang] = c * (np.exp(-xr / tau2) - np.exp(-xr / tau1))
    else:
        conductance[rang] = c * np.exp(-xr / tau2)

    return conductance
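A minimal sketch of a single SCR-shaped response on a 10 s grid (with amp > 0 the peak is scaled to amp):

import numpy as np
t = np.arange(0.0, 10.0, 0.1)
scr = bateman(t, onset=1.0, amp=1.0, tau1=0.75, tau2=2.0)
print(round(scr.max(), 3))  # ~1.0 at the grid point nearest the analytic peak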
Example #35
    def obj_func(self, cam):
        c0 = cam[0]
        c1 = cam[1]
        c2 = cam[2]
        diff = []

        #print matrix_from_xyzrpy([0,0,0, c,0,0])
        for i in range(len(self.point3Ds)):
            point3D = self.point3Ds[i]
            point3D_pc = self.point3Ds_pc[i]
            #point3D_pc_trans = np.array(point3D_pc + [1.0]) * c
            #import ipdb; ipdb.set_trace()
            point3D_pc_trans = np.dot(
                matrix_from_xyzrpy([
                    c2 * point3D_pc[0] / point3D_pc[2],
                    c2 * point3D_pc[1] / point3D_pc[2], c2, 0, 0, 0
                ]),
                np.array([point3D_pc[0], point3D_pc[1], point3D_pc[2], 1.0]))

            #point3D_pc_trans[0] = point3D_pc[0] + c * point3D_pc[0] / point3D_pc[2]
            #point3D_pc_trans[1] = point3D_pc[1] + c * point3D_pc[1] / point3D_pc[2]
            #point3D_pc_trans[2] = point3D_pc[2] + c

            point3D_pc_world = np.dot(self.x0_ext, point3D_pc_trans)
            #print point3D, point3D_pc_world[0:3].tolist()
            diff_ = npa(point3D,
                        dtype=float) - point3D_pc_world[0:3].tolist()
            diff.extend(diff_.tolist())
        #import ipdb; ipdb.set_trace()
        res = np.linalg.norm(diff)
        print(res / len(self.point3Ds))
        err = res / len(self.point3Ds)
        return err
Example #36
    def _section_fire(self):  # {{{
        fire_origin = self._fire_origin()
        times, hrrs = self._draw_fire_development()
        fire_properties = self._draw_fire_properties(len(times))
        self._fire_obstacle()
        area = nround(npa(hrrs) / (self.hrrpua * 1000) + 0.1, decimals=1)
        fire_origin = self._fire_origin()

        txt = (
            '!! FIRE,compa,x,y,z,fire_number,ignition_type,ignition_criterion,ignition_target,?,?,name',
            fire_origin,
            '',
            '!! CHEMI,?,?,?,?',
            'CHEMI,1,1.8,0.3,0.05,0,0.283,{}'.format(
                fire_properties['heatcom']),
            '',
            'TIME,' + ','.join(str(i) for i in times),
            'HRR,' + ','.join(str(round(i, 3)) for i in hrrs),
            fire_properties['sootyield'],
            fire_properties['coyield'],
            fire_properties['trace'],
            'AREA,' + ','.join(str(i) for i in area),
            fire_properties['heigh'],
        )
        return "\n".join(txt) + "\n"
Example #37
def generate_rand_obs(nExp, nObs, domainBounds, sigvar=None, rng=None):
    # create random sets of observations for each experiment
    if not sigvar: sigvar = 1.
    if not rng: rng = RandomState()

    domainBounds = npa(domainBounds)
    dimX = domainBounds.shape[0]
    minX = domainBounds[:, 0]
    maxX = domainBounds[:, 1]
    rangeX = maxX - minX
    xObs = rng.uniform(size=(nExp, nObs, dimX))
    xObs *= rangeX
    xObs += minX
    yObs = empty(shape=(nExp, nObs))
    for iexp in range(nExp):
        good = False
        while not good:
            yObs0 = rng.normal(size=(nObs))
            if yObs0.max() > 0: good = True
        yObs[iexp, :] = yObs0
    # yObs = rng.normal(size=(nExp, nObs, 1))
    yObs *= sigvar

    return {'x': xObs,
            'y': yObs}
Example #38
def unpack_rngstate(json_rngstate):
    jrs = loads(json_rngstate)
    p0 = str(jrs[0])  # algorithm name, e.g. 'MT19937'; RandomState.set_state expects str
    p1 = npa(jrs[1], dtype=uint32)  # unpack into numpy uint32 array
    p2 = int(jrs[2])
    p3 = int(jrs[3])
    p4 = float(jrs[4])
    return (p0, p1, p2, p3, p4)  # rngstate
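A hypothetical round trip, assuming the state was serialized as [name, keys, pos, has_gauss, cached_gaussian]:

from json import dumps
from numpy.random import RandomState

rng = RandomState(42)
s = rng.get_state()
json_rngstate = dumps([s[0], s[1].tolist(), int(s[2]), int(s[3]), float(s[4])])
rng2 = RandomState()
rng2.set_state(unpack_rngstate(json_rngstate))
assert rng.uniform() == rng2.uniform()  # both generators now produce the same stream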
Example #39
    def read_obj(filename: str) -> object:
        obj = OBJ(path.basename(filename))

        def _type(v):
            try: return int(v or 0)
            except ValueError:
                return float(v)

        def _update(tar, val):
            if len(val) != np.size(getattr(obj.values, tar), 1):
                setattr(obj.values, tar, np.empty((0, len(val)), dtype=np.float32))
            setattr(obj.values, tar, np.append(getattr(obj.values, tar), [val], axis=0))

        parse = lambda t, *v: {
            'v' : lambda v: _update('vertex', tuple(map(_type, v))),
            'vn': lambda v: _update('normal', tuple(map(_type, v))),
            'vt': lambda v: _update('texcoord', tuple(map(_type, v))),
            'f' : lambda v: obj.faces.append([tuple(map(_type, z.split('/'))) for z in v]),
        }.get(t,  lambda v: None)(v)

        with open(filename) as file:
            any(parse(*line) for line in filter(None, map(str.split, file)))

        obj.face_info[0] = len(list(filter(lambda x: len(x) == 3, obj.faces)))
        obj.face_info[1] = len(list(filter(lambda x: len(x) == 4, obj.faces)))
        obj.face_info[2] = len(list(filter(lambda x: len(x) >= 5, obj.faces)))

        newface = list()
        for i, face in enumerate(obj.faces):
            if len(face) > 3:
                face = np.array(face)

                # NOTE: in this snippet npa is numpy.apply_along_axis (func, axis, arr), not numpy.array
                # real vertex coord data from parsed vertex
                m = npa(lambda x: obj.values.vertex[x-1], 1, np.array([face])[:,:,0].T).squeeze()
                # vectorize function to detect plane
                f = np.vectorize(lambda x: not np.any(m[:,x]-m[:,x][0]))
                # vectorize function to detect index from vertex value
                g = lambda t, a: np.where(npa(lambda x: np.allclose(x, t), 1, a) == True)[0][0]
                # 2-d vertex array without a plane
                p = np.delete(m, (np.where(f(np.arange(3)) == True)[0] or [0])[0], 1)

                newface.extend([[[ary[p] for ary in face.T]             # new face values
                                    for p in map(partial(g, a=p), tri)] # each triangle (convert vertex to id)
                                        for tri in OBJ.trianglize(p)])  # trianglize given polygon
            else:
                newface.append(face)

        obj.faces = np.asarray(newface)
        obj.buffer.vertex   = npa(obj.values.vertex.__getitem__, 0, obj.faces[:, :, 0].flatten()-1) if obj.values.vertex.size else None
        obj.buffer.normal   = npa(obj.values.normal.__getitem__, 0, obj.faces[:, 0, 2]-1) if obj.values.normal.size else None
        obj.buffer.texcoord = npa(obj.values.texcoord.__getitem__, 0, obj.faces[:, 0, 0]-1) if obj.values.texcoord.size else None

        w, h = obj.buffer.vertex.shape
        arange = np.arange(w).reshape(w, 1)
        ovn = npa(lambda x: np.stack((x,x,x)), 1, obj.buffer.normal).reshape(w, h)
        obj.array = npa(lambda x: np.stack((ovn[x], obj.buffer.vertex[x])), 1, arange).reshape(w*2, h).astype(np.float32)

        return obj
Example #40
def test_alternative_header_field_names():
    # some V4.2 files had variant spellings for some of the fields in the
    # header.  This test reads one such file and verifies that the fields with
    # the alternate spelling were read.
    with ImageOpener(VARIANT_PAR, 'rt') as _fobj:
        HDR_INFO, HDR_DEFS = parse_PAR_header(_fobj)
    assert_equal(HDR_INFO['series_type'], 'Image   MRSERIES')
    assert_equal(HDR_INFO['diffusion_echo_time'], 0.0)
    assert_equal(HDR_INFO['repetition_time'], npa([21225.76]))
    assert_equal(HDR_INFO['patient_position'], 'HFS')
Example #41
def bayesOpt_trialError(xObs, yObs, xSub, ySub, lenscale, sigvar):
    KDOMAIN = K_se(DOMAIN, DOMAIN, lenscale, sigvar)

    out = get_trialError(npa(xObs),
                         npa(yObs),
                         npa(xSub),
                         npa(ySub),
                         DOMAIN,
                         KDOMAIN,
                         lenscale)

    xerr = out[0]
    yerr = out[1]
    xhat = out[2]
    yhat = out[3]

    return {'xerr': xerr,
            'yerr': yerr,
            'xhat': xhat,
            'yhat': yhat}
Example #42
def load_obj(obj_file):
  obj = [x.strip() for x in open(obj_file).read().split("\n")]

  # read vertices
  vertices = npa([[float(v) for v in x.split()[1:]] for x in obj if x[0:2] == "v "])
  #vertices += K*10

  # read in faces
  tris = [parse_face(x, vertices) for x in obj if x[0:2] == "f "]
  print(len(tris), "triangles")

  return tris
Example #43
def make_domain_grid(domainBounds, domainRes):
    """takes domainBounds and domainRes for each dim of input space and grids
    accordingly"""
    #TODO: add sparse version with ndm

    domainBounds = npa(domainBounds)
    # get domain from bounds and res
    dimX = domainBounds.shape[0]
    if isscalar(domainRes):
        # equal res if scalar
        domainRes = repeat(domainRes, dimX)
    else:
        assert len(domainRes) == dimX

    domainRes = npa(domainRes)

    domain = npa([linspace(domainBounds[dim][0],
                           domainBounds[dim][1],
                           domainRes[dim])
                  for dim in range(dimX)])

    domain = cartesian(domain)
    return domain
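A small sketch (assumes the module's own imports: npa, linspace, repeat, isscalar, plus a cartesian-product helper named cartesian):

domain = make_domain_grid([[0., 1.], [-1., 1.]], 3)
print(domain.shape)  # (9, 2): every combination of the two 3-point linspaces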
Example #45
def test_hebb_learning_vector():
    assert len(hebb_learning(Tns.v_in_1, Tns.s_out_1, Tns.v_w_1, 1)) == 5
    assert (hebb_learning(Tns.v_in_1, Tns.s_out_1, Tns.v_w_1, 1) == \
        Tns.v_w_1 + npa(Tns.s_out_1) * npa(Tns.v_in_1)).all()
    assert len(hebb_learning(Tns.v_in_2, Tns.s_out_2, Tns.v_w_2, 0.5)) == 5
    assert (hebb_learning(Tns.v_in_2, Tns.s_out_2, Tns.v_w_2, 0.5) == \
        Tns.v_w_2 + .5 * npa(Tns.s_out_2) * npa(Tns.v_in_2)).all()
    assert len(hebb_learning(Tns.v_in_3, Tns.s_out_3, Tns.v_w_3, 3.5)) == 5
    assert (hebb_learning(Tns.v_in_3, Tns.s_out_3, Tns.v_w_3, 3.5) == \
        Tns.v_w_3 + 3.5 * npa(Tns.s_out_3) * npa(Tns.v_in_3)).all()
Example #46
def trainvaltest_split(Xseq, yseq, p_train, p_val, p_test=None, shuffle=False):
    ncase, seqlen, nbatch = Xseq.shape
    if not p_test: p_test = 1.-(p_train+p_val)
    # can be less if don't want to use full dataset
    assert npa([p_train, p_val, p_test]).sum() <= 1.
    ntrain, nval, ntest = [int(p * ncase) for p in [p_train, p_val, p_test]]

    if shuffle:
        neworder = rng.permutation(ncase)
        Xseq = Xseq[neworder]
        yseq = yseq[neworder]

    return {'train': {'X': Xseq[:ntrain], 'y': yseq[:ntrain]},
            'val': {'X': Xseq[ntrain:ntrain+nval], 'y': yseq[ntrain:ntrain+nval]},
            'test': {'X': Xseq[ntrain+nval:ntrain+nval+ntest], 'y': yseq[ntrain+nval:ntrain+nval+ntest]}
           }
Example #47
def make_experiment(nTrial, nPassivePool, nActivePool, rng):
    nTrialType = len(nPassivePool) * len(nActivePool)
    assert nTrial % nTrialType == 0
    nPerNPassive = nTrial // len(nPassivePool)
    nPerNActive = nTrial // len(nActivePool)
    nPerTrialType = nTrial // nTrialType

    # col 0 is nPassive, col 1 is nActive
    # pdb.set_trace()
    trialTypeTuples = cartesian([nPassivePool, nActivePool])

    # get queue of how many obs per round
    trialParamsQueue = npa(make_nObsQueue(trialTypeTuples, nTrial, rng))
    nPassiveQueue = trialParamsQueue[:, 0]
    nActiveQueue = trialParamsQueue[:, 1]

    return {'nPassiveObsQueue': nPassiveQueue,
            'nActiveObsQueue': nActiveQueue}
Example #48
def fast_triangle_mesh_drawer(tris, origin, look):
  # shouldn't do anything
  look /= norm(look)

  img = np.zeros((Y, X))
  zimg = np.ones((Y, X))*np.inf

  def project_point(pt):
    # vector from pt to origin
    vv = pt - origin
    v = vv/norm(vv)

    # real projection shit
    vx = npa((v[0], 0, v[2]))
    lx = npa((look[0], 0, look[2]))
    vy = npa((0, v[1], v[2]))
    ly = npa((0, look[1], look[2]))
    def ang(v1, v2):
      v1 /= norm(v1)
      v2 /= norm(v2)
      angl = np.dot(v1, v2)
      crs = np.cross(v1, v2)
      if np.sum(crs) >= 0.0:
        return np.arccos(angl)
      else:
        return -np.arccos(angl)

    x = (ang(vx, lx) / arcrad_per_pixel)
    y = (ang(vy, ly) / arcrad_per_pixel)

    # add z for z-buffering
    # z is the distance of the point from the plane formed by look and origin

    # project v on to look
    z = np.dot(v, look) * norm(vv)

    """
    print " *** "
    print v, K
    print pt, x, y
    """

    return int(round(x + X/2)),int(round(Y/2 - y)), z

  # project the triangles into 2D space
  # does this projection preserve the u and v
  DRAW_WIREFRAME = False
  if DRAW_WIREFRAME:
    lines = []
    for tr in tris:
      p0, p1, p2 = project_point(tr[0]), project_point(tr[1]), project_point(tr[2])
      lines.append((p0[0:2], p1[0:2]))
      lines.append((p1[0:2], p2[0:2]))
      lines.append((p2[0:2], p0[0:2]))

    for pt1, pt2 in lines:
      rr, cc, val = line_aa(pt1[1], pt1[0], pt2[1], pt2[0])

      # filter
      rr[np.logical_or(rr < 0, rr >= Y)] = 0
      cc[np.logical_or(cc < 0, cc >= X)] = 0

      img[rr, cc] = val
  else:
    # z-buffering, keeping the quality low in line with the rest of the program
    polys = []
    for tr in tris:
      xyz = npa([project_point(pt) for pt in tr])

      # get min and max
      xmin, xmax = np.min(xyz[:, 0]), np.max(xyz[:, 0])
      ymin, ymax = np.min(xyz[:, 1]), np.max(xyz[:, 1])

      # on screen
      xmin = np.clip(xmin, 0, X).astype(int)
      xmax = np.clip(xmax, 0, X).astype(int)
      ymin = np.clip(ymin, 0, Y).astype(int)
      ymax = np.clip(ymax, 0, Y).astype(int)

      # triangle in 3 space
      vs1 = xyz[1][0:2] - xyz[0][0:2]
      vs2 = xyz[2][0:2] - xyz[0][0:2]
      vsx = np.cross(vs1, vs2)

      # shade
      shade = random.uniform(0.3, 1.0)

      for x in range(xmin, xmax):
        for y in range(ymin, ymax):
          q = npa([x,y]) - xyz[0][0:2]
          u = np.cross(q, vs2) / vsx
          v = np.cross(vs1, q) / vsx
          if u >= 0 and v >= 0 and u+v <= 1.0:
            pt_xyz = (1-u-v)*xyz[0] + u*xyz[1] + v*xyz[2]
            if pt_xyz[2] < zimg[y,x]:
              zimg[y,x] = pt_xyz[2]
              img[y,x] = shade
      #polys.append((np.mean(xyz[:, 2]), xyz))
     
    """
    polys = sorted(polys, reverse=True, key=lambda x: x[0])
    for _, xyz in polys:
      rr, cc = polygon(xyz[:, 1], xyz[:, 0])

      rr[np.logical_or(rr < 0, rr >= Y)] = 0
      cc[np.logical_or(cc < 0, cc >= X)] = 0
      img[rr, cc] = random.uniform(0.3, 1.0)
    """
  return img
Example #49
get_ipython().magic(u'pylab inline')
import numpy as np
from numpy import array as npa

def gauss(mean, covar, x):
    """
    Bivariate Gaussian density with diagonal covariance; covar holds per-axis standard deviations.
    1/(2*pi*s1*s2) * exp(-1/2 * ((x1-mean1)**2/s1**2 + (x2-mean2)**2/s2**2))
    """
    Xmu = x[0]-mean[0]
    Ymu = x[1]-mean[1]
    z = Xmu**2 / (covar[0]**2) + Ymu**2 / (covar[1]**2)
    denom = 2 * np.pi * covar[0] * covar[1]
    return np.exp(-z / 2.) / denom

data = np.array([npa([0.,0.02]), npa([.1,0.005]), npa([-0.1,.01]),
                 npa([1.01,1.]), npa([.99,0.9]), npa([1.02,1.1])])

mean1 = npa([-.5, -.5])
covar1 = npa([.4, .4])
mean2 = npa([1.5, 1.5])
covar2 = npa([.4, .4])

def plotme(mean1, covar1, mean2, covar2, data):
    x, y = np.mgrid[-1.5:2.5:.01, -1.5:2.5:.01]
    pos = np.empty(x.shape + (2,))
    pos[:, :, 0] = x; pos[:, :, 1] = y
    contourf(x, y,
             [gauss(mean1, covar1, [xi, yi]) + gauss(mean2, covar2, [xi, yi])
              for (xi, yi) in zip(x, y)])
    scatter([d[0] for d in data], [d[1] for d in data], marker='v', color='w')
Example #50
def firstBookLengthN(n):
    """returns the index of the first book of length n"""
    # determine how many books with len < lenBook exist
    nShorter = npa([cardPool**l for l in range(n)]).sum()
    return nShorter  # no need for +1 bc of the empty book
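A worked check with a hypothetical alphabet of cardPool = 2 symbols: books shorter than length 3 number 2**0 + 2**1 + 2**2 = 7, and index 0 is the empty book, so:

cardPool = 2
print(firstBookLengthN(3))  # -> 7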
Example #51
		# Simpler distance
	
		# Get the pairwise comparison vectors
		for language in languages:
			language.get_pairwise()

		# Calculate distance between languages
		for l1 in range(lcount):
			for l2 in range(l1,lcount):
				add = 0
				for i in range(len(languages[l1].pairwise_ranking)):
					add += languages[l1].pairwise_ranking[i] - languages[l2].pairwise_ranking[i]
				distances[l1][l2] = add

	# Cluster!
	npdistances = npa(distances)
	thecluster = fastcluster.linkage(npdistances, method=m)
	# Using average as default; options: single, complete, average, weighted

	# Output the cluster as a png
#	dendrogram = scipy.cluster.hierarchy.dendrogram(thecluster, labels=languagenames)
#	plt.savefig('temp.png')
#	plt.cla()

	# Get labeled nodes from gold tree
	goldlabeled = []
	for family in families:
		goldlabeled += get_nodes(family.languages)
	rootall = set(languagenames)
	if rootall not in goldlabeled:
		goldlabeled.append(rootall)
Example #52
def ei_trialSeries(trialSeries):
    wid = trialSeries.workerid
    condition = trialSeries.condition
    counterbalance = trialSeries.counterbalance
    lenscale = trialSeries.LENSCALE
    sigvar = trialSeries.SIGVAR
    noisevar2 = 1e-7
    DOMAIN = np.linspace(0, 1, 1028)
    KDOMAIN = gp.K_se(DOMAIN, DOMAIN, lenscale, sigvar)


    # trial by trial fits to expected improvement
    nPassiveObs = len(trialSeries.xPassiveObs)
    nActiveObs = len(trialSeries.xActiveObs)
    xActive = trialSeries.xActiveObs
    yActive = trialSeries.yActiveObs
    xPassive = trialSeries.xPassiveObs
    yPassive = trialSeries.yPassiveObs

    minidicts = []
    for iActive in range(nActiveObs):
        # get active obs to this point
        xAct = xActive[:iActive]
        yAct = yActive[:iActive]
        # combine all obs seen to this point
        xObs = npa(xAct + xPassive)
        yObs = npa(yAct + yPassive)
        xBest = xObs.max()
        yBest = yObs.max()
        # USING TRUE LENSCALE
        # get posterior
        mu = gp.conditioned_mu(DOMAIN, xObs, yObs, lenscale, sigvar, noisevar2)
        cm = gp.conditioned_covmat(DOMAIN, KDOMAIN, xObs, lenscale, sigvar, noisevar2)
        sd = np.diag(cm)
        # get EI guess
        eiout = acq.EI(yBest, mu, sd, DOMAIN)
        xEI = eiout['xmax']
        yEI = eiout['fmax']
        # get subject guess
        xSub = xActive[iActive]
        ySub = yActive[iActive]
        # compare
        xDiff = xSub - xEI
        # store
        minidicts.append({'xEI': xEI,
                        'yEI': yEI,
                        'xSub': xSub,
                        'ySub': ySub,
                        'xDiff': xDiff,
                        'iActive': iActive,
                        'xAct': xAct,
                        'yAct': yAct,
                        'xPassive': xPassive,
                        'yPassive': yPassive,
                        'exp_ls': lenscale,
                        'workerid': wid,
                        'condition': condition,
                        'counterbalance': counterbalance})
    return DataFrame(minidicts)
Example #53
    ax1.set_xlim(0, len(bands.kpoints))

    # Density of state
    # ----------------

    ax2.set_yticklabels([])
    ax2.grid()
    ax2.set_xticks(np.arange(0, 1.5, 0.4))
    ax2.set_xticklabels(np.arange(0, 1.5, 0.4))
    ax2.set_xlim(1e-6, 1.5)
    ax2.hlines(y=0, xmin=0, xmax=1.5, color="k", lw=2)
    ax2.set_xlabel("Density of State")

    # s contribution
    ax2.plot(npa(dosrun.pdos[0][Orbital.s][Spin.up]) +
             npa(dosrun.pdos[1][Orbital.s][Spin.up]),
             dosrun.tdos.energies - dosrun.efermi,
             "r-", label="s", linewidth=2)

    # px py contribution
    ax2.plot(npa(dosrun.pdos[0][Orbital.px][Spin.up]) +
             npa(dosrun.pdos[1][Orbital.px][Spin.up]) +
             npa(dosrun.pdos[0][Orbital.py][Spin.up]) +
             npa(dosrun.pdos[1][Orbital.py][Spin.up]),
             dosrun.tdos.energies - dosrun.efermi,
             "g-",
             label="(px, py)",
             linewidth=2)

    # pz contribution
Example #54
def jsonToNpa(jstr, npa_type=float):
    array = loads(jstr)
    return npa([npa_type(x) for x in array])
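Usage sketch:

print(jsonToNpa('[1, 2, 3]'))                 # -> [1. 2. 3.]
print(jsonToNpa('[1, 2, 3]', npa_type=int))   # -> [1 2 3]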
Example #55
LENSCALEPOWSOF2 = [2., 4., 6.]
LENSCALEPOOL = [1./2.**n for n in LENSCALEPOWSOF2]

DOMAINBOUNDS = [[0., 1.]]
DOMAINRES = 1024
DOMAIN = make_domain_grid(DOMAINBOUNDS, DOMAINRES).flatten()
EDGEBUF = 0.05  # passive observations won't be placed closer than EDGEBUF to the domain/screen edges
XSAM_BOUNDS = [dim[:] for dim in DOMAINBOUNDS]  # deep copy of DOMAINBOUNDS
XSAM_BOUNDS[0][0] = EDGEBUF
XSAM_BOUNDS[0][1] -= EDGEBUF

# made with numpy.random.randint(4294967295, size=20)  # (number is max allowed on amazon linux)
RNGSEEDPOOL =\
    npa([1903799985, 1543581047, 1218602148,  764353219, 1906699770,
         951675775, 2101131205, 1792109879,  781776608, 2388543424,
         2154736893, 2773127409, 3304953852,  678883645, 3097437001,
         3696226994,  242457524,  991216532, 2747458246, 2432174005])

## LOAD GP STUFF INTO WORKSPACE
@custom_code.route('/init_experiment', methods=['GET'])
def init_experiment():
    if 'condition' not in request.args:
        raise ExperimentError('improper_inputs')  # i don't like returning HTML to JSON requests... maybe should change this

    condition = int(request.args['condition'])
    counterbalance = int(request.args['counterbalance'])

    ## END FREE VARS
    lenscale = LENSCALEPOOL[condition]
    rngseed = RNGSEEDPOOL[counterbalance]
    rng = RandomState(rngseed)
Example #56
def eiregret_trialSeries(trialSeries, ls=None, firstN=None):
    if not ls: ls = trialSeries.LENSCALE  # default: use the experiment's lenscale
    wid = trialSeries.workerid
    itrial = trialSeries.itrial
    condition = trialSeries.condition
    counterbalance = trialSeries.counterbalance
    sigvar = trialSeries.SIGVAR
    noisevar2 = 1e-7
    DOMAIN = np.linspace(0, 1, 1028)
    KDOMAIN = gp.K_se(DOMAIN, DOMAIN, ls, sigvar)

    # trial by trial fits to expected improvement
    nPassiveObs = len(trialSeries.xPassiveObs)
    nActiveObs = len(trialSeries.xActiveObs)
    xActive = trialSeries.xActiveObs
    yActive = trialSeries.yActiveObs
    xPassive = trialSeries.xPassiveObs
    yPassive = trialSeries.yPassiveObs

    minidicts = []
    if firstN: nActiveObs = firstN
    for iActive in range(nActiveObs):  # run analysis for each active choice
        # get active obs to this point
        xAct = xActive[:iActive]
        yAct = yActive[:iActive]
        # combine all obs seen to this point
        xObs = npa(xAct + xPassive)
        yObs = npa(yAct + yPassive)
        xBest = xObs.max()
        yBest = yObs.max()
        # get posterior
        mu = gp.conditioned_mu(DOMAIN, xObs, yObs, ls, sigvar, noisevar2)
        cm = gp.conditioned_covmat(DOMAIN, KDOMAIN, xObs, ls, sigvar, noisevar2)
        sd = np.diag(cm)
        # get EI guess
        eiout = acq.EI(yBest, mu, sd, DOMAIN, return_whole_domain=True)
        xEI = eiout['xmax']
        yEI = eiout['fmax']
        domainEI = eiout['fall']
        iEI = np.where(DOMAIN>=xEI)[0][0]
        # get subject guess
        xSub = xActive[iActive]
        ySub = yActive[iActive]
        iSub = np.where(DOMAIN>=xSub)[0][0]
        # get regret
        evyEI = domainEI[iEI]
        evySub = domainEI[iSub]
        # compare
        regret = evyEI - evySub
        # store
        minidicts.append({'regret': regret,
                        'evyEI': evyEI,
                        'evySub': evySub,
                        'xEI': xEI,
                        'yEI': yEI,
                        'xSub': xSub,
                        'ySub': ySub,
                        'iActive': iActive,
                        'xPassive': xPassive,
                        'yPassive': yPassive,
                        'lenscale': ls,
                        'xAct': xAct,
                        'yAct': yAct,
                        'workerid': wid,
                        'itrial': itrial,
                        'condition': condition,
                        'counterbalance': counterbalance})
    return DataFrame(minidicts)
Example #57
     [0.,         -3.75, 0.,          115.617    ],
     [0.86045705,  0.,   7.78655376, -27.91161211],
     [0.,          0.,   0.,           1.        ]])
# Affine from Philips-created NIfTI
PHILIPS_AFFINE = np.array(
    [[  -3.65  ,   -0.0016,    1.8356,  125.4881],
     [   0.0016,   -3.75  ,   -0.0004,  117.4916],
     [   0.8604,    0.0002,    7.7866,  -28.3411],
     [   0.    ,    0.    ,    0.    ,    1.    ]])

# Affines generated by parrec.py from test data in many orientations
# Data from http://psydata.ovgu.de/philips_achieva_testfiles/conversion2
PREVIOUS_AFFINES={
    "Phantom_EPI_3mm_cor_20APtrans_15RLrot_SENSE_15_1" :
    npa([[  -3.        ,    0.        ,    0.        ,  118.5       ],
         [   0.        ,   -0.77645714,   -3.18755523,   72.82738377],
         [   0.        ,   -2.89777748,    0.85410285,   97.80720486],
         [   0.        ,    0.        ,    0.        ,    1.        ]]),
    "Phantom_EPI_3mm_cor_SENSE_8_1" :
    npa([[  -3.  ,    0.  ,    0.  ,  118.5 ],
         [   0.  ,    0.  ,   -3.3 ,   64.35],
         [   0.  ,   -3.  ,    0.  ,  118.5 ],
         [   0.  ,    0.  ,    0.  ,    1.  ]]),
    "Phantom_EPI_3mm_sag_15AP_SENSE_13_1" :
    npa([[   0.        ,    0.77645714,    3.18755523,  -92.82738377],
         [  -3.        ,    0.        ,    0.        ,  118.5       ],
         [   0.        ,   -2.89777748,    0.85410285,   97.80720486],
         [   0.        ,    0.        ,    0.        ,    1.        ]]),
    "Phantom_EPI_3mm_sag_15FH_SENSE_12_1" :
    npa([[   0.77645714,    0.        ,    3.18755523,  -92.82738377],
         [  -2.89777748,    0.        ,    0.85410285,   97.80720486],
         [   0.        ,   -3.        ,    0.        ,  118.5       ],
Example #58

# Plot effect of relevance feedback as we change parameters.
import numpy as np
from numpy import array as npa
import random as rnd

def centroid(docs):
    return np.sum(docs, axis=0) / len(docs)

def rocchio(query, relevant, irrelevant, alpha, beta, gamma):
    return alpha * query + beta * centroid(relevant) - gamma * centroid(irrelevant) 

# Create some documents
relevant = npa([[1, 5], [1.1, 5.1], [0.9, 4.9], [1.0, 4.8]])
irrelevant = npa([[rnd.random()*6, rnd.random()*6] for i in range(30)])

# Create a query
query = npa([.1, .1])

# Compute two different Rocchio updates (beta=0.5, beta=0)
new_query_b5 = rocchio(query, relevant, irrelevant, 1., .75, .5)
new_query_b0 = rocchio(query, relevant, irrelevant, 1., .75, 0.)

# Plot them.
pos = scatter([p[0] for p in relevant], [p[1] for p in relevant])
neg = scatter([p[0] for p in irrelevant], [p[1] for p in irrelevant], marker='+', edgecolor='red')
q = scatter(query[0], query[1], marker='v', c='0.1', s=100)  # grayscale color strings
newq_b5 = scatter([new_query_b5[0]], [new_query_b5[1]], marker='*', s=100, c='.9')
newq_b0 = scatter([new_query_b0[0]], [new_query_b0[1]], marker='d', s=100, c='.8')