Example #1
def sqcover(A,n):
    edge = sp.sqrt(A) # the length of an edge
    d = edge/n # the distance between two adjacent points
    r = d/2 # half the spacing: the "radius" of each covering square
    end = edge - r # end point
    base = sp.linspace(r, end, n)
    first_line = sp.transpose(sp.vstack((base, r*sp.ones(n))))
    increment = sp.transpose(sp.vstack((sp.zeros(n), d*sp.ones(n))))
    pts = first_line
    y_diff = increment
    for i in range(n-1):
        pts = sp.vstack((pts, first_line + y_diff))
        y_diff = y_diff + increment
    
    # Color matter
    colors = []
    for p in pts:
        cval = n*p[0] + p[1] # the x-coord has a higher weight
        cval = colormap.Spectral(cval/((n+1)*end)) # normalize by the max value that cval can take.
        colors.append(cval)

    colors = sp.array(colors)

    cover = (pts, r, colors)
    return cover
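A minimal usage sketch, assuming scipy is imported as sp and matplotlib.cm as colormap, which is what the function body implies:

import scipy as sp
import matplotlib.cm as colormap
import matplotlib.pyplot as plt

# Cover a square of area 4 with a 5 x 5 grid of colored points
pts, r, colors = sqcover(4, 5)
plt.scatter(pts[:, 0], pts[:, 1], c=colors)
plt.gca().set_aspect('equal')
plt.show()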
Example #2
def PESvsaq(optstate,persist,**para):
    para = copy.deepcopy(para)
    if persist is None:
        persist = {'n':0,'d':len(para['ub'])}
    n = persist['n']
    d = persist['d']
    if n<para['nrandinit']:
        persist['n']+=1
        
        return randomaq(optstate,persist,**para)
    logger.info('PESvsaq')
    #logger.debug(sp.vstack([e[0] for e in optstate.ev]))
    #raise
    x=sp.vstack(optstate.x)
    y=sp.vstack(optstate.y)
    s= sp.vstack([e['s'] for e in optstate.ev])
    dx=[e['d'] for e in optstate.ev]
    
    pesobj = PES.PES(x,y,s,dx,para['lb'],para['ub'],para['kindex'],para['mprior'],para['sprior'],DH_SAMPLES=para['DH_SAMPLES'],DM_SAMPLES=para['DM_SAMPLES'], DM_SUPPORT=para['DM_SUPPORT'],DM_SLICELCBPARA=para['DM_SLICELCBPARA'],mode=para['SUPPORT_MODE'],noS=para['noS'])
    
    
        
    [xmin,ymin,ierror] = pesobj.search_acq(para['cfn'],para['logsl'],para['logsu'],volper=para['volper'])
    
    logger.debug([xmin,ymin,ierror])
    para['ev']['s']=10**xmin[-1]
    xout = [i for i in xmin[:-1]]
    return xout,para['ev'],persist,{'HYPdraws':[k.hyp for k in pesobj.G.kf],'mindraws':pesobj.Z,'DIRECTmessage':ierror,'PESmin':ymin}
Example #3
def EIMAPaq(optstate,persist,ev=None, ub = None, lb=None, nrandinit=None, mprior=None,sprior=None,kindex = None,directmaxiter=None):
    if persist is None:
        persist = {'n':0,'d':len(ub)}
    n = persist['n']
    d = persist['d']
    if n<nrandinit:
        persist['n']+=1
        return randomaq(optstate,persist,ev=ev,lb=lb,ub=ub)
    logger.info('EIMAPaq')
    #logger.debug(sp.vstack([e[0] for e in optstate.ev]))
    #raise
    x=sp.vstack(optstate.x)
    y=sp.vstack(optstate.y)
    s= sp.vstack([e['s'] for e in optstate.ev])
    dx=[e['d'] for e in optstate.ev]
    MAP = GPdc.searchMAPhyp(x,y,s,dx,mprior,sprior, kindex)
    logger.info('MAPHYP {}'.format(MAP))

    G = GPdc.GPcore(x,y,s,dx,GPdc.kernel(kindex,d,MAP))
    def directwrap(xq,y):
        xq.resize([1,d])
        a = G.infer_lEI(xq,[ev['d']])
        return (-a[0,0],0)
    
    [xmin,ymin,ierror] = DIRECT.solve(directwrap,lb,ub,user_data=[], algmethod=0, maxf = directmaxiter, logfilename='/dev/null')
    #logger.debug([xmin,ymin,ierror])
    persist['n']+=1
    return [i for i in xmin],ev,persist,{'MAPHYP':MAP,'logEImin':ymin,'DIRECTmessage':ierror}
Example #4
def update():
    global i
    if i == tvec.shape[0]-1:
        i = 0
    else:
        i = i + 1
    
    if show_left:
        poi_left_scatter.setData(pos=sp.expand_dims(poi_left_pos[i],0))
        hand_left_scatter.setData(pos=sp.expand_dims(hand_left_pos[i],0))
        string_left_line.setData(pos=sp.vstack((hand_left_pos[i],poi_left_pos[i])))
#        arm_left.setData(pos=sp.vstack((hand_left_pos[i],[0,-1*shoulder_width/2,0])))
        arm_left.setData(pos=sp.vstack((hand_left_pos[i],[0,0,offset])))
    else:
        poi_left_scatter.hide()
        poi_left_line.hide()
        hand_left_scatter.hide()
        hand_left_line.hide()
        string_left_line.hide()
        arm_left.hide()
    
    if show_right:
        poi_right_scatter.setData(pos=sp.expand_dims(poi_right_pos[i],0))
        hand_right_scatter.setData(pos=sp.expand_dims(hand_right_pos[i],0))
        string_right_line.setData(pos=sp.vstack((hand_right_pos[i],poi_right_pos[i])))
#        arm_right.setData(pos=sp.vstack((hand_right_pos[i],[0,shoulder_width/2,0])))
        arm_right.setData(pos=sp.vstack((hand_right_pos[i],[0,0,offset])))
    else:
        poi_right_scatter.hide()
        poi_right_line.hide()
        hand_right_scatter.hide()
        hand_right_line.hide()
        string_right_line.hide()
        arm_right.hide()
Example #5
    def Ei(self, Pp, i):
        """ Calculate E_i^P

        Parameters
        -------------
        Pp : ndarray, shape (n, k)
             Conditional choice probabilities for provinces
        i : int, 1 to k
            Province index

        Returns
        -----------
        Ei : ndarray, shape (n, )
             Values of :math:`E_i^P(l, s)` in part (b)

        Notes
        ----------
        
        .. math::
                        
           E_i^P(l, s) = \sum_{a=0}^1 P_i[a | l, s] E_i^P(a, l, s)

        """
        E = sp.vstack([self.Ei_ai(Pp, i, a) for a in (0, 1)]).T
        W = sp.vstack([Pp[:, _pp(i, a)] for a in (0, 1)]).T
        return (E * W).sum(1)
Example #6
def calc_probability_matrix(trains_a, trains_b, metric, tau, z):
    """ Calculates the probability matrix that one spike train from stimulus X
    will be classified as spike train from stimulus Y.

    :param list trains_a: Spike trains of stimulus A.
    :param list trains_b: Spike trains of stimulus B.
    :param str metric: Metric to base the classification on. Has to be a key in
        :const:`metrics.metrics`.
    :param tau: Time scale parameter for the metric.
    :type tau: Quantity scalar.
    :param float z: Exponent parameter for the classifier.
    """

    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", "divide by zero")
        dist_mat = calc_single_metric(trains_a + trains_b, metric, tau) ** z
    dist_mat[sp.diag_indices_from(dist_mat)] = 0.0

    assert len(trains_a) == len(trains_b)
    l = len(trains_a)
    classification_of_a = sp.argmin(sp.vstack((
        sp.sum(dist_mat[:l, :l], axis=0) / (l - 1),
        sp.sum(dist_mat[l:, :l], axis=0) / l)) ** (1.0 / z), axis=0)
    classification_of_b = sp.argmin(sp.vstack((
        sp.sum(dist_mat[:l, l:], axis=0) / l,
        sp.sum(dist_mat[l:, l:], axis=0) / (l - 1))) ** (1.0 / z), axis=0)
    confusion = sp.empty((2, 2))
    confusion[0, 0] = sp.sum(classification_of_a == 0)
    confusion[1, 0] = sp.sum(classification_of_a == 1)
    confusion[0, 1] = sp.sum(classification_of_b == 0)
    confusion[1, 1] = sp.sum(classification_of_b == 1)
    return confusion / 2.0 / l
Example #7
def _pair_overlap(waves1, waves2, mean1, mean2, cov1, cov2):
    """ Calculate FP/FN estimates for two gaussian clusters
    """
    from sklearn import mixture

    means = sp.vstack([[mean1], [mean2]])
    covars = sp.vstack([[cov1], [cov2]])
    weights = sp.array([waves1.shape[1], waves2.shape[1]], dtype=float)
    weights /= weights.sum()

    # Create mixture of two Gaussians from the existing estimates
    mix = mixture.GMM(n_components=2, covariance_type="full", init_params="")
    mix.covars_ = covars
    mix.weights_ = weights
    mix.means_ = means

    posterior1 = mix.predict_proba(waves1.T)[:, 1]
    posterior2 = mix.predict_proba(waves2.T)[:, 0]

    return (
        posterior1.mean(),
        posterior2.sum() / len(posterior1),
        posterior2.mean(),
        posterior1.sum() / len(posterior2),
    )
Example #8
def gpmapasrecc(optstate, **para):
    if para["onlyafter"] > len(optstate.y) or not len(optstate.y) % para["everyn"] == 0:
        return [sp.NaN for i in para["lb"]], {"didnotrun": True}
    logger.info("gpmapas recommender")
    d = len(para["lb"])

    x = sp.hstack([sp.vstack(optstate.x), sp.vstack([e["xa"] for e in optstate.ev])])

    y = sp.vstack(optstate.y)
    s = sp.vstack([e["s"] for e in optstate.ev])
    dx = [e["d"] for e in optstate.ev]
    MAP = GPdc.searchMAPhyp(x, y, s, dx, para["mprior"], para["sprior"], para["kindex"])
    logger.info("MAPHYP {}".format(MAP))
    G = GPdc.GPcore(x, y, s, dx, GPdc.kernel(para["kindex"], d + 1, MAP))

    def directwrap(xq, y):
        xq.resize([1, d])
        xe = sp.hstack([xq, sp.array([[0.0]])])
        # print xe
        a = G.infer_m(xe, [[sp.NaN]])
        return (a[0, 0], 0)

    [xmin, ymin, ierror] = DIRECT.solve(
        directwrap, para["lb"], para["ub"], user_data=[], algmethod=1, volper=para["volper"], logfilename="/dev/null"
    )
    logger.info("reccsearchresult: {}".format([xmin, ymin, ierror]))
    return [i for i in xmin], {"MAPHYP": MAP, "ymin": ymin}
Example #9
    def gettimes(ionocontlist):
        """
        This static method will take a list of files, or a single string, and
        determine the time ordering, giving the sort order the files should be in.
        Inputs
            ionocontlist- A list of IonoContainer h5 files. Can also be a single
            string of a file name.
        Outputs
            sortlist - A numpy array of integers that will chronologically order
            the files
            outtime - A Nt x 2 numpy array of all of the times.
            fileslist - The file index for each time, in chronological order.
            timebeg - A list of beginning times
            timelist_s - The per-file time arrays, in chronological order.
        """
        if isinstance(ionocontlist,string_types):
            ionocontlist=[ionocontlist]
        timelist=[]
        fileslist = []
        for ifilenum,ifile in enumerate(ionocontlist):
            with tables.open_file(str(ifile)) as f:
                times = f.root.Time_Vector.read()


            timelist.append(times)
            fileslist.append(ifilenum*sp.ones(len(times)))
        times_file =sp.array([i[:,0].min() for i in timelist])
        sortlist = sp.argsort(times_file)

        timelist_s = [timelist[i] for i in sortlist]
        timebeg = times_file[sortlist]
        fileslist = sp.vstack([fileslist[i][0] for i in sortlist]).flatten().astype('int64')
        outtime = sp.vstack(timelist_s)
        return (sortlist,outtime,fileslist,timebeg,timelist_s)
Example #10
def MNEfit(stim,resp,order):
    # in order for dlogloss to work, we need to know -<g(yt(n),xt)>data
    # == calculate the constrained averages over the data set
    Nsamples = sp.size(stim,0)
    Ndim = sp.size(stim,1)
    psp = sp.mean(sp.mean(resp)) #spike probability (first constraint)
    avg = (1.0*stim.T*resp)/(Nsamples*1.0)
    avgs = sp.vstack((psp,avg))
    if(order > 1):
        avgsqrd = (stim.T*1.0)*(sp.array(sp.tile(resp,(1,Ndim)))*sp.array(stim))/(Nsamples*1.0)
        avgsqrd = sp.reshape(avgsqrd,(Ndim**2,1))
        avgs = sp.vstack((avgs,avgsqrd))
    
    #initialize params:
    pstart = sp.log(1/avgs[0,0] - 1)
    pstart = sp.hstack((pstart,(.001*(2*sp.random.rand(Ndim)-1))))
    if(order > 1):
        temp = .0005*(2*sp.random.rand(Ndim,Ndim)-1)
        pstart = sp.hstack((pstart,sp.reshape(temp+temp.T,(1,Ndim**2))[0]))
    
    #redefine functions with fixed vals:
    def logLoss(p):
        return LLF.log_loss(p, stim, resp, order)
    def dlogLoss(p):
        return LLF.d_log_loss(p, stim, avgs, order)
    #run the function:
    #pfinal = opt.fmin_tnc(logLoss,pstart,fprime=dlogLoss)
    # conjugate-gradient:
    pfinal = opt.fmin_cg(logLoss,pstart,fprime=dlogLoss)
    #pfinal = opt.fmin(logLoss,pstart,fprime=dlogLoss)
    return pfinal
Example #11
 def infer_diag(self,X_i,D_i):
     ns=X_i.shape[0]
     D = [0 if sp.isnan(x[0]) else int(sum([8**i for i in x])) for x in D_i]
     R=sp.vstack([sp.empty([2,ns])]*self.size)
     libGP.infer_diag(self.s,cint(self.size), ns,X_i.ctypes.data_as(ctpd),(cint*len(D))(*D),R.ctypes.data_as(ctpd))
     m = sp.vstack([R[i*2,:] for i in xrange(self.size)])
     V = sp.vstack([R[i*2+1,:] for i in xrange(self.size)])
     return [m,V]
Example #12
def my_bh_fdr(p_val_vec):
    index = scipy.argsort(p_val_vec)
    exp_err = scipy.vstack((float(len(p_val_vec))/scipy.arange(1,len(p_val_vec) + 1)*p_val_vec[index],
                                      scipy.tile(1, [1, len(p_val_vec)]))).min(axis = 0)
    exp_err = scipy.vstack((exp_err,exp_err[scipy.r_[0,scipy.arange(len(exp_err)-1)]])).max(axis=0)
    #scipy.r_[index[0], index[range(len(index)-1)]
    resort_index = scipy.argsort(index)                 
    return exp_err[resort_index]
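A minimal usage sketch; the Benjamini-Hochberg adjusted values come back in the original order of the input vector:

import scipy

p_vals = scipy.array([0.01, 0.40, 0.03, 0.005])
print(my_bh_fdr(p_vals))
# -> [ 0.02  0.4   0.04  0.02]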
Example #13
def test_skip():
    """Test if only keeping every n'th sample works."""
    X = scipy.vstack((scipy.arange(25), scipy.arange(25)))
    X_ = skip(X, 2, 5)
    print X_
    des = scipy.vstack((scipy.array([0, 1, 2, 3, 4, 10, 11, 12, 13, 14, 20, 21, 22, 23, 24]),
                       scipy.array([0, 1, 2, 3, 4, 10, 11, 12, 13, 14, 20, 21, 22, 23, 24])))

    assert (X_ == des).all(), 'wrong result'
Example #14
def extract_spikes(train, signals, length, align_time):
    """ Extract spikes with waveforms from analog signals using a spike train. 
    Spikes that are too close to the beginning or end of the shortest signal
    to be fully extracted are ignored.

    :type train: :class:`neo.core.SpikeTrain`
    :param train: The spike times.
    :param sequence signals: A sequence of :class:`neo.core.AnalogSignal`
        objects from which the spikes are extracted. The waveforms of
        the returned spikes are extracted from these signals in the
        same order they are given.
    :type length: Quantity scalar
    :param length: The length of the waveform to extract as time scalar.
    :type align_time: Quantity scalar
    :param align_time: The alignment time of the spike times as time scalar.
        This is the time delta from the start of the extracted waveform
        to the exact time of the spike.
    :returns: A list of :class:`neo.core.Spike` objects, one for each
        time point in ``train``. All returned spikes include their
        ``waveform`` property.
    :rtype: list
    """
    if len(set(s.sampling_rate for s in signals)) > 1:
        raise ValueError(
            'All signals for spike extraction need the same sampling rate')

    wave_unit = signals[0].units
    srate = signals[0].sampling_rate
    end = min(s.shape[0] for s in signals)

    aligned_train = train - align_time
    cut_samples = int((length * srate).simplified)

    st = sp.asarray((aligned_train * srate).simplified)

    # Find extraction epochs
    st_ok = (st >= 0) * (st < end - cut_samples)
    epochs = sp.vstack((st[st_ok], st[st_ok] + cut_samples)).T

    nspikes = epochs.shape[0]
    if not nspikes:
        return []

    # Create data
    data = sp.vstack([sp.asarray(s.rescale(wave_unit)) for s in signals])
    nc = len(signals)

    spikes = []
    for s in xrange(nspikes):
        waveform = sp.zeros((cut_samples, nc))
        for c in xrange(nc):
            waveform[:, c] = \
                data[c, epochs[s, 0]:epochs[s, 1]]
        spikes.append(neo.Spike(train[st_ok][s], waveform=waveform * wave_unit))

    return spikes
Example #15
def stripe2():
    Y1 = sp.vstack((sp.ones((50,1)), sp.zeros((50,1))))
    Y2 = sp.vstack((sp.zeros((50,1)), sp.ones((50,1))))
    Y = sp.hstack([Y1, Y2])

    X1 = sp.random.multivariate_normal([-2,2], [[1,.8],[.8,1]],size=50)
    X2 = sp.random.multivariate_normal([2,-1], [[1,.8],[.8,1]], size=50)
    X = sp.hstack((sp.ones((100,1)),sp.vstack([X1,X2])))

    return Y, X
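A minimal usage sketch; Y holds one-hot labels for the two classes and the first column of X is a bias term:

import scipy as sp

Y, X = stripe2()
print(Y.shape)   # (100, 2)
print(X.shape)   # (100, 3); first column is all ones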
Example #16
def load_single_player_data(use_existing=False, num_train=0):
    aa=np.load('/Users/amit/Desktop/Dropbox/Markov/IMSPL.npy')
    bb=np.load('/Users/amit/Desktop/Dropbox/Markov/IMSBGD.npy')
    aa=standardize_data(aa)
    bb=standardize_data(bb)


    #ii=np.int32(np.floor(np.random.rand(100)*bb.shape[0]))
    # py.figure(1)
    # for j,i in enumerate(ii):
    #     py.subplot(10,10,j+1)
    #     py.imshow(bb[i,:,:,:])
    #     py.axis('off')
    #     py.axis('equal')
    # py.show()
    if (num_train==0):
        num=aa.shape[0]
    else:
        num=np.minimum(aa.shape[0],num_train)
    if (not use_existing):
         ii=range(num)
         np.random.shuffle(ii)
         np.save('ii.npy',ii)
         aa=aa[ii,]
    else:
        if (os.path.isfile('ii.npy')):
            ii=np.load('ii.npy')
            aa=aa[ii,]
    train_num=np.int32(num/2)
    val_num=np.int32(num/4)
    test_num=np.int32(num/4)
    head=aa[:,0:25,:,:]
    body=aa[:,20:45,:,:]
    legs=aa[:,35:60,:,:]
    bgd=bb[:,20:45,:,:]
    val_start=train_num
    val_end=val_num+val_start
    test_start=val_end
    test_end=test_num+test_start
    X_train=scipy.vstack((head[0:train_num,],body[0:train_num,],legs[0:train_num],bgd[0:train_num,]))
    X_val=scipy.vstack((head[val_start:val_end,],body[val_start:val_end,],
                        legs[val_start:val_end,],bgd[val_start:val_end,]))
    X_test=scipy.vstack((head[test_start:test_end,],
                         body[test_start:test_end,],
                         legs[test_start:test_end,],
                         bgd[test_start:test_end,]))

    X_train=X_train.transpose((0,3,1,2)) #/256.
    X_val=X_val.transpose((0,3,1,2)) #/256.
    X_test=X_test.transpose((0,3,1,2)) #/256.
    y_train=np.repeat(range(4),train_num)
    y_val=np.repeat(range(4),val_num)
    y_test=np.repeat(range(4),test_num)

    return (np.float32(X_train),np.uint8(y_train),np.float32(X_val),np.uint8(y_val),np.float32(X_test),np.uint8(y_test))
Example #17
def broyden(func, x1, x2, tol=1e-5, maxiter=50):
    """Calculate the zero of a multi-dimensional function using Broyden's method"""
    
    def isscalar(x):
        if isinstance(x, sp.ndarray):
            if x.size == 1:
                return x.flatten()[0]
            else:
                return x
        else:
            return x

    def update_Jacobian(preJac, ch_x, ch_F):
        """Apply Broyden's rank-one update to the previous Jacobian,
        given the step ch_x and the change in function values ch_F"""
                
        frac = (ch_F-(preJac.dot(ch_x)))/(la.norm(ch_x)**2)

        Jac = preJac+sp.dot(isscalar(frac),ch_x.T)
        return Jac
        
    #stack the two starting points as column vectors
    x1 = sp.vstack(x1.flatten())
    x2 = sp.vstack(x2.flatten())
    
    fx1 = func(x1)
    fx2 = func(x2)
    
    #check our original points for zeros
    if abs(fx1) < tol:
        return x1
    elif abs(fx2) < tol:
        return x2

    #Calculate initial Jacobian matrix
    jac = Jacobian(func)(x1)
    mi = maxiter
    while abs(fx2) > tol and mi > 0:        
        fx1 = func(x1)
        fx2 = func(x2)
        ch_x=x2-x1
        ch_F=fx2-fx1
        
        jac = update_Jacobian(jac, ch_x, ch_F)
        y = la.lstsq(jac, sp.array([-fx2]))[0]
        xnew = y+x2
        x1 = x2
        x2 = xnew
        mi -= 1
    
    if mi==0:
        raise StopIteration("Did not converge in {} iterations".format(maxiter))
    else:
        return x2, maxiter-mi
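A minimal sketch of calling broyden on a scalar-valued function. The Jacobian factory below is a hypothetical forward-difference stand-in for whatever helper the original module provides, and la is assumed to be scipy.linalg:

import scipy as sp
import scipy.linalg as la

def Jacobian(func, h=1e-6):
    # Hypothetical stand-in: forward-difference Jacobian of a scalar-valued func
    def jac(x):
        fx = func(x)
        J = sp.zeros((1, x.size))
        for k in range(x.size):
            xh = x.astype(float).copy()
            xh.flat[k] += h
            J[0, k] = (func(xh) - fx) / h
        return J
    return jac

f = lambda x: float(x[0, 0]) ** 2 - 4.0   # scalar-valued, root at x = 2
root, n_used = broyden(f, sp.array([1.0]), sp.array([3.0]))
print(root)    # approximately [[ 2.]]
print(n_used)  # iterations consumed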
Example #18
def TCA(X_S, X_T, m=40, mu=0.1, kernel_para=1, p=2, random_sample_T=0.01):

    X_S = sp.mat(X_S)
    X_T = sp.mat(X_T)

    n_S = X_S.shape[0]
    n_T = X_T.shape[0]
    if random_sample_T != 1:
        print str(int(n_T * random_sample_T)) + " samples taken from the task domain"
        index_sample = sp.random.choice([i for i in range(n_T)], size=int(n_T * random_sample_T))
        X_T = X_T[index_sample, :]

        n_T = X_T.shape[0]

    n = n_S + n_T

    if m > (n):
        print ("m is larger than n_S+n_T, so it has been changed")
        m = n

    L = sp.zeros(shape=(n, n))
    L_SS = sp.ones(shape=(n_S, n_S)) / (n_S ** 2)
    L_TT = sp.ones(shape=(n_T, n_T)) / (n_T ** 2)
    L_ST = -sp.ones(shape=(n_S, n_T)) / (n_S * n_T)
    L_TS = -sp.ones(shape=(n_T, n_S)) / (n_S * n_T)

    L[0:n_S, 0:n_S] = L_SS
    L[n_S : n_S + n_T, n_S : n_S + n_T] = L_TT
    L[n_S : n_S + n_T, 0:n_S] = L_TS
    L[0:n_S, n_S : n_S + n_T] = L_ST

    R = pdist(sp.vstack([X_S, X_T]), metric="euclidean", p=p, w=None, V=None, VI=None)

    K = Gaussian(R, kernel_para, p)

    Id = sp.zeros(shape=(n, n))
    H = sp.zeros(shape=(n, n))
    sp.fill_diagonal(Id, 1)
    sp.fill_diagonal(H, 1)
    H -= 1.0 / n

    Id = sp.mat(Id)
    H = sp.mat(H)
    K = sp.mat(K)
    L = sp.mat(L)

    matrix = sp.linalg.inv(K * L * K + mu * Id) * sp.mat(K * H * K)

    eigen_values = sp.linalg.eig(matrix)

    eigen_val = eigen_values[0][0:m]
    eigen_vect = eigen_values[1][:, 0:m]
    return (eigen_val, eigen_vect, K, sp.vstack([X_S, X_T]))
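A minimal sketch. Here pdist is scipy.spatial.distance.pdist, and Gaussian is a hypothetical stand-in that turns the condensed distances into a dense RBF kernel, since the real helper is not shown:

import scipy as sp
from scipy.spatial.distance import pdist, squareform

def Gaussian(R, kernel_para, p):
    # Hypothetical stand-in: condensed distance vector -> dense RBF kernel
    return sp.exp(-kernel_para * squareform(R) ** p)

X_S = sp.random.rand(20, 3)   # source-domain samples
X_T = sp.random.rand(30, 3)   # target-domain samples
eigen_val, eigen_vect, K, X_all = TCA(X_S, X_T, m=5, random_sample_T=1)
print(K.shape)  # (50, 50)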
Example #19
    def radius( self, frame ):
        '''
        Bubble radius at one frame.
        Method:
        1. Load the snapshot at frame
        2. Load x, y, z coordinates 
        3. Calculate density grid mesh at grid points
        4. Filter the shell grids with density between low * max density and high * max density
        5. Calculate the average radius
        '''
        start = time.clock()

        self.set_frame( frame )

        # Load x, y, z coordinates
        data = pd.DataFrame( list(self.universe.coord), columns=['x','y','z'])
        x    = data[ 'x' ].values
        y    = data[ 'y' ].values
        z    = data[ 'z' ].values

        # Density grid
        xyz  = scipy.vstack( [ x, y, z ] )
        kde  = scipy.stats.gaussian_kde( xyz )
        xmin, ymin, zmin = x.min(), y.min(), z.min()
        xmax, ymax, zmax = x.max(), y.max(), z.max()
        NI         = complex( imag=self.density_grid_length)
        xi, yi, zi = scipy.mgrid[ xmin:xmax:NI, ymin:ymax:NI, zmin:zmax:NI ]
        coords     = scipy.vstack([item.ravel() for item in [xi, yi, zi]])
        density    = kde(coords).reshape(xi.shape)

        # Filter density grid
        density_max  = density.max()
        density_low  = self.density_low * density_max
        density_high = self.density_high * density_max

        xyzs = []
        N = self.density_grid_length
        for idx, idy, idz in product( xrange(N), xrange(N), xrange(N) ):
            if density_low < density[ idx, idy, idz ] <= density_high:
                xyzs.append( [ xi[ idx, idy, idz ], yi[ idx, idy, idz ], zi[ idx, idy, idz ] ] )
        xyzs = np.array( xyzs )

        # Average radius
        center = xyzs.mean( axis=0 )
        rs = []
        for xyz_ele in xyzs:
            rs.append( np.linalg.norm( center - xyz_ele ) )

        duration = time.clock() - start
        print( "Radius for frame {} calculated in {:.2f} seconds".format( frame, duration ) )

        return center, scipy.mean( rs )
Example #20
def store(old, new):
	old=old.reshape((1,len(old)))
	lold=old.shape[1]
	lnew=new.shape[1]
	if (lold==lnew):
		X=sc.vstack((old,new))
	elif (lold>lnew):
		new =sc.hstack((sc.zeros((1,lold-lnew)),new))
		X=sc.vstack((old,new))
	elif (lnew>lold):
		old =sc.hstack((old,sc.zeros((1,lnew-lold))))
		X=sc.vstack((old,new))
	return(X)
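A minimal usage sketch; old is a flat array, new a 1 x n row, and the shorter of the two is zero-padded (new on the left, old on the right):

import scipy as sc

old = sc.array([1., 2., 3.])
new = sc.array([[4., 5.]])
print(store(old, new))
# [[ 1.  2.  3.]
#  [ 0.  4.  5.]]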
Example #21
    def load(filename, network=None):
        r"""
        Loads data onto the given network from an appropriately formatted
        'mat' file (i.e. MatLAB output).

        Parameters
        ----------
        filename : string (optional)
            The name of the file containing the data to import.  The formatting
            of this file is outlined below.

        network : OpenPNM Network Object
            The Network object onto which the data should be loaded.  If no
            Network is supplied than one will be created and returned.

        Returns
        -------
        If no Network object is supplied then one will be created and returned.

        """
        net = {}

        import scipy.io as _spio
        data = _spio.loadmat(filename)
        # Deal with pore coords and throat conns specially
        if 'throat_conns' in data.keys():
            net.update({'throat.conns': _sp.vstack(data['throat_conns'])})
            Nt = _sp.shape(net['throat.conns'])[0]
            net.update({'throat.all': _sp.ones((Nt,), dtype=bool)})
            del data['throat_conns']
        else:
            logger.warning('\'throat_conns\' not found')
        if 'pore_coords' in data.keys():
            net.update({'pore.coords': _sp.vstack(data['pore_coords'])})
            Np = _sp.shape(net['pore.coords'])[0]
            net.update({'pore.all': _sp.ones((Np,), dtype=bool)})
            del data['pore_coords']
        else:
            logger.warning('\'pore_coords\' not found')

        # Now parse through all the other items
        items = [i for i in data.keys() if '__' not in i]
        for item in items:
            element = item.split('_')[0]
            prop = item.split('_', maxsplit=1)[1]
            net[element+'.'+prop] = _sp.squeeze(data[item].T)

        if network is None:
            network = OpenPNM.Network.GenericNetwork()
        network = _update_network(network=network, net=net)
        return network
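A sketch of a 'mat' file this reader accepts, assuming the method is reachable as a plain load(...) call and OpenPNM is importable; keys follow the element_prop naming convention, with pore_coords and throat_conns handled specially:

import scipy as sp
import scipy.io as spio

spio.savemat('demo_net.mat', {
    'pore_coords': sp.random.rand(4, 3),                 # one XYZ row per pore
    'throat_conns': sp.array([[0, 1], [1, 2], [2, 3]]),  # pairs of pore indices
    'pore_volume': sp.random.rand(4, 1),                 # imported as 'pore.volume'
})
network = load('demo_net.mat')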
Example #22
def optimize(f, left, right, tol=1e-3, n=200, **kwargs):
    x_obs = sp.array([[(left + right)/2.]])
    y_obs = f(x_obs)
    x_test = sp.linspace(left, right, n)[:, None]
    
    lcb = sp.array([[-sp.inf]])
    while lcb.min() - y_obs.min() < -tol:
        lcb = lower_confidence_bound(x_test, x_obs, y_obs, **kwargs)
        print lcb.min() - y_obs.min()
        x_new = x_test[sp.argmin(lcb)]
        x_obs = sp.vstack([x_obs, [x_new]])
        y_obs = sp.vstack([y_obs, f(x_new)])
        
    return x_obs, y_obs.flatten()
Example #23
 def intersect(self, conset2):
     """Determine intersection between current constraint set and another"""
     def remredcons(A, b, verts):
         """Reduce a constraint set by removing unnecessary constraints."""
         eps = 10e-9
         #1 Co-planar constraints;
         #  Remove as not to affect 3rd check
         Ab = c_[A, b]
         Abnorms = ones((Ab.shape[0], 1))
         for i in range(Ab.shape[0]):
             Abnorms[i] = linalg.norm(Ab[i, :])
         Abn = Ab/Abnorms
         Abkeep = ones((0, Ab.shape[1]))
         Abtest = ones((0, Ab.shape[1]))
         for r1 in range(Abn.shape[0]):
             noocc = ones((1, 0))
             for r2 in range(Abn.shape[0]):
                 #print abs(Abn[r1, :] - Abn[r2, :])
                 if numpy.all(abs(Abn[r1, :] - Abn[r2, :]) < eps):
                     noocc = c_[noocc, r2]
             if noocc.size == 1:
                 Abtest = vstack([Abtest, Ab[r1, :]])
             else:
                 Abkeep = vstack([Abkeep, Ab[r1, :]])
         if Abkeep.shape[0] > 1:
             Abkeep = uniqm(Abkeep, eps)
         #2 Vert subset satisfying; no action needed (redundancy uncertain)
         #3 All vert satisfying constraints;
         A, b = splitAb(array(Abtest).ravel(), verts.shape[1])
         keepA = ones((0, A.shape[1]))
         keepb = ones((0, 1))
         bt = tile(b, (1, verts.shape[0]))
         k = mat(A)*mat(verts.T) - bt
         kk = sum(k > eps, axis=1)
         for i in range(len(kk)):
             if kk[i] != 0:
                 keepA = vstack([keepA, A[i, :]])
                 keepb = vstack([keepb, b[i, :]])
         outAb = vstack([c_[keepA, keepb], Abkeep])
         return splitAb(outAb.ravel(), verts.shape[1])
     #Combine constraints and vertices
     combA = vstack((self.A, conset2.A))
     combb = vstack((self.b, conset2.b))
     combv = vstack((self.vert, conset2.vert))
     #Remove redundant constraints
     ncombA, ncombb = remredcons(combA, combb, combv)
     #Calc and return intersection
     intcombvert = con2vert(combA, combb)[0]
     return intcombvert
Example #24
def stripe3():
    zero = sp.zeros((33,1))
    ones = sp.ones((33,1))

    Y1 = sp.vstack([ones, zero, zero])
    Y2 = sp.vstack([zero, ones, zero])
    Y3 = sp.vstack([zero, zero, ones])
    Y = sp.hstack((Y1, Y2, Y3))

    X1 = sp.random.multivariate_normal([-2,2], [[1,.8],[.8,1]], size=33)
    X2 = sp.random.multivariate_normal([2,-2], [[1,.8],[.8,1]], size=33)
    X3 = sp.random.multivariate_normal([0,0], [[1,.8],[.8,1]], size=33)
    X = sp.hstack((sp.vstack((ones,ones,ones)),sp.vstack((X1,X2,X3))))

    return Y, X
Example #25
 def save_network_tocsv(self,path='',filename='network'):
     r'''
     Save the network's pore and throat data to CSV files, one file per property.
     '''
     if path=='':
         path = os.path.abspath('')+'\\LocalFiles\\'
     Xp = self.get_pore_indices()
     Xt = self.get_throat_indices()
     for p in self._pore_data.keys():
         if sp.shape(sp.shape(self.get_pore_data(prop=p)))==(1,):
             Xp = sp.vstack((Xp,self.get_pore_data(prop=p)))
             sp.savetxt(path+'\\'+filename+'_pores_'+p+'.csv',self.get_pore_data(prop=p))
     for t in self._throat_data.keys():
         if sp.shape(sp.shape(self.get_throat_data(prop=t)))==(1,):
             Xt = sp.vstack((Xt,self.get_throat_data(prop=t)))
             sp.savetxt(path+'\\'+filename+'_throats_'+t+'.csv',self.get_throat_data(prop=t))
Example #26
def diffmat(dims,order = 'C'):
    """ This function will return a tuple of difference matrices for data from an
        Nd array that has been rasterized. The order parameter determines whether
        the array was rasterized in a C style (Python) or FORTRAN style (MATLAB).
        Inputs:
            dims- A list of the size of the x,y,z.. dimensions.
            order- Specifies the vectorization of the matrix
        Outputs:
            dx,dy,dy... - The finite difference operators for a vectorized array.
                If these are to be stacked together as one big operator then
                sp.sparse.vstack should be used.
    """
    # flip the dimensions around
    dims=[int(i) for i in dims]
    xdim = dims[0]
    ydim = dims[1]
    dims[0]=ydim
    dims[1]=xdim
    
    
    if order.lower() == 'c':
        dims = dims[::-1]

    outD = []
    for idimn, idim in enumerate(dims):
        if idim==0:
            outD.append(sp.array([]))
            continue
        e = sp.ones(idim)
        dthing = sp.vstack((-e,e))
        D = sp.sparse.spdiags(dthing,[0,1],idim-1,idim).toarray()
        D = sp.vstack((D,D[-1]))
        if idim>0:
            E = sp.sparse.eye(sp.prod(dims[:idimn]))
            D = sp.sparse.kron(D,E)

        if idimn<len(dims)-1:
            E = sp.sparse.eye(sp.prod(dims[idimn+1:]))
            D = sp.sparse.kron(E,D)

        outD.append(sp.sparse.csc_matrix(D))
    if order.lower() == 'c':
        outD=outD[::-1]
    Dy=outD[0]
    Dx = outD[1]
    outD[0]=Dx
    outD[1]=Dy
    return tuple(outD)
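A minimal usage sketch for a 2-D grid; as the docstring notes, the per-axis operators can be stacked into one big operator with sp.sparse.vstack:

import scipy as sp
import scipy.sparse

Dx, Dy = diffmat([4, 3])            # 4 x 3 grid, C (Python) raster order
D = sp.sparse.vstack([Dx, Dy])      # combined finite-difference operator
print(Dx.shape, Dy.shape, D.shape)  # each operator acts on the 12 grid values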
Example #27
def _test_scz_():
    # Simulate genotype and phenotype data for the schizophrenia test
    
    singleton_snps = genotypes.simulate_k_tons(n=500, m=1000)
    doubleton_snps = genotypes.simulate_k_tons(k=2, n=500, m=1000)
    common_snps = genotypes.simulate_common_genotypes(500, 1000) 
    
    snps = sp.vstack([common_snps, singleton_snps, doubleton_snps])
    test_snps = sp.vstack([singleton_snps, doubleton_snps])
    print snps
    phen_list = phenotypes.simulate_traits(snps, hdf5_file_prefix='/home/bv25/tmp/test', num_traits=30, p=1.0)
    
    singletons_thres = []
    doubletons_thres = []
    common_thres = []
    for i, y in enumerate(phen_list):
        
        K = kinship.calc_ibd_kinship(snps)
        K = kinship.scale_k(K)
        lmm = lm.LinearMixedModel(y)
        lmm.add_random_effect(K)
        r1 = lmm.get_REML()
        print 'pseudo_heritability:', r1['pseudo_heritability']

        ex_res = lm.emmax(snps, y, K)
        plt.figure()
        plt.hist(y, 50)
        plt.savefig('/home/bv25/tmp/test_%d_phen.png' % i)
        plt.clf()
        agr.plot_simple_qqplots_pvals('/home/bv25/tmp/test_%d' % i,
                                      [ex_res['ps'][:1000], ex_res['ps'][1000:2000], ex_res['ps'][2000:]],
                                      result_labels=['Common SNPs', 'Singletons', 'Doubletons'],
                                      line_colors=['b', 'r', 'y'],
                                      num_dots=200, max_neg_log_val=3)
        
        # Now permutations..
        res = lm.emmax_perm_test(singleton_snps, y, K, num_perm=1000)
        print 1.0 / (20 * 1000.0), res['threshold_05']
        singletons_thres.append(res['threshold_05'][0])
        res = lm.emmax_perm_test(doubleton_snps, y, K, num_perm=1000)
        print 1.0 / (20 * 1000.0), res['threshold_05']
        doubletons_thres.append(res['threshold_05'][0])
        res = lm.emmax_perm_test(common_snps, y, K, num_perm=1000)
        print 1.0 / (20 * 1000.0), res['threshold_05']
        common_thres.append(res['threshold_05'][0])
    print sp.mean(singletons_thres), sp.std(singletons_thres)
    print sp.mean(doubletons_thres), sp.std(doubletons_thres)
    print sp.mean(common_thres), sp.std(common_thres)
Example #28
 def plotstate2D(self,llimit,ulimit,allK=False):
     assert self.dim==2
     size=60
     Esum=sp.zeros([size,size])
     Msum=sp.zeros([size,size])
     for i,kf in enumerate(self.KFs):
         KyR = spl.cho_factor(self.buildKsym(kf,self.X))
         
         xaxis=sp.linspace(llimit[0],ulimit[0],size)
         yaxis=sp.linspace(llimit[1],ulimit[1],size)
         E=[]
         m=[]
         C=[]
         for y in yaxis:
             for x in xaxis:
                 (Et,mt,Ct)=self.evalEI(self.X,self.Y,KyR,kf,self.best[1],sp.matrix([x,y]))
                 E.append(Et[0,0])
                 m.append(mt)
                 C.append(Ct)
         
         Egrid=sp.vstack(np.split(np.array(E),size))
         mgrid=sp.vstack(np.split(np.array(m),size))
         Cgrid=sp.vstack(np.split(np.array(C),size))
         
         Esum=Esum+Egrid*sp.exp(self.llks[i])
         Msum=Msum+mgrid*sp.exp(self.llks[i])
         if allK:
             plt.figure(figsize=(20,5))
             plt.subplot(131)
             try:
                 plt.contour(xaxis,yaxis,mgrid,50)
             except ValueError:
                 pass
             plt.subplot(132)
             plt.contour(xaxis,yaxis,Cgrid,50)
             plt.subplot(133)
             plt.contour(xaxis,yaxis,Egrid,50)
     
     plt.figure(figsize=(20,5))
     plt.subplot(131)
     try:
         plt.contour(xaxis,yaxis,Msum,50)
     except ValueError:
         pass
     plt.subplot(133)
     plt.contour(xaxis,yaxis,Esum,50)
     plt.show()
     return
Example #29
def invert_epochs(epochs, end=None):
    """invert an epoch set

    The first epoch will be mapped to [0, start] and the last will be mapped
    to [end of last epoch, :end:]. Epochs that accidentally become negative
    or zero-length will be omitted.

    :type epochs: ndarray
    :param epochs: epoch set to invert
    :type end: int
    :param end: If not None, it is taken as the end of the last epoch,
        else max(index-dtype) is taken instead.
        Default=None
    :returns: ndarray - inverted epoch set
    """

    # checks
    if end is None:
        end = sp.iinfo(INDEX_DTYPE).max
    else:
        end = INDEX_DTYPE.type(end)

    # flip them
    rval = sp.vstack((sp.concatenate(([0], epochs[:, 1])), sp.concatenate((epochs[:, 0], [end])))).T
    return (rval[rval[:, 1] - rval[:, 0] > 0]).astype(INDEX_DTYPE)
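A minimal usage sketch, with INDEX_DTYPE assumed to be a plain integer dtype (a stand-in for whatever the module defines):

import scipy as sp

INDEX_DTYPE = sp.dtype('int64')  # assumption: the module's index dtype

epochs = sp.array([[10, 20], [30, 40]])
print(invert_epochs(epochs, end=50))
# [[ 0 10]
#  [20 30]
#  [40 50]]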
Example #30
    def _read_iop_from_file(self, file_name):
        """
        Generic IOP reader that interpolates the iop to the common wavelengths defined in the constructor

        returns: interpolated iop
        """
        lg.info('Reading :: ' + file_name + ' :: and interpolating to ' + str(self.wavelengths))

        if os.path.isfile(file_name):
            iop_reader = csv.reader(open(file_name), delimiter=',', quotechar='"')
            wave = scipy.float32(iop_reader.next())
            iop = scipy.zeros_like(wave)
            for row in iop_reader:
                iop = scipy.vstack((iop, row))

            iop = scipy.float32(iop[1:, :])  # drop the first row of zeros
        else:
            lg.exception('Problem reading file :: ' + file_name)
            raise IOError

        try:
            int_iop = scipy.zeros((iop.shape[0], self.wavelengths.shape[1]))
            for i_iter in range(0, iop.shape[0]):
                # r = scipy.interp(self.wavelengths[0, :], wave, iop[i_iter, :])
                int_iop[i_iter, :] = scipy.interp(self.wavelengths, wave, iop[i_iter, :])
            return int_iop
        except IOError:
            lg.exception('Error interpolating IOP to common wavelength')
            return -1
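The resampling step is scipy.interp applied row by row; a minimal sketch of the expected layout (the first CSV row is the source wavelength grid, every following row one IOP spectrum), with hypothetical numbers:

import scipy

wave = scipy.array([400., 500., 600., 700.])      # source wavelengths
iop = scipy.array([[0.10, 0.20, 0.15, 0.05],      # one spectrum per row
                   [0.30, 0.25, 0.20, 0.10]])

target = scipy.arange(400., 701., 50.)            # the common wavelength grid
int_iop = scipy.vstack([scipy.interp(target, wave, row) for row in iop])
print(int_iop.shape)  # (2, 7)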
Example #31
batch_kalman = []
deltaT = sp.mean(t[1:] - t[0:-1])
state0 = sp.array([0, 0]).T
P0 = sp.identity(2) * 0.1
F0     = sp.array([[1, deltaT],\
                   [0, 1]])
H0 = sp.identity(2)
Q0 = sp.diagflat([0.005, 0.0001])
R0 = sp.diagflat([0.25, 0.001])

for i in range(BATCH_SIZE):
    data = Data1D(sp.squeeze(batch_x[i, :, 0]), sp.squeeze(batch_x[i, :, 1]),
                  [])
    filter1b = LinearKalmanFilter1D(F0, H0, P0, Q0, R0, state0)
    kalman_data = filter1b.process_data(data)
    batch_kalman.append(sp.vstack([kalman_data.x[1:], kalman_data.vx[1:]]).T)

xk_batch = sp.stack(batch_kalman)
print(xk_batch.shape)
#%% Plot the fit
plt.figure(figsize=(14, 16))
for batch_idx in range(BATCH_SIZE):
    out_x = sp.squeeze(out[batch_idx, :, 0])
    out_vx = sp.squeeze(out[batch_idx, :, 1])
    noisy_x = batch_x[batch_idx, :, 0]
    noisy_vx = batch_x[batch_idx, :, 1]
    true_x = batch_y[batch_idx, :, 0]
    true_vx = batch_y[batch_idx, :, 1]

    plt.subplot(20 + (BATCH_SIZE) * 100 + batch_idx * 2 + 1)
    if batch_idx == 0: plt.title('Location x')
Example #32
    def _generate_throats(self):
        r"""
        Generate the throats connections
        """
        logger.info('Define connections between pores')
        pts = self['pore.coords']
        Np = len(pts)
        # Generate 6 dummy domains to pad onto each face of real domain This
        # prevents surface pores from making long range connections to each other

        x, y, z = self['pore.coords'].T
        if x.max() > self._Lx:
            Lx = x.max() * 1.05
        else:
            Lx = self._Lx
        if y.max() > self._Ly:
            Ly = y.max() * 1.05
        else:
            Ly = self._Ly
        if z.max() > self._Lz:
            Lz = z.max() * 1.05
        else:
            Lz = self._Lz

        # Reflect in X = Lx and 0
        Pxp = pts.copy()
        Pxp[:, 0] = 2 * Lx - Pxp[:, 0]
        Pxm = pts.copy()
        Pxm[:, 0] = Pxm[:, 0] * (-1)
        # Reflect in Y = Ly and 0
        Pyp = pts.copy()
        Pyp[:, 1] = 2 * Ly - Pxp[:, 1]
        Pym = pts.copy()
        Pym[:, 1] = Pxm[:, 1] * (-1)
        # Reflect in Z = Lz and 0
        Pzp = pts.copy()
        Pzp[:, 2] = 2 * Lz - Pxp[:, 2]
        Pzm = pts.copy()
        Pzm[:, 2] = Pxm[:, 2] * (-1)
        # Add dummy domains to real domain
        # Order important for boundary logic
        pts = np.vstack((pts, Pxp, Pxm, Pyp, Pym, Pzp, Pzm))
        # Perform tessellation
        logger.debug('Beginning tessellation')
        Tri = sptl.Delaunay(pts)
        logger.debug('Converting tessellation to adjacency matrix')
        adjmat = sprs.lil_matrix((Np, Np), dtype=int)
        for i in sp.arange(0, sp.shape(Tri.simplices)[0]):
            # Keep only simplices that are fully in real domain
            # this used to be vectorize, but it stopped working...change in scipy?
            for j in Tri.simplices[i]:
                if j < Np:
                    adjmat[j, Tri.simplices[i][Tri.simplices[i] < Np]] = 1
        # Remove duplicate (lower triangle) and self connections (diagonal)
        # and convert to coo
        adjmat = sprs.triu(adjmat, k=1, format="coo")
        logger.debug('Conversion to adjacency matrix complete')
        self['throat.conns'] = sp.vstack((adjmat.row, adjmat.col)).T
        self['pore.all'] = np.ones(len(self['pore.coords']), dtype=bool)
        self['throat.all'] = np.ones(len(self['throat.conns']), dtype=bool)

        # Do Voronoi diagram - creating voronoi polyhedra around each pore and save
        # vertex information
        self._vor = Voronoi(pts)
        all_vert_index = sp.ndarray(Np, dtype=object)
        for i, polygon in enumerate(self._vor.point_region[0:Np]):
            if -1 not in self._vor.regions[polygon]:
                all_vert_index[i] = \
                    dict(zip(self._vor.regions[polygon],
                             self._vor.vertices[self._vor.regions[polygon]]))

        # Add throat vertices by looking up vor.ridge_dict
        throat_verts = sp.ndarray(len(self['throat.conns']), dtype=object)
        for i, (p1, p2) in enumerate(self['throat.conns']):
            try:
                throat_verts[i] = \
                    dict(zip(self._vor.ridge_dict[(p1, p2)],
                             self._vor.vertices[self._vor.ridge_dict[(p1, p2)]]))
            except KeyError:
                try:
                    throat_verts[i] = \
                        dict(zip(self._vor.ridge_dict[(p2, p1)],
                                 self._vor.vertices[self._vor.ridge_dict[(p2, p1)]]))
                except KeyError:
                    logger.error(
                        'Throat Pair Not Found in Voronoi Ridge Dictionary')

        self['pore.vert_index'] = all_vert_index
        self['throat.vert_index'] = throat_verts
        logger.debug(sys._getframe().f_code.co_name + ': End of method')
Example #33
import networkx
from itertools import chain
import scipy.stats as stats
import pandas as pd
import numpy as np
import mendelian_mutation


alt_scores = comorbidity_genetics.comorbidity_scores('mendelian_disease_alterations_nogermline.pkl','comorb_focal', set(cens_germline))

cg_score = (1 - stats.binom.cdf((coex_mat > rho_cut).sum(axis = 0) - 1, len(msel), (coexpression_c.loc[notm, csel] > rho_cut).sum(axis = 0)/float(len(notm))))
                    
sc = pd.DataFrame(np.array(cg_score),index = coex_mat.columns,columns=['p'])
v = sc.sort(['p'])

v = pd.DataFrame(scipy.vstack(((gene_scores < .05*float(nrand)).sum(axis=0),
                               gscore_vs_background,
                               gene_scores.min(axis=0),
                               background_probability)),
                 columns=rand_score[alt]['cancers'])

gs = pd.DataFrame(gene_scores, columns=rand_score[alt]['cancers'], index=mend_gn)

gscore_vs_background = [0]*len(rand_score[alt]['cancers'])
background_probability = [0]*len(rand_score[alt]['cancers'])
for (c_i, c) in enumerate(rand_score[alt]['cancers']):
    background_genes = background_set - alterations[alt][c][1]
    p = (background[alt].loc[background_genes, c] < .05).sum()/float(len(background_genes))
    background_probability[c_i] = p

net_scores = mendelian_mutation.open_network_scores()
alt_enrichments = mendelian_mutation.open_alteration_enrichments()

xx = zip(*tuple([stats.ranksums(coexpression_c.loc[notm, csel_g], coex_mat.loc[:,csel_g]) for csel_g in csel]))
Example #34
def weighted_pmi_lrec_items(matrix_train,
                            embeded_matrix=np.empty((0)),
                            iteration=4,
                            lam=80,
                            rank=200,
                            alpha=100,
                            gpu=True,
                            seed=1,
                            root=1,
                            **unused):
    """
    Function used to achieve generalized projected lrec with or without item-attribute embedding
    :param matrix_train: user-item matrix with shape m*n
    :param embeded_matrix: item-attribute matrix with length n (each row represents one item)
    :param iteration: number of SVD iterations
    :param lam: parameter of penalty
    :param rank: the latent dimension (number of latent factors)
    :param alpha: weights of the U-I ratings
    :param gpu: whether use gpu power
    :return: prediction in sparse matrix
    """
    progress = WorkSplitter()
    matrix_input = matrix_train
    if embeded_matrix.shape[0] > 0:
        matrix_input = vstack((matrix_input, embeded_matrix.T))

    progress.subsection("Create PMI matrix")
    pmi_matrix = get_pmi_matrix(matrix_input, root)

    progress.subsection("Randomized SVD")
    start_time = time.time()
    P, sigma, Qt = randomized_svd(pmi_matrix, n_components=rank, n_iter=iteration, random_state=seed)
    print("Elapsed: {0}".format(inhour(time.time() - start_time)))

    start_time = time.time()
    if gpu:
        import cupy as cp
        progress.subsection("Create Cacheable Matrices")
        # RQ = matrix_input.dot(sparse.csc_matrix(Qt).T).toarray()

        # sqrt sigma injection
        RQ = matrix_input.dot(sparse.csc_matrix(Qt.T * np.sqrt(sigma))).toarray()

        # Exact
        matrix_B = cp.array(RQ)
        matrix_BT = matrix_B.T
        matrix_A = matrix_BT.dot(matrix_B) + cp.array((lam * sparse.identity(rank, dtype=np.float32)).toarray())

        # Approx
        # matrix_A = cp.array(sparse.diags(sigma * sigma + lam).todense())
        # matrix_B = cp.array(P*sigma)
        # matrix_BT = cp.array(matrix_B.T)
        print("Elapsed: {0}".format(inhour(time.time() - start_time)))


        progress.subsection("Item-wised Optimization")
        start_time = time.time()

        # For loop
        m, n = matrix_train.shape
        Y = []
        alpha = cp.array(alpha, dtype=cp.float32)
        for i in tqdm(xrange(n)):
            vector_r = matrix_train[:, i]
            vector_y = per_item_gpu(vector_r, matrix_A, matrix_B, matrix_BT, alpha)
            y_i_gpu = cp.asnumpy(vector_y)
            y_i_cpu = np.copy(y_i_gpu)
            Y.append(y_i_cpu)
        Y = scipy.vstack(Y)
        print("Elapsed: {0}".format(inhour(time.time() - start_time)))
    else:
        progress.subsection("Create Cacheable Matrices")
        RQ = matrix_input.dot(sparse.csc_matrix(Qt).T).toarray()

        # Exact
        matrix_B = RQ
        matrix_BT = RQ.T
        matrix_A = matrix_BT.dot(matrix_B) + (lam * sparse.identity(rank, dtype=np.float32)).toarray()

        # Approx
        # matrix_B = P * sigma
        # matrix_BT = matrix_B.T
        # matrix_A = sparse.diags(sigma * sigma - lam).todense()
        print("Elapsed: {0}".format(inhour(time.time() - start_time)))

        progress.subsection("Item-wised Optimization")
        start_time = time.time()

        # For loop
        m, n = matrix_train.shape
        Y = []
        for i in tqdm(xrange(n)):
            vector_r = matrix_train[:, i]
            vector_y = per_item_cpu(vector_r, matrix_A, matrix_B, matrix_BT, alpha)
            y_i_cpu = vector_y
            Y.append(y_i_cpu)
        Y = scipy.vstack(Y)
        print("Elapsed: {0}".format(inhour(time.time() - start_time)))
    return RQ, Y.T, None
Example #35
    def score_2_dof(self, X, snp_dim="col", debug=False):
        """
        Parameters
        ----------
        X : (`N`, `1`) ndarray
            genotype vector (TODO: X should be small)

        Returns
        -------
        pvalue : float
            P value
        """
        import scipy as sp
        import scipy.linalg as la
        import scipy.stats as st

        # 1. calculate Qs and pvs
        Q_rho = sp.zeros(len(self.rho_list))
        Py = P(self.gp, self.y)
        for i in range(len(self.rho_list)):
            rho = self.rho_list[i]
            LT = sp.vstack((rho ** 0.5 * self.vec_ones, (1 - rho) ** 0.5 * self.Env.T))
            LTxoPy = sp.dot(LT, X * Py)
            Q_rho[i] = 0.5 * sp.dot(LTxoPy.T, LTxoPy)

        # Calculating pvs is split into 2 steps
        # If we only consider one value of rho i.e. equivalent to SKAT and used for interaction test
        if len(self.rho_list) == 1:
            rho = self.rho_list[0]
            L = sp.hstack((rho ** 0.5 * self.vec_ones.T, (1 - rho) ** 0.5 * self.Env))
            xoL = X * L
            PxoL = P(self.gp, xoL)
            LToxPxoL = 0.5 * sp.dot(xoL.T, PxoL)
            try:
                pval = davies_pvalue(Q_rho[0], LToxPxoL)
            except AssertionError:
                eighQ, UQ = la.eigh(LToxPxoL)
                pval = mod_liu_corrected(Q_rho[0], eighQ)
            # Script ends here for interaction test
            return pval
        # or if we consider multiple values of rho i.e. equivalent to SKAT-O and used for association test
        else:
            pliumod = sp.zeros((len(self.rho_list), 4))
            for i in range(len(self.rho_list)):
                rho = self.rho_list[i]
                L = sp.hstack(
                    (rho ** 0.5 * self.vec_ones.T, (1 - rho) ** 0.5 * self.Env)
                )
                xoL = X * L
                PxoL = P(self.gp, xoL)
                LToxPxoL = 0.5 * sp.dot(xoL.T, PxoL)
                eighQ, UQ = la.eigh(LToxPxoL)
                pliumod[i,] = mod_liu_corrected(Q_rho[i], eighQ)
            T = pliumod[:, 0].min()
            # if optimal_rho == 0.999:
            #    optimal_rho = 1

            # 2. Calculate qmin
            qmin = sp.zeros(len(self.rho_list))
            percentile = 1 - T
            for i in range(len(self.rho_list)):
                q = st.chi2.ppf(percentile, pliumod[i, 3])
                # Recalculate p-value for each Q rho of seeing values at least as extreme as q again using the modified matching moments method
                qmin[i] = (q - pliumod[i, 3]) / (2 * pliumod[i, 3]) ** 0.5 * pliumod[
                    i, 2
                ] + pliumod[i, 1]

            # 3. Calculate quantites that occur in null distribution
            Px1 = P(self.gp, X)
            m = 0.5 * sp.dot(X.T, Px1)
            xoE = X * self.Env
            PxoE = P(self.gp, xoE)
            ETxPxE = 0.5 * sp.dot(xoE.T, PxoE)
            ETxPx1 = sp.dot(xoE.T, Px1)
            ETxPx11xPxE = 0.25 / m * sp.dot(ETxPx1, ETxPx1.T)
            ZTIminusMZ = ETxPxE - ETxPx11xPxE
            eigh, vecs = la.eigh(ZTIminusMZ)

            eta = sp.dot(ETxPx11xPxE, ZTIminusMZ)
            vareta = 4 * sp.trace(eta)

            OneZTZE = 0.5 * sp.dot(X.T, PxoE)
            tau_top = sp.dot(OneZTZE, OneZTZE.T)
            tau_rho = sp.zeros(len(self.rho_list))
            for i in range(len(self.rho_list)):
                tau_rho[i] = self.rho_list[i] * m + (1 - self.rho_list[i]) / m * tau_top

            MuQ = sp.sum(eigh)
            VarQ = sp.sum(eigh ** 2) * 2 + vareta
            KerQ = sp.sum(eigh ** 4) / (sp.sum(eigh ** 2) ** 2) * 12
            Df = 12 / KerQ

            # 4. Integration
            # from time import time
            # start = time()
            pvalue = optimal_davies_pvalue(
                qmin, MuQ, VarQ, KerQ, eigh, vareta, Df, tau_rho, self.rho_list, T
            )
            # print("Elapsed: {} seconds".format(time() - start))

            # Final correction to make sure that the p-value returned is sensible
            multi = 3
            if len(self.rho_list) < 3:
                multi = 2
            idx = sp.where(pliumod[:, 0] > 0)[0]
            pval = pliumod[:, 0].min() * multi
            if pvalue <= 0 or len(idx) < len(self.rho_list):
                pvalue = pval
            if pvalue == 0:
                if len(idx) > 0:
                    pvalue = pliumod[:, 0][idx].min()

            if debug:
                info = {
                    "Qs": Q_rho,
                    "pvs_liu": pliumod,
                    "qmin": qmin,
                    "MuQ": MuQ,
                    "VarQ": VarQ,
                    "KerQ": KerQ,
                    "lambd": eigh,
                    "VarXi": vareta,
                    "Df": Df,
                    "tau": tau_rho,
                }
                return pvalue, info
            else:
                return pvalue
Example #36
    def labelconsistentksvd(self,
                            Y,
                            Dinit,
                            labels,
                            Q_train,
                            Tinit,
                            Winit=None):
        """
        Label consistent KSVD1 algorithm and Discriminative LC-KSVD2 implementation
        Args:
            Y               : training features
            Dinit           : initialized dictionary
            labels          : labels matrix for training feature (numbered from 1 to nb of classes)
            Q_train         : optimal code matrix for training feature
            Tinit           : initialized transform matrix
            Winit           : initialized classifier parameters (None for LC-KSVD1)
        Returns:
            D               : learned dictionary
            X               : sparse codes
            T               : learned transform matrix
            W               : learned classifier parameters
        """

        # H_train = sp.zeros((int(labels.max()), Y.shape[1]), dtype=float)
        # print(H_train.shape)
        # for c in range(int(labels.max())):
        #     H_train[c, labels == (c+1)] = 1.
        H_train = labels

        # ksvd process
        runKsvd = ApproximateKSVD(Dinit.shape[1],
                                  max_iter=self.iterations,
                                  tol=self.tol,
                                  transform_n_nonzero_coefs=self.sparsitythres)
        if Winit is None:
            runKsvd.fit(sp.vstack((Y, self.sqrt_alpha * Q_train)),
                        Dinit=normcols(
                            sp.vstack((Dinit, self.sqrt_alpha * Tinit))))
        else:
            runKsvd.fit(sp.vstack(
                (Y, self.sqrt_alpha * Q_train, self.sqrt_beta * H_train)),
                        Dinit=normcols(
                            sp.vstack((Dinit, self.sqrt_alpha * Tinit,
                                       self.sqrt_beta * Winit))))

        # get back the desired D, T and W (if sqrt_beta is not None)
        i_end_D = Dinit.shape[0]
        i_start_T = i_end_D
        i_end_T = i_end_D + Tinit.shape[0]
        D = runKsvd.components_[:i_end_D, :]
        T = runKsvd.components_[i_start_T:i_end_T, :]
        if Winit is not None:
            i_start_W = i_end_T
            i_end_W = i_end_T + Winit.shape[0]
            W = runKsvd.components_[i_start_W:i_end_W, :]

        # normalization
        l2norms = splin.norm(D, axis=0)[sp.newaxis, :] + self.tol
        D /= l2norms
        T /= l2norms
        T /= self.sqrt_alpha
        X = runKsvd.gamma_

        if Winit is None:
            # Learning linear classifier parameters
            xxt = X.dot(X.T)
            W = splin.pinv(xxt + sp.eye(*(xxt).shape)).dot(X).dot(H_train.T)
            W = W.T
        else:
            W /= l2norms
            W /= self.sqrt_beta

        return D, X, T, W
Example #37
def test1():
    zs = np.linspace(0.01, 1, 21)
    r = np.array([[0, 0, zz] for zz in zs])
    thetas = np.linspace(0, np.pi * 0.25, 10)
    print "thetas: ", thetas
    k_dir = np.vstack([np.sin(thetas), np.zeros_like(thetas), np.cos(thetas)])
    k_dir = k_dir.transpose()

    wavelength = 10
    k0 = np.pi * 2 / wavelength

    a1 = np.array([1, 0, 0])
    a2 = np.array([0, 1, 0])

    class gratinglobes(object):
        def check(self):
            dx = np.sqrt(np.sum(a1 * a1, axis=-1))
            dy = np.sqrt(np.sum(a2 * a2, axis=-1))
            threshold_d = wavelength / (1 + np.sin(thetas))
            temp = np.array([dx < threshold_d, dy < threshold_d])
            return np.sum(temp, axis=0) == temp.shape[0]

    checker = gratinglobes().check()
    print "grating lobe condition: ", checker

    result_direct = PGF_Direct(k0, a1, a2, 100, 100).pgf(k_dir, r)
    result_poisson = PGF_Poisson(k0, a1, a2, 1, 1).pgf(k_dir, r)
    result_ewald = PGF_EWALD(k0, a1, a2, 20, 20).pgf(k_dir, r)
    import matplotlib.pylab as plt
    plt.figure()

    class Method(object):
        def __init__(self, result, marker, line):
            self.result = result
            self.marker = marker
            self.line = line

        def angle(self, it):
            plt.plot(zs,\
                     np.angle(self.result)[it]/np.pi*180,\
                     self.line,\
                     label='angle %d %s'%(it,self.marker))

        def absolute(self, it):
            plt.plot(zs,\
                     np.absolute(self.result)[it],\
                     self.line,\
                     label='abs %d %s'%(it,self.marker))

    map(Method(result_poisson, 'pois', "s").angle, xrange(k_dir.shape[0]))
    map(Method(result_direct, 'dir', "-").angle, xrange(k_dir.shape[0]))
    map(Method(result_ewald, 'ewald', "+").angle, xrange(k_dir.shape[0]))
    plt.ylabel("angle (degree)")
    plt.xlabel("zs")
    #plt.legend()
    plt.show()

    plt.figure()
    map(Method(result_poisson, 'pois', "s").absolute, xrange(k_dir.shape[0]))
    map(Method(result_direct, 'dir', "-").absolute, xrange(k_dir.shape[0]))
    map(Method(result_ewald, 'ewald', "+").absolute, xrange(k_dir.shape[0]))
    plt.xlabel("zs")
    plt.ylabel("log10(abs) ")
    #plt.legend()
    plt.show()

    plt.figure()
    #map(Method((result_direct-result_poisson)/result_direct,'dir_pois',"-s").absolute,xrange(k_dir.shape[0]))
    map(
        Method((result_direct - result_ewald) / result_direct, 'dir_ewald',
               "-d").absolute, xrange(k_dir.shape[0]))
    #map(Method((result_poisson-result_ewald)/result_ewald,'pois_ewald',"-o").absolute,xrange(k_dir.shape[0]))
    plt.xlabel("zs")
    plt.ylabel("log10(abs) ")
    #plt.legend()
    plt.show()
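The gratinglobes check above encodes the usual onset condition for grating lobes on a rectangular lattice, d < λ / (1 + sin θ). A quick hedged sanity check of that threshold with illustrative numbers:

import numpy as np

wavelength = 10.0
thetas = np.linspace(0, np.pi * 0.25, 10)
d = 1.0  # lattice spacing |a1| = |a2| from the unit vectors above
threshold = wavelength / (1 + np.sin(thetas))
print("lobe-free for all scan angles:", np.all(d < threshold))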
Beispiel #38
    i = i + 1
u1 = (Wbig < 0) * -Wbig + Wbig
u1 = sp.sqrt(u1)
u1 = (Wbig < 0) * -10**10 + u1
x, y = discretenorm(k, 4 * sp.sqrt(.25), .25)
for j in range(k):
    u[:, :, j] = x[j] * u1

b = .9
states = sp.zeros((1, 100, 7))
policy = sp.zeros((1, 100, 7))
i = 0
d = 1
while d > 10**-9:
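    # One Bellman iteration: weight each (wealth, shock) value by the shock
    # probabilities y, sum over shocks, and discount by b to form the
    # continuation value that is broadcast over the choice grid below.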
    E = states[i, :, :] * y
    V = sp.vstack(E.sum(1)) * b
    Value1 = V.copy()
    for x in range(100 - 1):
        Value1 = sp.concatenate((Value1, V), 1)
    Value = sp.zeros((100, 100, 7))
    for x in range(7):
        Value[:, :, x] = Value1.T
    total = u + Value
    temp = total.max(1)
    temp.resize(1, 100, 7)
    temp1 = total.argmax(1)
    temp1.resize(1, 100, 7)
    states = sp.concatenate((states, temp), 0)
    policy = sp.concatenate((policy, temp1), 0)
    i = i + 1
    d = la.norm(states[i - 1, :, :] - states[i, :, :])
Beispiel #39
def parcellate_region(roilist,
                      sub,
                      nClusters,
                      scan,
                      scan_type,
                      savepng=0,
                      session=1,
                      algo=0,
                      type_cor=0):
    p_dir = '/big_disk/ajoshi/HCP100-fMRI-NLM/HCP100-fMRI-NLM'
    r_factor = 3
    ref_dir = os.path.join(p_dir, 'reference')
    ref = '100307'
    fn1 = ref + '.reduce' + str(r_factor) + '.LR_mask.mat'
    fname1 = os.path.join(ref_dir, fn1)
    msk = scipy.io.loadmat(fname1)

    dfs_left_sm = readdfs(
        os.path.join('/home/ajoshi/for_gaurav',
                     '100307.BCI2reduce3.very_smooth.' + scan_type + '.dfs'))
    dfs_left = readdfs(
        os.path.join('/home/ajoshi/for_gaurav',
                     '100307.BCI2reduce3.very_smooth.' + scan_type + '.dfs'))

    data = scipy.io.loadmat(
        os.path.join(
            p_dir, sub, sub + '.rfMRI_REST' + str(session) + scan +
            '.reduce3.ftdata.NLM_11N_hvar_25.mat'))

    LR_flag = msk['LR_flag']
    # 0= right hemisphere && 1== left hemisphere
    if scan_type == 'right':
        LR_flag = np.squeeze(LR_flag) == 0
    else:
        LR_flag = np.squeeze(LR_flag) == 1
    data = data['ftdata_NLM']
    temp = data[LR_flag, :]
    m = np.mean(temp, 1)
    temp = temp - m[:, None]
    s = np.std(temp, 1) + 1e-16
    temp = temp / s[:, None]
    msk_small_region = np.in1d(dfs_left.labels, roilist)
    #    (dfs_left.labels == 46) | (dfs_left.labels == 28) \
    #       | (dfs_left.labels == 29)  # % motor
    d = temp[msk_small_region, :]
    rho = np.corrcoef(d)
    rho[~np.isfinite(rho)] = 0
    # rho=np.abs(rho)
    d_corr = temp[~msk_small_region, :]
    rho_1 = np.corrcoef(d, d_corr)
    rho_1 = rho_1[range(d.shape[0]), d.shape[0]:]
    rho_1[~np.isfinite(rho_1)] = 0
    if type_cor == 1:
        f_rho = np.arctanh(rho_1)
        f_rho[~np.isfinite(f_rho)] = 0
        B = np.corrcoef(f_rho)
        B[~np.isfinite(B)] = 0
        # B = np.abs(B)

    # SC = DBSCAN()
    if algo == 0:
        SC = SpectralClustering(n_clusters=nClusters, affinity='precomputed')
        # SC=SpectralClustering(n_clusters=nClusters,gamma=0.025)
        if type_cor == 0 and rho.size > 0:
            # affinity_matrix = affinity_mat(rho)
            affinity_matrix = np.arcsin(rho)
            labels = SC.fit_predict(np.abs(affinity_matrix))
        if type_cor == 1 and rho.size > 0:
            labels = SC.fit_predict(affinity_mat(B))
            # affinity_matrix=SC.fit(np.abs(d))
    elif algo == 1:
        g = nx.Graph()
        g.add_edges_from(dfs_left.faces[:, (0, 1)])
        g.add_edges_from(dfs_left.faces[:, (1, 2)])
        g.add_edges_from(dfs_left.faces[:, (2, 0)])
        Adj = nx.adjacency_matrix(g)
        AdjS = Adj[(msk_small_region), :]
        AdjS = AdjS[:, (msk_small_region)]
        AdjS = AdjS.todense()
        np.fill_diagonal(AdjS, 1)
        SC = AgglomerativeClustering(n_clusters=nClusters, connectivity=AdjS)
        labels = SC.fit_predict(rho)
    elif algo == 2:
        GM = GMM(n_components=nClusters, covariance_type='full', n_iter=100)
        GM.fit(rho)
        labels = GM.predict(rho)

    elif algo == 3:
        neighbour_correlation(rho, dfs_left_sm.faces, dfs_left_sm.vertices,
                              msk_small_region)

    if savepng > 0:
        r = dfs_left_sm
        r.labels = np.zeros([r.vertices.shape[0]])
        r.labels[msk_small_region] = labels + 1

        cent = separate(labels, r, r.vertices, nClusters)

        manual_order = np.array([0 for x in range(nClusters)])
        save = np.array([0 for x in range(nClusters)])

        for i in range(0, nClusters):
            if nClusters > 1:
                choose_vector = np.argmax(cent.transpose(), axis=1)
                save[i] = cent[choose_vector[1]][1]
                correspondence_point = find_location_smallmask(
                    r.vertices, cent[choose_vector[1]], msk_small_region)
                cent[choose_vector[1]][1] = -np.inf
                manual_order[i] = choose_vector[1]
                if i == 0:
                    # change
                    correlation_within_precuneus_vector = sp.array(
                        rho[correspondence_point])
                    correlation_with_rest_vector = sp.array(
                        rho_1[correspondence_point])
                else:
                    correlation_within_precuneus_vector = sp.vstack([
                        correlation_within_precuneus_vector,
                        [rho[correspondence_point]]
                    ])
                    correlation_with_rest_vector = sp.vstack([
                        correlation_with_rest_vector,
                        [rho_1[correspondence_point]]
                    ])
            else:
                choose_vector = 0
                correspondence_point = find_location_smallmask(
                    r.vertices, cent, msk_small_region)
                manual_order[i] = choose_vector
                if i == 0:
                    # change
                    correlation_within_precuneus_vector = sp.array(
                        rho[correspondence_point])
                    correlation_with_rest_vector = sp.array(
                        rho_1[correspondence_point])

        manual_order = change_order(manual_order, nClusters)
        r.labels = change_labels(r.labels, manual_order, nClusters)

        new_cent = separate(r.labels, r, temp, nClusters)

        if nClusters > 1:
            for i in range(0, nClusters):
                cent[manual_order[i]][1] = save[i]
    return (r, correlation_within_precuneus_vector,
            correlation_with_rest_vector, msk_small_region, new_cent)
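The core move in parcellate_region is turning a correlation matrix into a non-negative affinity before spectral clustering. A minimal self-contained sketch of that idea on synthetic data (sizes and names illustrative):

import numpy as np
from sklearn.cluster import SpectralClustering

rng = np.random.default_rng(0)
data = rng.standard_normal((60, 300))        # 60 vertices x 300 timepoints
rho = np.corrcoef(data)
rho[~np.isfinite(rho)] = 0
affinity = np.abs(np.arcsin(np.clip(rho, -1, 1)))  # non-negative, as above
labels = SpectralClustering(n_clusters=3,
                            affinity='precomputed').fit_predict(affinity)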
Beispiel #40
"""
@author: 56977
"""

import scipy as sp
import matplotlib.pyplot as plt

data = sp.loadtxt("datos2_corrida0.txt")

N = data[:, 0]
dts = data[:, 1]
mem = data[:, 2]

for i in range(1, 10):
    data = sp.loadtxt(f"datos2_corrida{i}.txt")
    dts = sp.vstack((dts, data[:, 1]))

absisas = [10, 20, 50, 100, 200, 500, 1000, 2000, 5000, 10000, 20000]

TPO = [0.1e-3, 1e-3, 1e-2, 0.1, 1., 10., 60., 60 * 10]
TPO_label = [
    "0.1 ms", "1 ms", "10 ms", "0.1 s", "1 s", "10 s", "1 min", "10 min"
]

RAM = [10**3, 10**4, 10**5, 10**6, 10**7, 10**8, 10**9, 10**10, 10**11]
RAM_label = [
    "1 KB", "10 KB", "100 KB", "1 MB", "10 MB", "100 MB", "1 GB", "10 GB", ""
]

plt.figure()
Beispiel #41
temp=np.ones((cMshape[1],cMshape[2],cMshape[3]+1))
for i in range(cMshape[1]):
	print('Shape check:', np.shape(temp[i,:,:2]), np.shape(contractionMatrix[0][i]))
	temp[i,:,:2]=(contractionMatrix[0][i])-256
	print('zPos:', contourInformation[i][0])
	temp[i,:,2]=temp[i,:,2]*float(contourInformation[i][0])
	#temp[i,:,2]=temp[i,:,2]*-float(contourInformation[i][0])#-375

#Read in any fixed points
fixedContour1=np.loadtxt(workingDir+'FixedContours/InletFixedContour1')
fixedContour2=np.loadtxt(workingDir+'FixedContours/InletFixedContour2')
fixedContour3=np.loadtxt(workingDir+'FixedContours/FixedContour1')
fixedContour4=np.loadtxt(workingDir+'FixedContours/OutletFixedContour1')
fixedContour5=np.loadtxt(workingDir+'FixedContours/OutletFixedContour2')

fixedContours=scipy.vstack([fixedContour1,fixedContour2,fixedContour3,fixedContour4,fixedContour5])

print('contour shape:', np.shape(temp.reshape((cMshape[1]*cMshape[2],cMshape[3]+1))))
#print 'fixedcontour shape',fixedContour.shape

source_points_fitting = temp.reshape((cMshape[1]*cMshape[2],cMshape[3]+1))

#With fixed contours
#source_points_fitting = scipy.vstack([temp.reshape((cMshape[1]*cMshape[2],cMshape[3]+1)),fixedContours])

#########################################################################################################
#Loop through time points
for timePoint in range(11,12):#range(20,cMshape[0]):

	print('fitting time: ', timePoint)
	temp=np.ones((cMshape[1],cMshape[2],cMshape[3]+1))
Beispiel #42
def vl_phow(im,
            verbose=True,
            fast=True,
            sizes=[4, 6, 8, 10],
            step=2,
            color='rgb',
            floatdescriptors=False,
            magnif=6,
            windowsize=1.5,
            contrastthreshold=0.005):

    opts = Options(verbose, fast, sizes, step, color, floatdescriptors, magnif,
                   windowsize, contrastthreshold)
    dsiftOpts = DSiftOptions(opts)

    # make sure image is float, otherwise segfault
    im = array(im, 'float32')

    # Extract the features
    imageSize = shape(im)
    if im.ndim == 3:
        if imageSize[2] < 3:  #if imageSize[2] != 3:
            # "IndexError: tuple index out of range" if both if's are checked at the same time
            raise ValueError("Image data in unknown format/shape")
    if opts.color == 'gray':
        numChannels = 1
        if (im.ndim == 2):
            im = vl_rgb2gray(im)
    else:
        numChannels = 3
        if (im.ndim == 2):
            im = dstack([im, im, im])
        if opts.color == 'rgb':
            pass
        elif opts.color == 'opponent':
            # from https://github.com/vlfeat/vlfeat/blob/master/toolbox/sift/vl_phow.m
            # Note that the mean differs from the standard definition of opponent
            # space and is the regular intensity (for compatibility with
            # the contrast thresholding).
            # Note also that the mean is added back to the other two
            # components with a small multiplier for monochromatic
            # regions.

            mu = 0.3 * im[:, :, 0] + 0.59 * im[:, :, 1] + 0.11 * im[:, :, 2]
            alpha = 0.01
            im = dstack([
                mu, (im[:, :, 0] - im[:, :, 1]) / sqrt(2) + alpha * mu,
                (im[:, :, 0] + im[:, :, 1] - 2 * im[:, :, 2]) / sqrt(6) +
                alpha * mu
            ])
        else:
            raise ValueError('Color option ' + str(opts.color) +
                             ' not recognized')
    if opts.verbose:
        print('{0}: color space: {1}'.format('vl_phow', opts.color))
        print('{0}: image size: {1} x {2}'.format('vl_phow', imageSize[0],
                                                  imageSize[1]))
        print('{0}: sizes: [{1}]'.format('vl_phow', opts.sizes))

    frames_all = []
    descrs_all = []
    for size_of_spatial_bins in opts.sizes:
        # from https://github.com/vlfeat/vlfeat/blob/master/toolbox/sift/vl_phow.m
        # Recall from VL_DSIFT() that the first descriptor for scale SIZE has
        # center located at XC = XMIN + 3/2 SIZE (the Y coordinate is
        # similar). It is convenient to align the descriptors at different
        # scales so that they have the same geometric centers. For the
        # maximum size we pick XMIN = 1 and we get centers starting from
        # XC = 1 + 3/2 MAX(OPTS.SIZES). For any other scale we pick XMIN so
        # that XMIN + 3/2 SIZE = 1 + 3/2 MAX(OPTS.SIZES).
        # In practice, the offset must be integer ('bounds'), so the
        # alignment works properly only if all OPTS.SIZES are even or odd.

        off = floor(3.0 / 2 * (max(opts.sizes) - size_of_spatial_bins)) + 1
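        # e.g. with sizes [4, 6, 8, 10]: size 10 gives off = 1 (no shift)
        # while size 4 gives off = floor(1.5 * 6) + 1 = 10, so the bin
        # centers of all scales coincide.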

        # smooth the image to the appropriate scale based on the size
        # of the SIFT bins
        sigma = size_of_spatial_bins / float(opts.magnif)
        ims = vl_imsmooth(im, sigma)

        # extract dense SIFT features from all channels
        frames = []
        descrs = []
        for k in range(numChannels):
            size_of_spatial_bins = int(size_of_spatial_bins)
            # vl_dsift does not accept numpy.int64 or similar
            f_temp, d_temp = vl_dsift(data=ims[:, :, k],
                                      step=dsiftOpts.step,
                                      size=size_of_spatial_bins,
                                      fast=dsiftOpts.fast,
                                      verbose=dsiftOpts.verbose,
                                      norm=dsiftOpts.norm,
                                      bounds=[off, off, maxint, maxint])
            frames.append(f_temp)
            descrs.append(d_temp)
        frames = array(frames)
        descrs = array(descrs)
        d_new_shape = [descrs.shape[0] * descrs.shape[1], descrs.shape[2]]
        descrs = descrs.reshape(d_new_shape)
        # remove low contrast descriptors
        # note that for color descriptors the V component is
        # thresholded
        if (opts.color == 'gray') | (opts.color == 'opponent'):
            contrast = frames[0][2, :]
        elif opts.color == 'rgb':
            contrast = mean(
                [frames[0][2, :], frames[1][2, :], frames[2][2, :]], 0)
        else:
            raise ValueError('Color option ' + str(opts.color) +
                             ' not recognized')
        descrs[:, contrast < opts.contrastthreshold] = 0

        # save only x,y, and the scale
        frames_temp = array(frames[0][0:3, :])
        padding = array(size_of_spatial_bins * ones(frames[0][0].shape))
        frames_all.append(vstack([frames_temp, padding]))
        descrs_all.append(array(descrs))

    frames_all = hstack(frames_all)
    descrs_all = hstack(descrs_all)
    return frames_all, descrs_all
Beispiel #43
def globallocalregret(optstate, persist, **para):
    #doublenormdist
    #norprior
    if persist == None:
        persist = defaultdict(list)
        persist['raiseS'] = False
        persist['R'] = sp.eye(len(para['lb']))
    if optstate.n < para['onlyafter']:
        return 0, persist, dict()
    if persist['flip']:
        return 1, persist, dict()

    logging.info('globallocalregretchooser with {} inflated diagonal'.format(
        persist['raiseS']))
    if para['rotate']:
        logging.info('rotate\n{}'.format(persist['R']))
    d = len(para['lb'])
    lb = para['lb']
    ub = para['ub']

    #build a GP with slice-samples hypers
    x = sp.vstack(optstate.x).dot(persist['R'].T)
    y = sp.vstack(optstate.y)
    s = sp.vstack([e['s'] + 10**optstate.condition for e in optstate.ev])
    dx = [e['d'] for e in optstate.ev]
    logger.info('building GP')
    G = PES.makeG(x,
                  y,
                  s,
                  dx,
                  para['kindex'],
                  para['mprior'],
                  para['sprior'],
                  para['nhyp'],
                  prior=para['priorshape'])

    xminr, ymin, ierror = gpbo.core.optutils.twopartopt(
        lambda x: G.infer_m_post(persist['R'].dot(x.flatten()).reshape(
            [1, d]), [[sp.NaN]])[0, 0], para['lb'], para['ub'], para['dpara'],
        para['lpara'])
    xmin = persist['R'].dot(xminr.flatten()).reshape([1, d])
    #:mxmin,vxmin = [j[0,0] for j in G.infer_diag_post(optstate.x[0],[[sp.NaN]])]
    logger.info('post min at {}(true) {}(rotated) is {}'.format(
        xminr, xmin, ymin))

    dropdims = []
    for i in range(d):
        if xminr[i] > 0.995 * (ub[i] - lb[i]) + lb[i] or xminr[i] < lb[i] + (
                1. - 0.995) * (ub[i] - lb[i]):
            dropdims.append(i)
            if not sp.allclose(sp.eye(d), persist['R']):
                print('edge isn\'t working with nonzero rotation')
    logger.info('post min in on edge in axes {}'.format(dropdims))
    #get hessian/grad posterior
    #local probmin elipse at post min
    GH = gpbo.core.optutils.gpGH(G, xmin)
    Gr, cG, H, Hvec, varHvec, M, varM = GH

    #est the local regret
    Mdraws = gpbo.core.GPdc.draw(M[0, :], varM, 200)
    lrest = 0.
    for i in range(200):
        sM = Mdraws[i, :]
        sG = sM[:d]
        sH = gpbo.core.optutils.Hvec2H(sM[d:], d)
        sR = 0.5 * sG.dot(sp.linalg.solve(sH, sG))
        lrest += max(0., sR)
    lrest /= 200.
    logger.info('localregretest {}'.format(lrest))

    m = sp.diag(H)
    v = sp.diag(gpbo.core.optutils.Hvec2H(sp.diagonal(varHvec), d))
    logger.debug('H,stH\n{}\n{}'.format(
        H, sp.sqrt(gpbo.core.optutils.Hvec2H(sp.diagonal(varHvec), d))))
    logger.debug('axisprobs {}'.format(
        1. - sp.stats.norm.cdf(sp.zeros(d), loc=m, scale=sp.sqrt(v))))
    #step out to check +ve defininteness
    logger.info('checking for +ve definite ball')
    from gpbo.core import debugoutput
    pc = gpbo.core.optutils.probgppve(G,
                                      sp.array(xmin),
                                      tol=para['pvetol'],
                                      dropdims=dropdims)
    logger.info('prob pvedef at xmin {}'.format(pc))

    _ = prob(G, sp.array(xmin), tol=para['pvetol'], dropdims=dropdims)
    if para['rotate']:
        #U,S,V = sp.linalg.svd(H)
        eva, eve = sp.linalg.eigh(H)
        V = eve.T
        persist['R'] = V.dot(persist['R'])
    mask = sp.ones(d)
    for i in dropdims:
        mask[i] = 0.

    def PDcondition(x):
        P = gpbo.core.optutils.probgppve(G,
                                         sp.array(x) * mask + sp.array(xmin),
                                         tol=para['pvetol'],
                                         dropdims=dropdims)
        C = P > 1 - para['pvetol']
        #print(C,P,sp.array(x)*mask)
        return C

    #todo assuming unit radius search region for Rinit=1
    rmax = gpbo.core.optutils.ballradsearch(d,
                                            1.,
                                            PDcondition,
                                            ndirs=para['nlineS'],
                                            lineSh=para['lineSh'])

    if gpbo.core.debugoutput['adaptive']:
        import matplotlib
        matplotlib.rcParams['text.usetex'] = False
        fig, ax = plt.subplots(nrows=3, ncols=4, figsize=(85, 85))
        xmin = xmin.flatten()
        # plot the current GP
        if d == 2:
            #gpbo.core.optutils.gpplot(ax[0,0],ax[0,1],G,para['lb'],para['ub'],ns=60)
            ax[0, 0].set_title('GP post mean')
            ax[0, 1].set_title('GP post var')
            ax[0, 0].plot(xmin[0], xmin[1], 'ro')
            #plot some draws from H
            for i in range(4):
                Gm, Gv, Hd = gpbo.core.drawconditionH(*GH)
                try:
                    sp.linalg.cholesky(Hd)
                    gpbo.core.optutils.plotprobstatellipse(Gv,
                                                           Hd,
                                                           xmin,
                                                           ax[1, 1],
                                                           logr=True)
                except sp.linalg.LinAlgError:
                    pass
        if rmax > 0:
            ax[1, 1].plot([sp.log10(rmax)] * 2, [0., 2 * sp.pi], 'purple')
        else:
            logger.debug('plotting some draws...')
            #draw support points

            xvmaxr, vmax, ierror = gpbo.core.optutils.twopartopt(
                lambda x: -G.infer_diag_post(persist['R'].dot(x.flatten(
                )), [[sp.NaN]])[1][0, 0], para['lb'], para['ub'],
                para['dpara'], para['lpara'])
            xvmax = persist['R'].dot(xvmaxr.flatten())
            mvmax, vvmax = [
                j[0, 0] for j in G.infer_diag_post(xvmax, [[sp.NaN]])
            ]
            W = sp.vstack([
                ESutils.draw_support(G,
                                     lb,
                                     ub,
                                     para['support'] / 2,
                                     ESutils.SUPPORT_LAPAPROT,
                                     para=20,
                                     rotation=persist['R']),
                ESutils.draw_support(G,
                                     lb,
                                     ub,
                                     para['support'] / 2,
                                     ESutils.SUPPORT_VARREJ,
                                     para=vvmax,
                                     rotation=persist['R'])
            ])
            nd = 1500
            #draw mins and value of g at xmin as pair
            R, Y, A = ESutils.draw_min_xypairgrad(G, W, nd, xmin)
            #plot support
            if d == 2:
                gpbo.core.optutils.plotaslogrtheta(W[:, 0], W[:, 1], xmin[0],
                                                   xmin[1], ax[1, 1], 'b.')
                ax[0, 2].plot(W[:, 0], W[:, 1], 'b.')
                #plot mindraws
                gpbo.core.optutils.plotaslogrtheta(R[:, 0], R[:, 1], xmin[0],
                                                   xmin[1], ax[1, 1], 'r.')
                ax[0, 2].plot(R[:, 0], R[:, 1], 'r.')
        ax[1, 3].text(0, 0, 'prob +ve at min {}\nR+ve {}'.format(pc, rmax))
    if rmax == 0:
        if gpbo.core.debugoutput['adaptive']:
            try:
                fname = 'lotsofplots' + time.strftime(
                    '%d_%m_%y_%H:%M:%S') + '.png'
                print('saving as {}'.format(fname))
                fig.savefig(os.path.join(gpbo.core.debugoutput['path'], fname))
            except BaseException as e:
                logger.error(str(e))
            fig.clf()
            plt.close(fig)
            del (fig)
        logger.info('no +ve def region, choosereturns 0')
        return 0, persist, {
            'reuseH': [k.hyp for k in G.kf],
            'ppveatx': pc,
            'rpve': rmax,
            'R': persist['R']
        }

    xvmaxr, vmax, ierror = gpbo.core.optutils.twopartopt(
        lambda x: -G.infer_diag_post(persist['R'].dot(x.flatten()), [[sp.NaN]])
        [1][0, 0], para['lb'], para['ub'], para['dpara'], para['lpara'])
    xvmax = persist['R'].dot(xvmaxr.flatten())
    mvmax, vvmax = [j[0, 0] for j in G.infer_diag_post(xvmax, [[sp.NaN]])]
    logger.info('post var max {} at {} with mean {}'.format(
        vvmax, xvmax, mvmax))
    #draw support points
    W = sp.vstack([
        ESutils.draw_support(G,
                             lb,
                             ub,
                             para['support'] / 2,
                             ESutils.SUPPORT_LAPAPROT,
                             para=20,
                             weighted=para['weighted'],
                             rotation=persist['R']),
        ESutils.draw_support(G,
                             lb,
                             ub,
                             para['support'] / 2,
                             ESutils.SUPPORT_VARREJ,
                             para=vvmax,
                             rotation=persist['R'])
    ])

    Q, maxRin = drawpartitionmin2(G, W, xmin, rmax, para['draws'])

    logger.info('+ve region radius {} max sample radius {}'.format(
        rmax, maxRin))

    #pcurves from Q
    def data2cdf(X):
        n = X.size
        C = sp.linspace(1. / n, 1, n)
        XC = sorted(X)
        return XC, C

    Yin, Cin = data2cdf(Q[:, 1])
    normin = sp.stats.norm.fit(Yin)

    Yat, Cat = data2cdf(Q[:, 3])
    normat = sp.stats.norm.fit(Yat)

    Yout, Cout = data2cdf(Q[:, 2])

    #normal dist with var same as max in gp model and passing through estimated prob of min sample
    ydrawmin = Yout[0]
    cdfymin = Cout[0]
    mu = ydrawmin - sp.sqrt(vvmax * 2) * sp.special.erfinv(2 * cdfymin - 1.)
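    # inverts the normal CDF: mu is chosen so that a normal with the GP's
    # maximum posterior variance vvmax satisfies
    # Phi((ydrawmin - mu) / sqrt(vvmax)) = cdfymin at the lowest draw.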
    logger.info('upper norm at y {} c {} has mu {},var {}'.format(
        ymin, cdfymin, mu, vvmax))
    logger.info('lower norm at x {} has mu {},var {}'.format(
        xvmax, mvmax, vvmax))

    #interpolator for cdf
    def splicecdf(y):
        if y < Yout[0]:
            return sp.stats.norm.cdf(y, loc=mu, scale=sp.sqrt(vvmax))
        elif y >= Yout[-1]:
            return 1. - 1e-20
        else:
            i = 0
            while Yout[i] < y:
                i += 1
            return Cout[i]

    m, std = normin
    logger.debug(
        'inner approx m{} std{}\noutsample stats min{} max{} mean{}'.format(
            m, std,
            sp.array(Yout).min(),
            sp.array(Yout).max(), sp.mean(Yout)))

    racc = 0.
    n = len(Cout)
    #regret from samples after the min
    for i in range(1, n):
        racc += gpbo.core.GPdc.EI(-Yout[i], -m, std)[0, 0] / float(n)
    tmp = racc
    #regret from the tail bound
    I, err = spi.quad(
        lambda y: gpbo.core.GPdc.EI(-y, -m, std)[0, 0] * sp.stats.norm.pdf(
            y, mu, sp.sqrt(vvmax)), -sp.inf, Yout[0])
    racc += I
    logger.info('outer regret {}  (due to samples: {} due to tail: {}'.format(
        racc, tmp, racc - tmp))

    #regret lower bound
    #rlow,err = spi.quad(lambda y:gpbo.core.GPdc.EI(-y,-m,v)[0,0]*sp.stats.norm.pdf(y,mvmax,sp.sqrt(vvmax)),-sp.inf,mvmax)
    #regret from samples

    rsam = 0.
    for i in range(Q.shape[0]):
        rsam += max(0., Q[i, 1] - Q[i, 2])
    rsam /= Q.shape[0]

    #local regret from incumbent from samples
    rloc = 0.
    for i in range(Q.shape[0]):
        rloc += max(0., Q[i, 3] - Q[i, 1])
    rloc /= Q.shape[0]
    persist['localsampleregret'].append(rloc)
    #set switch to local if condition achieved
    if racc < para['regretswitch']:
        rval = 1
        persist['flip'] = True
        optstate.startlocal = xmin
    elif maxRin < 0.9 * rmax:
        rval = 2

    else:
        rval = 0
        persist['flip'] = False
    if gpbo.core.debugoutput['adaptive']:
        if d == 2:
            gpbo.core.optutils.plotaslogrtheta(W[:, 0], W[:, 1], xmin[0],
                                               xmin[1], ax[1, 1], 'b.')
            ax[0, 2].plot(W[:, 0], W[:, 1], 'b.')
            #plot mindraws
            R, Y, A = ESutils.draw_min_xypairgrad(G, W, 1500, xmin)
            gpbo.core.optutils.plotaslogrtheta(R[:, 0], R[:, 1], xmin[0],
                                               xmin[1], ax[1, 1], 'r.')

            ax[0, 2].plot(R[:, 0], R[:, 1], 'r.')
        ax[2, 2].plot(Q[:, 1], Q[:, 2], 'r.')
        ax[2, 2].set_xlabel('inR')
        ax[2, 2].set_ylabel('outR')
        ax[2, 2].plot([ymin], [ymin], 'go')

        ax[2, 1].plot(Q[:, 1], Q[:, 3], 'r.')
        ax[2, 1].set_xlabel('inR')
        ax[2, 1].set_ylabel('atArg')
        ax[2, 1].plot([ymin], [ymin], 'go')

        def pltcdf(Y, C, ax, col):
            return ax.plot(sp.hstack([[i, i] for i in Y])[1:-1],
                           sp.hstack([[i - C[0], i] for i in C])[1:-1],
                           color=col,
                           label='Sampled CDF')

        pltcdf(Yin, Cin, ax[2, 0], 'b')
        rin = sp.linspace(Yin[0], Yin[-1], 150)
        ax[2, 0].plot(rin, sp.stats.norm.cdf(rin, *normin), 'k')

        pltcdf(Yat, Cat, ax[2, 0], 'g')
        rat = sp.linspace(Yat[0], Yat[-1], 150)
        ax[2, 0].plot(rat, sp.stats.norm.cdf(rat, *normat), 'k')
        ax[2, 0].set_yscale('logit')

        pltcdf(Yout, Cout, ax[1, 0], 'r')
        ax[1, 0].set_yscale('logit')

        rl = min(Yout)
        ru = max(Yout)
        sup = sp.linspace(rl - 0.25 * (ru - rl), 0.5 * (rl + ru), 50)
        ax[1, 0].plot(sup,
                      sp.stats.norm.cdf(sup, loc=mu, scale=sp.sqrt(vvmax)),
                      'b--',
                      label='Approximate Tail Upper Bound')
        ax[1, 0].plot(sup,
                      sp.stats.norm.cdf(sup, loc=mvmax, scale=sp.sqrt(vvmax)),
                      'g--',
                      label='Lower Bound')
        ax[1, 0].axvline(ymin)

        if True:
            f2, a2 = plt.subplots(figsize=[8, 5])
            pltcdf(Yout, Cout, a2, 'r')
            a2.set_yscale('logit')

            a2.plot(sup,
                    sp.stats.norm.cdf(sup, loc=mu, scale=sp.sqrt(vvmax)),
                    color='b',
                    linestyle='--',
                    label='Approx Tail Upper Bound')
            a2.plot(sup,
                    sp.stats.norm.cdf(sup, loc=mvmax, scale=sp.sqrt(vvmax)),
                    color='b',
                    linestyle='-.',
                    label='Lower Bound')
            a2.axvline(ymin,
                       label='Posterior Mean Minimum',
                       color='k',
                       linestyle=':')
            a2.set_ylabel('CDF')
            a2.set_xlabel('y')
            from matplotlib.ticker import NullFormatter
            a2.yaxis.set_minor_formatter(NullFormatter())
            a2.spines['left']._adjust_location()

            a2.legend()
            f2.savefig(os.path.join(debugoutput['path'], 'ends.png'))

            plt.close(f2)
            import pickle
            pickle.dump([sup, mu, vvmax, mvmax, ymin, Yout, Cout],
                        open('results/bounddata.p', 'wb'))
        mxo = Yout[-1]
        mno = Yout[0]
        ro = sp.linspace(min(mno - 0.05 * (mxo - mno), ymin),
                         mxo + 0.05 * (mxo - mno), 200)

        ax[1, 2].text(0, 0.34, 'regretg sample      {}'.format(rsam))
        ax[1, 2].text(0, 0.24, 'regretg tailest     {}'.format(racc))
        #ax[1,2].text(0,0.18, 'regretg binormest   {}'.format(rbin))
        #ax[1,2].text(0,0.08, 'regretg lowerb      {} '.format(rlow))

        ax[1, 2].text(0, 0.5, 'maxRin  {} / {}'.format(maxRin, rmax))
        ax[1, 2].text(0, 0.6, 'mode  {}'.format(rval))

        ax[1, 2].text(0, 0.74, 'localr sample     {}'.format(rloc))
        ax[1, 2].text(0, 0.8, 'localr Taylor est {} '.format(lrest))
        persist['Rexists'].append(optstate.n)
        persist['sampleregret'].append(rsam)
        persist['expectedregret'].append(racc)
        #persist['expectedRbinorm'].append(rbin)
        persist['localrsam'].append(rloc)
        #persist['regretlower'].append(rlow)
        persist['localrest'].append(lrest)
        ax[0, 3].plot(persist['Rexists'], persist['localrest'], 'k')
        #ax[0,3].plot(persist['Rexists'],persist['expectedRbinorm'],'purple')
        ax[0, 3].plot(persist['Rexists'], persist['sampleregret'], 'b')
        ax[0, 3].plot(persist['Rexists'], persist['expectedregret'], 'g')
        ax[0, 3].plot(persist['Rexists'], persist['localrsam'], 'r')
        #ax[0,3].plot(persist['Rexists'],persist['regretlower'],'purple')
        ax[0, 3].set_yscale('log')

        #ax[2,3].plot(K[:,0],K[:,1],'b.')
        try:
            fname = 'lotsofplots' + time.strftime('%d_%m_%y_%H:%M:%S') + '.png'
            print('saving as {}'.format(fname))
            fig.savefig(os.path.join(gpbo.core.debugoutput['path'], fname))
        except BaseException as e:
            logger.error(str(e))
        fig.clf()
        plt.close(fig)
        del (fig)

    #if a cheat objective as available see how we would do on starting a local opt now
    if 'cheatf' in para.keys():
        try:
            C = sp.linalg.cholesky(H)
        except sp.linalg.LinAlgError:
            logger.info('not +ve definite at posterior min')
            C = sp.linalg.cholesky(sp.eye(H.shape[0]))
        print('C {} \nxmin {}\nC.T.xmin{}'.format(C, xmin, C.T.dot(xmin)))

        def fn2(x):
            print(
                x, para['cheatf'](sp.linalg.solve(C.T, x), **{
                    's': 0.,
                    'd': [sp.NaN]
                })[0])
            return para['cheatf'](sp.linalg.solve(C.T, x), **{
                's': 0.,
                'd': [sp.NaN]
            })[0]

        R = minimize(fn2, C.T.dot(xmin), method='Nelder-Mead')
        logger.warning('cheat testopt result with precondition {}:\n{}'.format(
            H, R))

    return rval, persist, {
        'start': xminr.flatten(),
        'H': H,
        'reuseH': [k.hyp for k in G.kf],
        'offsetEI': m,
        'ppveatx': pc,
        'rpve': rmax,
        'log10GRest': sp.log10(racc)
    }
Beispiel #44
def merge(s1,s2):
    s = sp.array(s1)
    s = sp.vstack([s, s2])
    return s
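A hedged usage sketch for merge, assuming a legacy SciPy that still re-exports NumPy's array routines (inputs illustrative):

print(merge([1., 2., 3.], [4., 5., 6.]))
# [[1. 2. 3.]
#  [4. 5. 6.]]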
Beispiel #45
def power_law(physics,
              phase,
              A1='',
              A2='',
              A3='',
              x='',
              return_rate=True,
              **kwargs):
    r"""
    For the following source term:
        .. math::
            r = A_{1}   x^{A_{2}}  +  A_{3}
    If return_rate is True, it returns the value of source term for the
    provided x in each pore.
    If return_rate is False, it calculates the slope and intercept for the
    following linear form :
        .. math::
            r = S_{1}   x  +  S_{2}

    Parameters
    ----------
    A1 -> A3 : string
        The property name of the coefficients in the source term model
    x : string or float/int or array/list
        The property name or numerical value or array for the main quantity
    Notes
    -----

    """
    if x == '':
        X = _sp.ones(physics.Np) * _sp.nan
    else:
        if type(x) == str:
            x = 'pore.' + x.split('.')[-1]
            try:
                X = physics[x]
            except KeyError:
                raise Exception(physics.name +
                                ' does not have the pore property :' + x + '!')
        else:
            X = _sp.array(x)

    length_X = _sp.size(X)
    if length_X != physics.Np:
        if length_X == 1:
            X = X * _sp.ones(physics.Np)
        elif length_X >= phase.Np:
            X = X[physics.map_pores()]
        else:
            raise Exception('Wrong size for the numerical array of x!')

    a = {}
    source_params = [A1, A2, A3]
    for ind in _sp.arange(_sp.size(source_params)):
        A = source_params[ind]
        if A == '':
            a[str(ind + 1)] = 0
        else:
            if type(A) == str:
                A = 'pore.' + A.split('.')[-1]
                try:
                    a[str(ind + 1)] = physics[A]
                except KeyError:
                    raise Exception(physics.name + '/' + phase.name +
                                    ' does not have the pore property :' + A +
                                    '!')
            else:
                raise Exception('source_term parameters can only be string '
                                'type!')

    if return_rate:
        return (a['1'] * X**a['2'] + a['3'])
    else:
        S1 = a['1'] * a['2'] * X**(a['2'] - 1)
        S2 = a['1'] * X**a['2'] * (1 - a['2']) + a['3']
        return (_sp.vstack((S1, S2)).T)
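The return_rate=False branch is the first-order Taylor expansion of r = A1·x^A2 + A3 around the current x, so S1·x + S2 reproduces r exactly at that point. A minimal numeric check with plain NumPy (values illustrative):

import numpy as np

A1, A2, A3, x = 2.0, 1.5, 0.3, 4.0
r = A1 * x**A2 + A3
S1 = A1 * A2 * x**(A2 - 1)          # slope: dr/dx
S2 = A1 * x**A2 * (1 - A2) + A3     # intercept: r - S1*x
assert np.isclose(S1 * x + S2, r)   # tangent line matches r at x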
Beispiel #46
def _parse_decode_genotypes_(decode_file, sids, pns, ocg):
    ih5f = h5py.File(decode_file, 'r')

    #Determine individual filter
    pns1 = ih5f['PNs'][...]
    assert len(sp.unique(pns1)) == len(pns1), 'WTF?'
    pn_sort_indices = sp.argsort(pns1)
    pn_overlap_filter = sp.in1d(pns1, pns)
    pn_sort_indices = pn_sort_indices[pn_overlap_filter]
    assert sp.all(
        pns == pns1[pn_sort_indices]), 'Re-ordering of individuals failed?'

    ocg.create_dataset('indivs', data=pns)

    #Determine marker filter
    mns1 = ih5f['Marker-names'][...]
    assert len(sp.unique(mns1)) == len(mns1), 'WTF?'
    mn_filter = sp.in1d(sids, mns1)
    mns = sids[mn_filter]

    indices = range(len(mns1))
    mn_indices_dict = dict(zip(mns1, indices))

    mn_indices = []
    for mn in mns:
        mn_indices.append(mn_indices_dict[mn])


    mn_indices = sp.array(mn_indices)
    assert sp.all(mns == mns1[mn_indices]), 'Re-ordering failed?'

    #Pull off position from marker name
    positions = sp.array([int(mn.split(':')[1]) for mn in mns])

    #Sort by position
    order = sp.argsort(positions)
    positions = positions[order]
    mn_indices = mn_indices[order]
    mns = mns[order]

    #Get nucleotide
    alleles = ih5f["Alleles"][...]
    a = sp.arange(len(alleles))
    even_map = a % 2 == 0
    odd_map = a % 2 == 1
    nts = (sp.vstack([alleles[even_map], alleles[odd_map]])).T
    nts = nts[mn_indices]

    n_snps = len(mns)
    n_indivs = len(pns)
    print('Parsing SNPs (%d x %d matrix)' % (n_snps, n_indivs))
    snps = ocg.create_dataset('raw_snps_ref',
                              shape=(n_snps, n_indivs),
                              dtype='single',
                              compression='lzf')
    freqs = sp.zeros(len(mn_indices))
    snp_means = sp.zeros(len(mn_indices))
    for i, m_i in enumerate(mn_indices):
        if i % 1000 == 0:
            print "Reached %d'th SNP" % i
        probs = ih5f["Probabilities2"][m_i, pn_sort_indices]
        pat_snp = sp.array([x[0] for x in probs], 'float32')
        mat_snp = sp.array([x[1] for x in probs], 'float32')
        snp = pat_snp + mat_snp
        ok_filter = (pat_snp >= 0) * (mat_snp >= 0)
        # compute the mean unconditionally: freq and snp_means below need it
        # even when no genotypes are missing
        mean_gt = sp.mean(snp[ok_filter])
        if not sp.all(ok_filter):
            # impute missing genotypes with the observed mean
            snp[~ok_filter] = mean_gt

        snps[i] = snp
        freq = mean_gt / 2.0
        snp_means[i] = mean_gt
        freqs[i] = freq

    #Calculate stds
    snp_stds = sp.sqrt(2 * freqs * (1 - freqs))  #sp.std(raw_snps, 1)
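    # under Hardy-Weinberg equilibrium a 0/1/2 genotype with allele
    # frequency f has variance 2*f*(1-f), which avoids a second pass
    # over the raw genotype matrix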

    ocg.create_dataset('snp_stds_ref', data=snp_stds)
    ocg.create_dataset('snp_means_ref', data=snp_means)
    ocg.create_dataset('freqs_ref', data=freqs)
    ocg.create_dataset('positions', data=positions)
    ocg.create_dataset('nts', data=nts)
    ocg.create_dataset('sids', data=mns)

    return {'ss_filter': mn_filter, 'ss_order': order}
Beispiel #47
#import scipy as np
# In[]
if __name__ == '__main__':
    '''
    test1()
    '''
    '''
    test2()
    '''
    thetas = np.linspace(0, np.pi * 0.3, 7)
    print "thetas: ", thetas
    rho = np.array([[0.1, 0.12, 0], [0, 0, 0]])
    print "rho:", rho

    k_dir = np.vstack([np.sin(thetas), np.zeros_like(thetas), np.cos(thetas)])
    k_dir = k_dir.transpose()

    wavelength = 10
    k0 = np.pi * 2 / wavelength

    a1 = np.array([1, 0, 0])
    a2 = np.array([0, 1, 0])

    class Gratinglobes(object):
        def check(self):
            dx = np.sqrt(np.sum(a1 * a1, axis=-1))
            dy = np.sqrt(np.sum(a2 * a2, axis=-1))
            threshold_d = wavelength / (1 + np.sin(thetas))
            temp = np.array([dx < threshold_d, dy < threshold_d])
            return np.sum(temp, axis=0) == temp.shape[0]
Beispiel #48
    def perform_selection(self,delta_values,strategy,plots_fn=None,results_fn=None):
        """Perform delta selection for kernel ridge regression

        delta_values : array-like, shape = [n_steps_delta]
            Array of delta values to test

        strategy : {'insample','cv'}
            Strategy to perform delta selection:
            - 'cv' performs cross-validation over delta
            - 'insample' estimates delta in-sample using maximum likelihood.

        plots_fn    : str, optional, default=None
            File name for generated plot. if not specified, the plot is not saved

        results_fn  : str, optional, default=None
            file name for saving cross-validation results. if not specified, nothing is saved
        Returns
        -------
        best_delta : float
            best regularization parameter delta for ridge regression

        """
        import matplotlib
        matplotlib.use('Agg')  # this lets it work even on machines without graphics displays
        import matplotlib.pylab as PLT 


        # use precomputed data if available
        if self.K is None:
            self.setup_kernel()

        print('run selection strategy %s' % strategy)

        model = fastlmm.lmm()
        nInds = self.K.shape[0]
   
        if strategy=='insample':
            # take delta with largest likelihood
            model.setK(self.K)
            model.sety(self.y)
            model.setX(self.X)
            best_delta = None
            best_nLL = SP.inf

            # evaluate negative log-likelihood for different values of alpha
            nLLs = SP.zeros(len(delta_values))
            for delta_idx, delta in enumerate(delta_values):
                res = model.nLLeval(delta=delta,REML=True)
                if res["nLL"] < best_nLL:
                    best_delta = delta
                    best_nLL = res["nLL"]

                nLLs[delta_idx] = res['nLL']

            fig = PLT.figure()
            fig.add_subplot(111)
            PLT.semilogx(delta_values,nLLs,color='g',linestyle='-')
            PLT.axvline(best_delta,color='r',linestyle='--')
            PLT.xlabel('logdelta')
            PLT.ylabel('nLL')
            PLT.title('Best delta: %f'%best_delta)
            PLT.grid(True)
            if plots_fn!=None:
                PLT.savefig(plots_fn)
            if results_fn!=None:
                SP.savetxt(results_fn, SP.vstack((delta_values,nLLs)).T,delimiter='\t',header='delta\tnLLs')
            
        if strategy=='cv':
            # run cross-validation for determining best delta
            kfoldIter = SKCV.KFold(n_splits=self.num_folds,shuffle=True,random_state=self.random_state).split(range(nInds))
            Ypred = SP.zeros((len(delta_values),nInds))
            for Itrain,Itest in kfoldIter:
                model.setK(self.K[Itrain][:,Itrain])
                model.sety(self.y[Itrain])
                model.setX(self.X[Itrain])

                model.setTestData(Xstar=self.X[Itest],K0star=self.K[Itest][:,Itrain])
                
                for delta_idx,delta in enumerate(delta_values):
                    res = model.nLLeval(delta=delta,REML=True)
                    beta = res['beta']
                    Ypred[delta_idx,Itest] = model.predictMean(beta=beta,delta=delta)

            MSE = SP.zeros(len(delta_values))
            for i in range(len(delta_values)):
                MSE[i] = SKM.mean_squared_error(self.y,Ypred[i])
            idx_bestdelta = SP.argmin(MSE)
            best_delta = delta_values[idx_bestdelta]

            fig = PLT.figure()
            fig.add_subplot(111)
            PLT.semilogx(delta_values,MSE,color='g',linestyle='-')
            PLT.axvline(best_delta,color='r',linestyle='--')
            PLT.xlabel('logdelta')
            PLT.ylabel('MSE')
            PLT.grid(True)
            PLT.title('Best delta: %f'%best_delta)
            if plots_fn!=None:
                PLT.savefig(plots_fn)
            if results_fn!=None:
                SP.savetxt(results_fn, SP.vstack((delta_values,MSE)).T,delimiter='\t',header='delta\tMSE')

        return best_delta
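For intuition, delta plays the usual ridge-regularization role here. A minimal cross-validation sketch over a delta grid in plain NumPy (synthetic data; names are illustrative and this is not fastlmm's API):

import numpy as np

rng = np.random.default_rng(1)
X = rng.standard_normal((100, 10))
y = X[:, 0] + 0.1 * rng.standard_normal(100)
deltas = np.logspace(-4, 2, 13)

def cv_mse(delta, k=5):
    folds = np.array_split(np.arange(len(y)), k)
    err = 0.0
    for test in folds:
        train = np.setdiff1d(np.arange(len(y)), test)
        beta = np.linalg.solve(X[train].T @ X[train] + delta * np.eye(X.shape[1]),
                               X[train].T @ y[train])
        err += np.mean((y[test] - X[test] @ beta) ** 2)
    return err / k

best_delta = deltas[np.argmin([cv_mse(d) for d in deltas])]
print("best delta:", best_delta)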
Beispiel #49
def QFun(x,y):
    xll=domain.geo_reference.xllcorner
    yll=domain.geo_reference.yllcorner
    inDat=scipy.vstack([x+xll,y+yll]).transpose()
    return rasterValuesAtPoints(xy=inDat,rasterFile=rasterFile,
                                interpolation=interpolation)
Beispiel #50
mprior = sp.array([0.,-1.,-5.,-0.5,0.5])
sprior = sp.array([1.,1.,3.,1.,1.])

MAPH = GPdc.searchMAPhyp(X,Y,S,D,mprior,sprior,GPdc.SQUEXPPS,mx=20000)
print "MLEH: "+str(MLEH)
print "MAPH: "+str(MAPH)
G = GPdc.GPcore(X,Y,S,D,GPdc.kernel(GPdc.SQUEXPPS,1,sp.array(MAPH)))



print(G.llk())

np=180
sup = sp.linspace(-1,1,np)
Dp = [[sp.NaN]]*np
Xp = sp.vstack([sp.array([i]) for i in sup])

[m,v] = G.infer_diag(Xp,Dp)
sq = sp.sqrt(v)
S = sp.empty([np,1])
for i in range(np):
    S[i,0] = -MAPH[2]*(Xp[i,0]-MAPH[3])*(Xp[i,0]-MAPH[4])
sc= sp.sqrt(S.flatten())

a0.plot(sup,m.flatten())

a0.fill_between(sup, sp.array(m-1.*sq).flatten(), sp.array(m+1.*sq).flatten(), facecolor='lightblue',edgecolor='lightblue')
a0.plot(sup,(m+2*sc).flatten(),'g')
a0.plot(sup,(m-2*sc).flatten(),'g')
plt.show()
Beispiel #51
                    # (46,28,29) motor 243 is precuneus
                    labs1, correlation_within_precuneus_vector, correlation_with_rest_vector, mask, centroid = parcellate_region(
                        (33, 34, 35, 74), sub, nClusters, sdir[i],
                        scan_type[i], 1, session_type[j], 0, 0)
                    count1 += 1
                    if count1 == 1:
                        labs_all_1 = sp.array(labs1.labels)
                        vert_all_1 = sp.array(labs1.vertices)
                        faces_all_1 = sp.array(labs1.faces)
                        correlation_within_precuneus = sp.array(
                            correlation_within_precuneus_vector)
                        correlation_with_rest = sp.array(
                            correlation_with_rest_vector)
                        all_centroid = sp.array(centroid)
                    else:
                        labs_all_1 = sp.vstack([labs_all_1, labs1.labels])
                        vert_all_1 = sp.array([labs1.vertices])
                        faces_all_1 = sp.array([labs1.faces])
                        correlation_within_precuneus = sp.vstack([
                            correlation_within_precuneus,
                            correlation_within_precuneus_vector
                        ])
                        correlation_with_rest = sp.vstack([
                            correlation_with_rest, correlation_with_rest_vector
                        ])
                        all_centroid = sp.vstack([all_centroid, centroid])

            # sp.savez_compressed('clustering_results_sessions_region_pc', R_all=R_all)
            data_file = 'data_file' + str(nClusters)
            sp.savez(data_file + str(i * 2 + j) + 'precuneus_sine.npz',
                     correlation_within_precuneus=correlation_within_precuneus,
Beispiel #52
            dmrt += h[2]['RT'][:]
            dmz += h[2]['Z'][:]
            nbdm += 1.
        h.close()

for p in da:
    w = we[p] > 0
    da[p][w] /= we[p][w]

rp /= wet
rt /= wet
z /= wet
if not args.no_dmat:
    dm /= wet[:, None]

da = sp.vstack(list(da.values()))
we = sp.vstack(list(we.values()))

co = smooth_cov(da, we, rp, rt)
da = (da * we).sum(axis=0)
da /= wet

if 'dmrp' in locals():
    dmrp /= nbdm
    dmrt /= nbdm
    dmz /= nbdm
if ('dmrp' not in locals()) or (dmrp.size == rp.size):
    dmrp = rp.copy()
    dmrt = rt.copy()
    dmz = z.copy()
Beispiel #53
def plotLine(vector,
             val=1.0,
             close=False,
             tube_radius=None,
             index=None,
             **kwargs):
    """
    PlotLine creates a single plot object from a single vector or from an
    n-dimensional tuple or list.
    """
    plot = False
    try:
        x = vector.x()
        temp0 = x[0]
        temp1 = x[1]
        temp2 = x[2]
        s = val * scipy.ones(temp0.shape)

        # For surface objects, this keyword allows for the last corner to connect with the first
        if close:
            temp0 = scipy.concatenate((temp0, scipy.atleast_1d(temp0[0])))
            temp1 = scipy.concatenate((temp1, scipy.atleast_1d(temp1[0])))
            temp2 = scipy.concatenate((temp2, scipy.atleast_1d(temp2[0])))
            s = scipy.concatenate((s, scipy.atleast_1d(s[0])))

        if index is not None:
            N = len(temp0)
            connect = scipy.vstack([
                scipy.arange(index, index + N - 1.5),
                scipy.arange(index + 1, index + N - .5)
            ]).T  # I want to rewrite this...
            index += N

    except AttributeError:

        temp0 = []
        temp1 = []
        temp2 = []
        s = []
        connect = []

        # if it is not some sort of vector or vector-derived class, iterate through and make a surface object
        if index is None:
            index = 0
            plot = True

        for i in vector:
            output = plotLine(i, close=close, index=index, **kwargs)
            temp0 += [output[0]]
            temp1 += [output[1]]
            temp2 += [output[2]]
            s += [output[3]]
            connect += [output[4]]
            index = output[5]

        #turn to arrays here so I don't accidentally nest lists or tuples
        temp0 = scipy.hstack(temp0)
        temp1 = scipy.hstack(temp1)
        temp2 = scipy.hstack(temp2)
        s = scipy.hstack(s)
        connect = scipy.vstack(connect)

    if index is None:

        try:
            mlab.plot3d(temp0,
                        temp1,
                        temp2,
                        s,
                        vmin=0.,
                        vmax=1.,
                        tube_radius=tube_radius,
                        **kwargs)
        except ValueError:
            mlab.plot3d(temp0.flatten(),
                        temp1.flatten(),
                        temp2.flatten(),
                        s.flatten(),
                        vmin=0.,
                        vmax=1.,
                        tube_radius=tube_radius,
                        **kwargs)

    else:
        if plot:
            # follows http://docs.enthought.com/mayavi/mayavi/auto/example_plotting_many_lines.html#example-plotting-many-lines

            src = mlab.pipeline.scalar_scatter(temp0, temp1, temp2, s)
            src.mlab_source.dataset.lines = connect
            lines = mlab.pipeline.stripper(src)
            mlab.pipeline.surface(lines, **kwargs)

        else:
            return (temp0, temp1, temp2, s, connect, index)
Beispiel #54
    else:
        print('Loading data from pickle: %s' % picklefile)
        (w_g, V_g, ctypes, tn_labels, psi) = pickle.load(open(picklefile, 'rb'))

    ### choose colormap and adapt normalization
    cmap = plt.get_cmap('jet')
    norm = plt.Normalize(0, sp.unique(ctypes).shape[0])
    ### plot first k main axes of variation
    print('Plotting by ctype ... ')
    fig = plt.figure(figsize=(k * 4, k * 4))
    for k1 in range(0, k):
        cnt = 1
        for k2 in range(k1 + 1, k):
            ax = fig.add_subplot(k - 1, k - 1, (k1 * (k - 1)) + cnt)
            cnt += 1
            trans_data = sp.vstack([V_g[k1, :], V_g[k2, :]]).dot(psi)
            for idx, ct in enumerate(sp.unique(ctypes)):
                c_idx = sp.where(ctypes == ct)[0]
                if c_idx.shape[0] > 0:
                    ax.plot(trans_data[0, c_idx],
                            trans_data[1, c_idx],
                            markers.MarkerStyle.filled_markers[idx % 13],
                            color=cmap(norm(idx)),
                            label=ct,
                            ms=4,
                            alpha=0.75)
            ax.set_title('PC %i vs %i' % (k1 + 1, k2 + 1))
            ax.set_xticks(ax.get_xticks()[::2])
            ax.set_yticks(ax.get_yticks()[::2])
            ax.tick_params(axis='both', which='major', labelsize=10)
            ax.tick_params(axis='both', which='minor', labelsize=10)
Beispiel #55
        if os.path.isfile(
                os.path.join(
                    p_dir, sub, sub + '.rfMRI_REST1_RL.reduce3.ftdata.NLM_11N\
_hvar_25.mat')):
            labs1 = parcellate_motor(sub, nClusters + 1, 1, 1, 2)
        if os.path.isfile(
                os.path.join(
                    p_dir, sub, sub + '.rfMRI_REST2_RL.reduce3.ftdata.NLM_11N\
_hvar_25.mat')):
            labs2 = parcellate_motor(sub, nClusters + 1, 1, 2, 2)
            count1 += 1
            if count1 == 1:
                labs_all_1 = sp.array(labs1)
                labs_all_2 = sp.array(labs2)
            else:
                labs_all_1 = sp.vstack([labs_all_1, labs1])
                labs_all_2 = sp.vstack([labs_all_2, labs2])

    R = sp.zeros(count1)
    for a in range(count1):
        R[a] = adjusted_rand_score(labs_all_1[a], labs_all_2[a])

    R_all.append(R)
    print('Clusters=', nClusters)

sp.savez_compressed('clustering_results_sessions_GMM', R_all=R_all)

#%%
fig = plt.figure()
plt.plot(R_all)
fig.savefig('across_subjects_adj_rand_sessions_GMM.pdf')
Beispiel #56
def test2():
    thetas = np.linspace(0, np.pi * 0.3, 7)
    #    thetas = np.array([np.pi*0.3])
    print "thetas: ", thetas
    zmin = 1
    zmax = 5
    zs = np.linspace(zmin, zmax, 100)
    r = np.array([[0.1, 0.12, zz] for zz in zs])

    k_dir = np.vstack([np.sin(thetas), np.zeros_like(thetas), np.cos(thetas)])
    k_dir = k_dir.transpose()
    #    print k_dir

    wavelength = 10
    k0 = np.pi * 2 / wavelength

    a1 = np.array([1, 0, 0])
    a2 = np.array([0, 1, 0])

    class Gratinglobes(object):
        def check(self):
            dx = np.sqrt(np.sum(a1 * a1, axis=-1))
            dy = np.sqrt(np.sum(a2 * a2, axis=-1))
            threshold_d = wavelength / (1 + np.sin(thetas))
            temp = np.array([dx < threshold_d, dy < threshold_d])
            return np.sum(temp, axis=0) == temp.shape[0]

    checker = Gratinglobes().check()
    print "grating lobe condition: ", checker
    pgf_gen_ewald = PGF_EWALD(k0, a1, a2, 2, 2)

    theta_phi = np.vstack([thetas, np.zeros_like(thetas)])
    theta_phi = theta_phi.transpose()

    class DGF_INT(object):
        def interp(self):
            x_sample = np.linspace(0, 0.2, 3)
            y_sample = np.linspace(0, 0.2, 3)
            z_sample = np.linspace(zmin, zmax, 20)
            theta_sample = np.linspace(0, np.pi * 0.67, 10)
            phi_sample = np.array([-0.01, 0.01])  #np.linspace(0,np.pi*2,10)
            pdf =  DGF_Interp_3D(x=x_sample,y=y_sample,z=z_sample, \
                                 pgf_gen=pgf_gen_ewald,\
                                 k_dir_theta=theta_sample, k_dir_phi=phi_sample)

            result_interp = pdf.interp_dir_r(theta_phi, r)
            return result_interp

    result_interp = DGF_INT().interp()
    result_ewald = pgf_gen_ewald.pgf(k_dir, r)

    import matplotlib.pylab as plt
    plt.figure()

    class Method(object):
        def __init__(self, result, marker, line):
            self.result = result
            self.marker = marker
            self.line = line

        def angle(self, it):
            plt.plot(zs,\
                     np.angle(self.result)[it]/np.pi*180,\
                     self.line,\
                     label='angle %d %s'%(it,self.marker))

        def absolute(self, it):
            plt.plot(zs,\
                     np.absolute(self.result)[it],\
                     self.line,\
                     label='abs %d %s'%(it,self.marker))

    for it in range(theta_phi.shape[0]):
        Method(result_interp, 'interp', "s").angle(it)
        Method(result_ewald, 'ewald', "-").angle(it)
    plt.ylabel("angle (degree)")
    plt.xlabel("zs")
    plt.legend()
    plt.show()

    plt.figure()
    for it in range(theta_phi.shape[0]):
        Method(result_interp, 'interp', "+").absolute(it)
        Method(result_ewald, 'ewald', "-").absolute(it)
    plt.xlabel("zs")
    plt.ylabel("log10(abs) ")
    plt.legend()
    plt.show()

    plt.figure()
    for it in range(theta_phi.shape[0]):
        Method((result_interp - result_ewald) / result_ewald,
               'diff', "-+").absolute(it)
    #    map(Method(result_ewald,'ewald',"-").absolute,xrange(theta_phi.shape[0]))
    plt.xlabel("zs")
    plt.ylabel("log10(abs) ")
    plt.legend()
    plt.show()
Beispiel #57
def gphinasrecc(optstate, persist, **para):
    if para['onlyafter'] >= optstate.n or not optstate.n % para['everyn'] == 0:
        #return [sp.NaN for i in para['lb']],{'didnotrun':True}
        return argminrecc(optstate, persist, **para)
    logger.info('gpmapas recommender')
    d = len(para['lb'])

    x = sp.hstack(
        [sp.vstack([e['xa'] for e in optstate.ev]),
         sp.vstack(optstate.x)])
    y = sp.vstack(optstate.y)
    s = sp.vstack([e['s'] + 10**optstate.condition for e in optstate.ev])
    dx = [e['d'] for e in optstate.ev]

    G = GPdc.GPcore(x, y, s, dx, [
        GPdc.kernel(optstate.aux['kindex'], d + 1, h)
        for h in optstate.aux['HYPdraws']
    ])

    #    def directwrap(xq,y):
    #        xq.resize([1,d])
    #        xe = sp.hstack([sp.array([[0.]]),xq])
    #        #print xe
    #        a = G.infer_m_post(xe,[[sp.NaN]])
    #        return (a[0,0],0)
    #    [xmin,ymin,ierror] = direct(directwrap,para['lb'],para['ub'],user_data=[], algmethod=1, maxf=para['maxf'], logfilename='/dev/null')

    def wrap(x):
        xq = copy.copy(x)
        xq.resize([1, d])
        xe = sp.hstack([sp.array([[0.]]), xq])
        a = G.infer_m_post(xe, [[sp.NaN]])
        return a[0, 0]

    xmin, ymin, ierror = gpbo.core.optutils.twopartopt(wrap, para['lb'],
                                                       para['ub'],
                                                       para['dpara'],
                                                       para['lpara'])
    logger.info('reccsearchresult: {}'.format([xmin, ymin, ierror]))
    from gpbo.core import debugoutput
    if debugoutput['datavis']:
        if not os.path.exists(debugoutput['path']):
            os.mkdir(debugoutput['path'])

        l = sp.mean([h[3] for h in optstate.aux['HYPdraws']])
        from matplotlib import pyplot as plt
        fig, ax = plt.subplots(nrows=3, ncols=1, figsize=(10, 30))

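        # Evaluate the posterior mean and standard deviation on an n-by-n
        # grid (with xa fixed to 0) for the contour panels below.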
        n = 200
        x_ = sp.linspace(-1, 1, n)
        y_ = sp.linspace(-1, 1, n)
        z_ = sp.empty([n, n])
        s_ = sp.empty([n, n])
        for i in xrange(n):
            for j in xrange(n):
                m_, v_ = G.infer_diag_post(sp.array([0., y_[j], x_[i]]),
                                           [[sp.NaN]])
                z_[i, j] = m_[0, 0]
                s_[i, j] = sp.sqrt(v_[0, 0])
        CS = ax[1].contour(x_, y_, z_, 20)
        ax[1].clabel(CS, inline=1, fontsize=10)
        CS = ax[2].contour(x_, y_, s_, 20)
        ax[2].clabel(CS, inline=1, fontsize=10)
        for i in xrange(x.shape[0] - 1):
            ax[0].plot(x[i, 1], x[i, 2], 'b.')
            circle = plt.Circle([x[i, 1], x[i, 2]],
                                radius=0.5 * x[i, 0] * l,
                                edgecolor="none",
                                color='lightblue',
                                alpha=0.8 - 0.6 * x[i, 2])
            ax[0].add_patch(circle)
        # plot the most recent evaluation in red, using its own row of x
        ax[0].plot(x[i + 1, 1], x[i + 1, 2], 'r.')
        circle = plt.Circle([x[i + 1, 1], x[i + 1, 2]],
                            radius=0.5 * x[i + 1, 0] * l,
                            edgecolor="none",
                            color='lightblue',
                            alpha=0.8 - 0.6 * x[i + 1, 2])
        ax[0].add_patch(circle)
        ax[0].axis([-1., 1., -1., 1.])
        ax[1].plot(xmin[0], xmin[1], 'ro')
        fig.savefig(
            os.path.join(
                debugoutput['path'],
                'datavis' + time.strftime('%d_%m_%y_%H:%M:%S') + '.png'))
        fig.clf()
        plt.close(fig)
        del fig

    return [i for i in xmin], persist, {'ymin': ymin}
Beispiel #58
0
def linear(physics, phase, A1='', A2='', x='', return_rate=True, **kwargs):
    r"""
    For the following source term:
        .. math::
            r = A_{1}   x  +  A_{2}
    If return_rate is True, it returns the value of source term for the
    provided x in each pore.
    If return_rate is False, it calculates the slope and intercept for the
    following linear form :
        .. math::
            r = S_{1}   x  +  S_{2}

    Parameters
    ----------
    A1 , A2 : string
        The property name of the coefficients in the source term model.
        With A2 set to zero this equation takes on the familiar for of r=kx.
    x : string or float/int or array/list
        The property name or numerical value or array for the main quantity
    Notes
    -----
    Because this source term is linear in concentration (x) is it not necessary
    to iterate during the solver step.  Thus, when using the
    ``set_source_term`` method for an algorithm, it is recommended to set the
    ``maxiter``
    argument to 0.  This will save 1 unncessary solution of the system, since
    the solution would coverge after the first pass anyway.

    """
    if x == '':
        X = _sp.ones(physics.Np) * _sp.nan
    else:
        if type(x) == str:
            x = 'pore.' + x.split('.')[-1]
            try:
                X = physics[x]
            except KeyError:
                raise Exception(physics.name +
                                ' does not have the pore property :' + x + '!')
        else:
            X = _sp.array(x)

    length_X = _sp.size(X)
    if length_X != physics.Np:
        if length_X == 1:
            X = X * _sp.ones(physics.Np)
        elif length_X >= phase.Np:
            X = X[physics.map_pores()]
        else:
            raise Exception('Wrong size for the numerical array of x!')

    a = {}
    source_params = [A1, A2]
    for ind in _sp.arange(_sp.size(source_params)):
        A = source_params[ind]
        if A == '':
            a[str(ind + 1)] = 0
        else:
            if type(A) == str:
                A = 'pore.' + A.split('.')[-1]
                try:
                    a[str(ind + 1)] = physics[A]
                except KeyError:
                    raise Exception(physics.name + '/' + phase.name +
                                    ' does not have the pore property :' + A +
                                    '!')
            else:
                raise Exception('source_term parameters can only be string '
                                'type!')

    if return_rate:
        return (a['1'] * X + a['2'])
    else:
        S1 = a['1']
        S2 = a['2']
        return (_sp.vstack((S1, S2)).T)
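
# A minimal usage sketch for the function above. `_StubPhysics` is a
# hypothetical stand-in for the Physics object that `linear` expects; it
# provides only what the function actually touches (Np, name, dict-style
# property access, map_pores), and the property names 'pore.k'/'pore.b'
# are illustrative.
import scipy as _sp

class _StubPhysics(dict):
    name = 'phys_01'
    Np = 4

    def map_pores(self):
        return _sp.arange(self.Np)

physics = _StubPhysics()
physics['pore.k'] = 2.0 * _sp.ones(physics.Np)  # slope A1
physics['pore.b'] = 0.5 * _sp.ones(physics.Np)  # intercept A2

# Rate mode: r = A1*x + A2 evaluated in every pore -> array of 3.5
r = linear(physics, physics, A1='pore.k', A2='pore.b', x=1.5)

# Coefficient mode: one row [S1, S2] per pore
S = linear(physics, physics, A1='pore.k', A2='pore.b', x=1.5,
           return_rate=False)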
Beispiel #59
0
def PESbsaq(optstate, persist, **para):
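    # PES acquisition over an augmented input space: an environmental
    # variable 'xa' in [xal, xau] is prepended as the first column of x.
    # The first 'nrandinit' steps use random points (with random xa);
    # afterwards PES_inplane proposes the next (xa, x) jointly, optionally
    # using a cost function trained on the observed (xa, cost) pairs.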
    para = copy.deepcopy(para)
    if persist is None:
        persist = {'n': 0, 'd': len(para['ub'])}
    n = persist['n']
    d = persist['d']
    if n < para['nrandinit']:
        persist['n'] += 1
        para['ev']['xa'] = sp.random.uniform(para['xal'], para['xau'])
        return randomaq(optstate, persist, **para)
    logger.info('PESbsaq')

    x = sp.hstack(
        [sp.vstack([e['xa'] for e in optstate.ev]),
         sp.vstack(optstate.x)])

    y = sp.vstack(optstate.y)
    s = sp.vstack([e['s'] for e in optstate.ev])
    dx = [e['d'] for e in optstate.ev]
    print "\n at pesinplane x {} axis 0".format(x)
    pesobj = PES.PES_inplane(x,
                             y,
                             s,
                             dx, [para['xal']] + para['lb'],
                             [para['xau']] + para['ub'],
                             para['kindex'],
                             para['mprior'],
                             para['sprior'],
                             0,
                             0,
                             DH_SAMPLES=para['DH_SAMPLES'],
                             DM_SAMPLES=para['DM_SAMPLES'],
                             DM_SUPPORT=para['DM_SUPPORT'],
                             DM_SLICELCBPARA=para['DM_SLICELCBPARA'],
                             mode=para['SUPPORT_MODE'])
    if para['traincfn']:
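        # Train a cost function on the observed (xa, cost) pairs; the else
        # branch falls back to the fixed para['cfn'].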
        #print "XXXXXXXXXXXXXXx"
        cx = sp.vstack([e['xa'] for e in optstate.ev])
        cc = sp.vstack([e for e in optstate.c])
        #print cx
        #print cc
        #print optstate.ev
        #print optstate.x
        cfn = objectives.traincfn(cx, cc)
        """
        if len(cc)%5==0:
            from matplotlib import pyplot as plt
            f,a = plt.subplots(1)
            xt = sp.linspace(0,1,100)
            m = sp.empty(100)
            for i in xrange(100):
                m[i]=cfn(0.,**{'xa':xt[i]})
            a.plot(xt,m,'b')
            for i in xrange(len(optstate.c)):
                a.plot(cx[i,0],cc[i,0],'ro')
            plt.show()
        """
    else:
        cfn = para['cfn']

    [xmin, ymin, ierror] = pesobj.search_acq(cfn,
                                             lambda s: para['ev']['s'],
                                             volper=para['volper'])
    logger.debug([xmin, ymin, ierror])
    para['ev']['xa'] = xmin[0]
    xout = [i for i in xmin[1:]]
    return xout, para['ev'], persist, {
        'HYPdraws': [k.hyp for k in pesobj.G.kf],
        'mindraws': pesobj.Z,
        'DIRECTmessage': ierror,
        'PESmin': ymin
    }
Beispiel #60
0
def creAllvecSamp():
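    # Build word-pair feature matrices for the RELATIONS train/test splits:
    # stack one getWordPairVec row per pair, in batches, and save each batch
    # and its label vector as a .mat file.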
    Tlines = open("res/RELATIONS_train.txt").readlines()
    TLlines = open("res/RELATIONS_trainLable.txt").readlines()
    batchNum = 10
    batchSize = len(Tlines) // batchNum  # integer batch size
    for b in range(0, batchNum):
        line = Tlines[b * batchSize]
        word1 = line.strip("\n").split("\t")[0]
        word2 = line.strip("\n").split("\t")[1]
        wordvec = getWordPairVec(word1, word2)
        Mat = wordvec

        for i in range(b * batchSize + 1, (b + 1) * batchSize):
            line = Tlines[i]
            print(i)
            word1 = line.strip("\n").split("\t")[0]
            word2 = line.strip("\n").split("\t")[1]
            wordvec = getWordPairVec(word1, word2)
            Mat = vstack([Mat, wordvec])
            print(Mat.shape)
        sio.savemat(u"I:/数据/wordpairRelExpt/allVec/PairRelSimTrain_" + str(b) +
                    ".mat", {"train": Mat.transpose()},
                    oned_as='column')  #行大于列 按列存成一维数组

        labels = []
        for i in range(b * batchSize, (b + 1) * batchSize):
            line = TLlines[i]
            labels.append(int(line.strip()))
        # build and save the label vector once per batch, after the loop
        Mat = np.array(labels)
        sio.savemat(u"I:/数据/wordpairRelExpt/allVec/PairRelSimTrainLable_" +
                    str(b) + ".mat", {"trainLable": Mat.transpose()},
                    oned_as='column')  # more rows than columns; saved column-wise as 1-D

    Telines = open("res/RELATIONS_test.txt").readlines()
    line = Telines[0]
    word1 = line.strip("\n").split("\t")[0]
    word2 = line.strip("\n").split("\t")[1]
    wordvec = getWordPairVec(word1, word2)  # 300-dimensional similarity vector
    Mat = wordvec
    for i in range(1, 600):
        line = Telines[i]
        print(i)
        word1 = line.strip("\n").split("\t")[0]
        word2 = line.strip("\n").split("\t")[1]
        wordvec = getWordPairVec(word1, word2)
        Mat = vstack([Mat, wordvec])
        print(Mat.shape)
    sio.savemat(u"I:/数据/wordpairRelExpt/allVec/PairRelTestSim_0.mat",
                {"tests": Mat.transpose()},
                oned_as='column')  # more rows than columns; saved column-wise as 1-D
    # print(trainMat[])
    line = Telines[600]
    word1 = line.strip("\n").split("\t")[0]
    word2 = line.strip("\n").split("\t")[1]
    wordvec = getWordPairVec(word1, word2)  # 300-dimensional similarity vector
    Mat = wordvec
    for i in range(601, len(Telines)):
        line = Telines[i]
        print(i)
        word1 = line.strip("\n").split("\t")[0]
        word2 = line.strip("\n").split("\t")[1]
        wordvec = getWordPairVec(word1, word2)
        Mat = vstack([Mat, wordvec])
        print(Mat.shape)
    sio.savemat(u"I:/数据/wordpairRelExpt/allVec/PairRelTestSim_1",
                {"tests": Mat.transpose()},
                oned_as='column')  #行大于列 按列存成一维数组
    # print(trainMat[])

    lines = open("res/RELATIONS_testLable.txt").readlines()
    labels = []
    for i in range(0, 600):
        line = lines[i]
        labels.append(int(line.strip()))
    Mat = np.array(labels)
    sio.savemat(u"I:/数据/wordpairRelExpt/allVec/PairRelSimTestLable_0.mat",
                {"testLable": Mat.transpose()},
                oned_as='column')  # more rows than columns; saved column-wise as 1-D

    labels = []
    for i in range(600, len(lines)):
        line = lines[i]
        labels.append(int(line.strip()))
    Mat = np.array(labels)
    sio.savemat(u"I:/数据/wordpairRelExpt/allVec/PairRelSimTestLable_1.mat",
                {"testLable": Mat.transpose()},
                oned_as='column')  # more rows than columns; saved column-wise as 1-D