Example #1
File: dirk_2.py  Project: catubc/myapp
def Compare_sorts(Sort1, Sort2, tsf):

    import numpy as np
    import compare_sort
    #Hacked way of making a numpy array out of uneven list; using trailing zeros
    max_len = 0
    for i in range(len(Sort1.units)):
        if max_len < len(Sort1.units[i]): max_len = len(Sort1.units[i])
    spikes1 = np.zeros((len(Sort1.units),max_len), dtype=np.int32)
    for i in range(len(Sort1.units)):
        spikes1[i][0:len(Sort1.units[i])]=Sort1.units[i]

    spikes1=np.asfortranarray(spikes1)

    max_len = 0
    for i in range(len(Sort2.units)):
        if max_len < len(Sort2.units[i]): max_len = len(Sort2.units[i])
    spikes2 = np.zeros((len(Sort2.units),max_len), dtype=np.int32)
    for i in range(len(Sort2.units)):
        spikes2[i][0:len(Sort2.units[i])]=Sort2.units[i]

    spikes2=np.asfortranarray(spikes2)

    Siteloc = np.asfortranarray(tsf.Siteloc)

    print "Into fortran..."
    out_array =  compare_sort.compare_sort(tsf.SampleFrequency, spikes1, spikes2, Siteloc, Sort1.maxchan, Sort2.maxchan)
    print "... out of fortran"
    
    Sort2.purity = out_array[0][0:len(Sort2.units)]
    Sort2.completeness= out_array[1][0:len(Sort2.units)]
    Sort2.match = out_array[2][0:len(Sort2.units)]

    return
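A note on the padding idiom above: the trailing-zero trick can be factored into a small reusable helper. A minimal sketch of the same idea (helper name hypothetical, not from the original project):

import numpy as np

def pad_ragged_to_fortran(units, dtype=np.int32):
    # Pad uneven 1-D sequences with trailing zeros into a single
    # Fortran-ordered 2-D array, as done twice in Compare_sorts.
    max_len = max((len(u) for u in units), default=0)
    out = np.zeros((len(units), max_len), dtype=dtype, order='F')
    for i, u in enumerate(units):
        out[i, :len(u)] = u
    return out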
Example #2
def test_reorder_vector():
    nobs = 5
    k_endog = 3

    missing = np.zeros((k_endog, nobs))
    missing[0, 0] = 1
    missing[:2, 1] = 1
    missing[0, 2] = 1
    missing[2, 2] = 1
    missing[1, 3] = 1
    missing[2, 4] = 1

    given = np.zeros((k_endog, nobs))
    given[:, :] = np.array([1, 2, 3])[:, np.newaxis]
    desired = given.copy()

    given[:, 0] = [2, 3, 0]
    desired[:, 0] = [0, 2, 3]
    given[:, 1] = [3, 0, 0]
    desired[:, 1] = [0, 0, 3]
    given[:, 2] = [2, 0, 0]
    desired[:, 2] = [0, 2, 0]
    given[:, 3] = [1, 3, 0]
    desired[:, 3] = [1, 0, 3]
    given[:, 4] = [1, 2, 0]
    desired[:, 4] = [1, 2, 0]

    actual = np.asfortranarray(given.copy())
    missing = np.asfortranarray(missing.astype(np.int32))
    tools.reorder_missing_vector(actual, missing, inplace=True)
    assert_equal(actual, desired)
Example #3
def writeArraysToFile(stream, x, y, z):
    # Check if arrays have same shape and data type
    assert ( x.size == y.size == z.size ), "Different array sizes."
    assert ( x.dtype.itemsize == y.dtype.itemsize == z.dtype.itemsize ), "Different item sizes."
  
    nitems = x.size
    itemsize = x.dtype.itemsize

    fmt = _get_byte_order_char() + str(1) + np_to_struct[x.dtype.name]  # > for big endian
    if (x.flags['C_CONTIGUOUS']):  
        xx = np.asfortranarray(x.T).ravel()
    else:
        xx = x.ravel()

    if (y.flags['C_CONTIGUOUS']):
        yy = np.asfortranarray(y.T).ravel()
    else:
        yy = y.ravel()

    if (z.flags['C_CONTIGUOUS']):
        zz = np.asfortranarray(z.T).ravel()
    else:
        zz = z.ravel()    
        
    # eliminate this loop by creating a composed array.
    for i in range(nitems):
        bx = struct.pack(fmt, xx[i])
        by = struct.pack(fmt, yy[i])
        bz = struct.pack(fmt, zz[i])
        stream.write(bx)
        stream.write(by)
        stream.write(bz)
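As the last comment suggests, the per-element struct.pack loop can be removed by composing one interleaved array and writing it in a single call. A sketch, assuming big-endian output to match the '>' byte-order character used in fmt:

import numpy as np

def write_arrays_composed(stream, x, y, z):
    # Interleave x, y, z element-wise into an (nitems, 3) array,
    # force big-endian byte order, and write all bytes at once.
    # Row-major tobytes() gives the same x0,y0,z0,x1,y1,z1,... order
    # as the original loop.
    composed = np.column_stack((x.ravel(), y.ravel(), z.ravel()))
    stream.write(composed.astype(composed.dtype.newbyteorder('>')).tobytes())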
Example #4
File: warp.py  Project: spillai/ProxImaL
    def adjoint(self, inputs, outputs):
        """The adjoint operator.

        Reads from inputs and writes to outputs.
        """


        if self.implementation == Impl['halide']:

            #Halide implementation
            if len(self.H.shape) == 2:
                tmpin = np.asfortranarray( inputs[0][..., np.newaxis].astype(np.float32) )
            else:
                tmpin = np.asfortranarray( inputs[0].astype(np.float32) )

            Halide('At_warp.cpp').At_warp( tmpin, self.Hf, self.tmpadj ) #Call
            np.copyto(outputs[0], self.tmpadj )

        else:

            #CV2 version
            inimg = inputs[0]
            if len(self.H.shape) == 2:
                # + cv2.WARP_INVERSE_MAP
                warpedInput = cv2.warpPerspective(np.asfortranarray(inimg), self.Hinv.T, inimg.shape[1::-1], flags=cv2.INTER_LINEAR,
                                    borderMode=cv2.BORDER_CONSTANT, borderValue=0.)
                np.copyto( outputs[0], warpedInput )

            else:
                outputs[0][:] = 0.0
                for j in range(self.H.shape[2]):
                    warpedInput = cv2.warpPerspective(np.asfortranarray(inimg[:,:,:,j]), self.Hinv[:,:,j].T, inimg.shape[1::-1], flags=cv2.INTER_LINEAR,
                                                    borderMode=cv2.BORDER_CONSTANT, borderValue=0.) #Necessary due to array layout in opencv
                    outputs[0] += warpedInput
Example #5
def test_mem_layout():
    # Test with different memory layouts of X and y
    X_ = np.asfortranarray(X)
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    clf.fit(X_, y)
    assert_array_equal(clf.predict(T), true_result)
    assert_equal(100, len(clf.estimators_))

    X_ = np.ascontiguousarray(X)
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    clf.fit(X_, y)
    assert_array_equal(clf.predict(T), true_result)
    assert_equal(100, len(clf.estimators_))

    y_ = np.asarray(y, dtype=np.int32)
    y_ = np.ascontiguousarray(y_)
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    clf.fit(X, y_)
    assert_array_equal(clf.predict(T), true_result)
    assert_equal(100, len(clf.estimators_))

    y_ = np.asarray(y, dtype=np.int32)
    y_ = np.asfortranarray(y_)
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    clf.fit(X, y_)
    assert_array_equal(clf.predict(T), true_result)
    assert_equal(100, len(clf.estimators_))
Example #6
    def consLinear( self, A, lb=None, ub=None ):
        """
        sets linear constraints.

        Arguments:
        A:  linear constraint matrix, array of size (Nconslin,N).
        lb: lower bounds, array of size (Nconslin,). (default: -inf).
        ub: upper bounds, array of size (Nconslin,). (default: zeros).

        """

        self.conslinA = np.asfortranarray( A )

        if( self.conslinA.shape != ( self.Nconslin, self.N ) ):
            raise ValueError( "Argument 'A' must have size (" + str(self.Nconslin)
                              + "," + str(self.N) + ")." )

        if( lb is None ):
            lb = -np.inf * np.ones( (self.Nconslin,) )

        if( ub is None ):
            ub = np.zeros( (self.Nconslin,) )

        self.conslinlb = np.asfortranarray( lb )
        self.conslinub = np.asfortranarray( ub )

        if( self.conslinlb.shape != ( self.Nconslin, ) or
            self.conslinub.shape != ( self.Nconslin, ) ):
            raise ValueError( "Bounds must have size (" + str(self.Nconslin) + ",)." )
Example #7
def test_bind():
    mod = Representation(1, k_states=2)

    # Test invalid endogenous array (it must be ndarray)
    assert_raises(ValueError, lambda: mod.bind([1,2,3,4]))

    # Test valid (nobs x 1) endogenous array
    mod.bind(np.arange(10)*1.)
    assert_equal(mod.nobs, 10)

    # Test valid (k_endog x 0) endogenous array
    mod.bind(np.zeros(0,dtype=np.float64))

    # Test invalid (3-dim) endogenous array
    assert_raises(ValueError, lambda: mod.bind(np.arange(12).reshape(2,2,3)*1.))

    # Test valid F-contiguous
    mod.bind(np.asfortranarray(np.arange(10).reshape(1,10)))
    assert_equal(mod.nobs, 10)

    # Test valid C-contiguous
    mod.bind(np.arange(10).reshape(10,1))
    assert_equal(mod.nobs, 10)

    # Test invalid F-contiguous
    assert_raises(ValueError, lambda: mod.bind(np.asfortranarray(np.arange(10).reshape(10,1))))

    # Test invalid C-contiguous
    assert_raises(ValueError, lambda: mod.bind(np.arange(10).reshape(1,10)))
Example #8
def pixel(xp, yp, channel_is_used, min_equ_ref, mean_equ_ref, eof, max_usable_eof, ss, ms, r):

    from constant import R17600, COMPONENT_PARTICLE

    channel_is_used_p = np.transpose(channel_is_used[xp, yp, :, :]).astype(bool)
    min_equ_ref_p = np.transpose(min_equ_ref[xp, yp, :, :])
    mean_equ_ref_p = np.transpose(mean_equ_ref[xp, yp, :, :])

    reg_scale = R17600 / r

    if r > 1100:
        eof_p = eof[xp, yp, :, :, :]
        max_usable_eof_p = max_usable_eof[xp, yp, :]
    else:
        eof_p = eof[xp, yp, :, :]
        max_usable_eof_p = max_usable_eof[xp, yp]

    tau_cam_ss = np.asfortranarray(np.transpose(ss[:, math.ceil(xp/reg_scale), math.ceil(yp/reg_scale), COMPONENT_PARTICLE, :, :], [0, 3, 1, 2]))
    tau_cam_ms = np.asfortranarray(np.transpose(ms[:, math.ceil(xp/reg_scale), math.ceil(yp/reg_scale), COMPONENT_PARTICLE, :, :], [0, 3, 1, 2]))

    reg = collections.namedtuple('reg', 'channel_is_used min_equ_ref mean_equ_ref eof max_usable_eof')
    smart = collections.namedtuple('smart', 'ss ms')

    reg_p = reg(channel_is_used_p, min_equ_ref_p, mean_equ_ref_p, eof_p, max_usable_eof_p)
    smart_p = smart(tau_cam_ss, tau_cam_ms)

    return reg_p, smart_p
Example #9
def test_nmf():
    img_file = 'boat.png'
    try:
        img = Image.open(img_file)
    except Exception:
        print("Cannot load image %s : skipping test" %img_file)
        return None
    I = np.array(img) / 255.
    if I.ndim == 3:
        A = np.asfortranarray(I.reshape((I.shape[0],I.shape[1] * I.shape[2])),dtype = myfloat)
        rgb = True
    else:
        A = np.asfortranarray(I,dtype = myfloat)
        rgb = False

    m = 16; n = 16
    X = spams.im2col_sliding(A,m,n,rgb)
    X = X[:,::10]
    X = np.asfortranarray(X / np.tile(np.sqrt((X * X).sum(axis=0)),(X.shape[0],1)),dtype = myfloat)
    ########## FIRST EXPERIMENT ###########
    tic = time.time()
    (U,V) = spams.nmf(X,return_lasso= True,K = 49,numThreads=4,iter = -5)
    tac = time.time()
    t = tac - tic
    print('time of computation for Dictionary Learning: %f' %t)

    print('Evaluating cost function...')
    Y = X - U * V
    R = np.mean(0.5 * (Y * Y).sum(axis=0))
    print('objective function: %f' %R)
    return None
Example #10
def MakeGrid(shape, step=None, start=None): 

    if step is None:
        step = np.ones(2)

    # if start not specified, we'll arrange it as assumed in fftshift
    if start is None:
        start = np.zeros(2)
        if shape[0] % 2:
            start[0] = -0.5 * (shape[0]-1) * step[0]
        else:
            start[0] = -0.5 * (shape[0]) * step[0]
        if shape[1] % 2:    
            start[1] = -0.5 * (shape[1]-1) * step[1]
        else:
            start[1] = -0.5 * shape[1] * step[1]
                        
    
    x = np.arange(0, shape[0]) * step[0] + start[0]
    y = np.arange(0, shape[1]) * step[1] + start[1]
    
    xGrid, yGrid = np.meshgrid(x, y)
    xGrid = np.asfortranarray(xGrid.T)
    yGrid = np.asfortranarray(yGrid.T)
    
    return xGrid, yGrid    
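The transpose-and-copy at the end of MakeGrid can be avoided by asking np.meshgrid for matrix ('ij') indexing. A minimal equivalent sketch:

import numpy as np

def make_grid_ij(shape, step, start):
    # indexing='ij' yields grids already shaped (shape[0], shape[1]),
    # so only the Fortran-order conversion remains.
    x = np.arange(shape[0]) * step[0] + start[0]
    y = np.arange(shape[1]) * step[1] + start[1]
    xGrid, yGrid = np.meshgrid(x, y, indexing='ij')
    return np.asfortranarray(xGrid), np.asfortranarray(yGrid)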
Example #11
    def psiDerivativecomputations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, variance, lengthscale, Z, variational_posterior):
        ARD = (len(lengthscale)!=1)
        
        N,M,Q = self.get_dimensions(Z, variational_posterior)
        psi1_gpu = self.gpuCache['psi1_gpu']
        psi2n_gpu = self.gpuCache['psi2n_gpu']
        l_gpu = self.gpuCache['l_gpu']
        Z_gpu = self.gpuCache['Z_gpu']
        mu_gpu = self.gpuCache['mu_gpu']
        S_gpu = self.gpuCache['S_gpu']
        gamma_gpu = self.gpuCache['gamma_gpu']
        dvar_gpu = self.gpuCache['dvar_gpu']
        dl_gpu = self.gpuCache['dl_gpu']
        dZ_gpu = self.gpuCache['dZ_gpu']
        dmu_gpu = self.gpuCache['dmu_gpu']
        dS_gpu = self.gpuCache['dS_gpu']
        dgamma_gpu = self.gpuCache['dgamma_gpu']
        grad_l_gpu = self.gpuCache['grad_l_gpu']
        grad_mu_gpu = self.gpuCache['grad_mu_gpu']
        grad_S_gpu = self.gpuCache['grad_S_gpu']
        grad_gamma_gpu = self.gpuCache['grad_gamma_gpu']
        log_denom1_gpu = self.gpuCache['log_denom1_gpu']
        log_denom2_gpu = self.gpuCache['log_denom2_gpu']
        log_gamma_gpu = self.gpuCache['log_gamma_gpu']
        log_gamma1_gpu = self.gpuCache['log_gamma1_gpu']
        
        if self.GPU_direct:
            dL_dpsi1_gpu = dL_dpsi1
            dL_dpsi2_gpu = dL_dpsi2
            dL_dpsi0_sum = gpuarray.sum(dL_dpsi0).get()
        else:
            dL_dpsi1_gpu = self.gpuCache['dL_dpsi1_gpu']
            dL_dpsi2_gpu = self.gpuCache['dL_dpsi2_gpu']
            dL_dpsi1_gpu.set(np.asfortranarray(dL_dpsi1))
            dL_dpsi2_gpu.set(np.asfortranarray(dL_dpsi2))
            dL_dpsi0_sum = dL_dpsi0.sum()

        self.reset_derivative()
        # t=self.g_psi1compDer(dvar_gpu,dl_gpu,dZ_gpu,dmu_gpu,dS_gpu,dL_dpsi1_gpu,psi1_gpu, np.float64(variance),l_gpu,Z_gpu,mu_gpu,S_gpu, np.int32(N), np.int32(M), np.int32(Q), block=(self.threadnum,1,1), grid=(self.blocknum,1),time_kernel=True)
        # print 'g_psi1compDer '+str(t)
        # t=self.g_psi2compDer(dvar_gpu,dl_gpu,dZ_gpu,dmu_gpu,dS_gpu,dL_dpsi2_gpu,psi2n_gpu, np.float64(variance),l_gpu,Z_gpu,mu_gpu,S_gpu, np.int32(N), np.int32(M), np.int32(Q), block=(self.threadnum,1,1), grid=(self.blocknum,1),time_kernel=True)
        # print 'g_psi2compDer '+str(t)
        self.g_psi1compDer.prepared_call((self.blocknum,1),(self.threadnum,1,1),dvar_gpu.gpudata,dl_gpu.gpudata,dZ_gpu.gpudata,dmu_gpu.gpudata,dS_gpu.gpudata,dgamma_gpu.gpudata,dL_dpsi1_gpu.gpudata,psi1_gpu.gpudata, log_denom1_gpu.gpudata, log_gamma_gpu.gpudata, log_gamma1_gpu.gpudata, np.float64(variance),l_gpu.gpudata,Z_gpu.gpudata,mu_gpu.gpudata,S_gpu.gpudata,gamma_gpu.gpudata,np.int32(N), np.int32(M), np.int32(Q))
        self.g_psi2compDer.prepared_call((self.blocknum,1),(self.threadnum,1,1),dvar_gpu.gpudata,dl_gpu.gpudata,dZ_gpu.gpudata,dmu_gpu.gpudata,dS_gpu.gpudata,dgamma_gpu.gpudata,dL_dpsi2_gpu.gpudata,psi2n_gpu.gpudata, log_denom2_gpu.gpudata, log_gamma_gpu.gpudata, log_gamma1_gpu.gpudata, np.float64(variance),l_gpu.gpudata,Z_gpu.gpudata,mu_gpu.gpudata,S_gpu.gpudata,gamma_gpu.gpudata,np.int32(N), np.int32(M), np.int32(Q))

        dL_dvar = dL_dpsi0_sum + gpuarray.sum(dvar_gpu).get()
        sum_axis(grad_mu_gpu,dmu_gpu,N*Q,self.blocknum)
        dL_dmu = grad_mu_gpu.get()
        sum_axis(grad_S_gpu,dS_gpu,N*Q,self.blocknum)
        dL_dS = grad_S_gpu.get()
        sum_axis(grad_gamma_gpu,dgamma_gpu,N*Q,self.blocknum)
        dL_dgamma = grad_gamma_gpu.get()
        dL_dZ = dZ_gpu.get()
        if ARD:
            sum_axis(grad_l_gpu,dl_gpu,Q,self.blocknum)
            dL_dlengthscale = grad_l_gpu.get()
        else:
            dL_dlengthscale = gpuarray.sum(dl_gpu).get()

        return dL_dvar, dL_dlengthscale, dL_dZ, dL_dmu, dL_dS, dL_dgamma
Example #12
def actionAngleStaeckel_calcu0(E,Lz,pot,delta):
    """
    NAME:
       actionAngleStaeckel_calcu0
    PURPOSE:
       Use C to calculate u0 in the Staeckel approximation
    INPUT:
       E, Lz - energy and angular momentum
       pot - Potential or list of such instances
       delta - focal length of prolate spheroidal coordinates
    OUTPUT:
       (u0,err)
       u0 : array, shape (len(E))
       err - non-zero if error occurred
    HISTORY:
       2012-12-03 - Written - Bovy (IAS)
    """
    #Parse the potential
    npot, pot_type, pot_args= _parse_pot(pot,potforactions=True)

    #Set up result arrays
    u0= numpy.empty(len(E))
    err= ctypes.c_int(0)

    #Set up the C code
    ndarrayFlags= ('C_CONTIGUOUS','WRITEABLE')
    actionAngleStaeckel_actionsFunc= _lib.calcu0
    actionAngleStaeckel_actionsFunc.argtypes= [ctypes.c_int,
                               ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
                               ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
                               ctypes.c_int,
                               ndpointer(dtype=numpy.int32,flags=ndarrayFlags),
                               ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
                               ctypes.c_double,
                               ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
                               ctypes.POINTER(ctypes.c_int)]

    #Array requirements, first store old order
    f_cont= [E.flags['F_CONTIGUOUS'],
             Lz.flags['F_CONTIGUOUS']]
    E= numpy.require(E,dtype=numpy.float64,requirements=['C','W'])
    Lz= numpy.require(Lz,dtype=numpy.float64,requirements=['C','W'])
    u0= numpy.require(u0,dtype=numpy.float64,requirements=['C','W'])

    #Run the C code
    actionAngleStaeckel_actionsFunc(len(E),
                                    E,
                                    Lz,
                                    ctypes.c_int(npot),
                                    pot_type,
                                    pot_args,
                                    ctypes.c_double(delta),
                                    u0,
                                    ctypes.byref(err))

    #Reset input arrays
    if f_cont[0]: E= numpy.asfortranarray(E)
    if f_cont[1]: Lz= numpy.asfortranarray(Lz)

    return (u0,err.value)
Example #13
def subset_selection_xtx(X, Y):
    """ Subsets selection using EvalSubsetsUsingXtx in the Earth package.
    """
    X = numpy.asfortranarray(X, dtype=ctypes.c_double)
    Y = numpy.asfortranarray(Y, dtype=ctypes.c_double)
    if Y.ndim == 1:
        Y = Y.reshape((-1, 1), order="F")

    if X.shape[0] != Y.shape[0]:
        raise ValueError("First dimensions of bx and y must be the same")

    var_count = X.shape[1]
    resp_count = Y.shape[1]
    cases = X.shape[0]
    subsets = numpy.zeros((var_count, var_count), dtype=ctypes.c_bool,
                              order="F")
    rss_vec = numpy.zeros((var_count,), dtype=ctypes.c_double, order="F")
    weights = numpy.ones((cases,), dtype=ctypes.c_double, order="F")

    rval = _c_eval_subsets_xtx(subsets, rss_vec, cases, resp_count, var_count,
                        X, Y, weights)
    if rval == 1:
        raise numpy.linalg.LinAlgError("Lin. dep. terms in X")
    elif rval == 2:
        raise Exception("Trying to prune the intercept.")
    elif rval != 0:
        raise Exception("Error %i" % rval)

    subsets_ind = numpy.zeros((var_count, var_count), dtype=int)
    for i, used in enumerate(subsets.T):
        subsets_ind[i, :i + 1] = numpy.where(used)[0]

    return subsets_ind, rss_vec
Example #14
File: svigp.py  Project: Dalar/GPy
    def _computations(self,do_Kmm=True, do_Kmm_grad=True):
        """
        All of the computations needed. Some are optional, see kwargs.
        """

        if do_Kmm:
            self.Lm = jitchol(self.Kmm)

        # The rather complex computations of self.A
        if self.has_uncertain_inputs:
            if self.likelihood.is_heteroscedastic:
                psi2_beta = (self.psi2 * (self.likelihood.precision.flatten().reshape(self.batchsize, 1, 1))).sum(0)
            else:
                psi2_beta = self.psi2.sum(0) * self.likelihood.precision
            evals, evecs = np.linalg.eigh(psi2_beta)
            clipped_evals = np.clip(evals, 0., 1e6) # TODO: make clipping configurable
            tmp = evecs * np.sqrt(clipped_evals)
        else:
            if self.likelihood.is_heteroscedastic:
                tmp = self.psi1.T * (np.sqrt(self.likelihood.precision.flatten().reshape(1, self.batchsize)))
            else:
                tmp = self.psi1.T * (np.sqrt(self.likelihood.precision))
        tmp, _ = dtrtrs(self.Lm, np.asfortranarray(tmp), lower=1)
        self.A = tdot(tmp)

        self.V = self.likelihood.precision*self.likelihood.Y
        self.VmT = np.dot(self.V,self.q_u_expectation[0].T)
        self.psi1V = np.dot(self.psi1.T, self.V)

        self.B = np.eye(self.num_inducing)*self.data_prop + self.A
        self.Lambda = backsub_both_sides(self.Lm, self.B.T)
        self.LQL = backsub_both_sides(self.Lm,self.q_u_expectation[1].T,transpose='right')

        self.trace_K = self.psi0.sum() - np.trace(self.A)/self.likelihood.precision
        self.Kmmi_m, _ = dpotrs(self.Lm, self.q_u_expectation[0], lower=1)
        self.projected_mean = np.dot(self.psi1,self.Kmmi_m)

        # Compute dL_dpsi
        self.dL_dpsi0 = - 0.5 * self.output_dim * self.likelihood.precision * np.ones(self.batchsize)
        self.dL_dpsi1, _ = dpotrs(self.Lm,np.asfortranarray(self.VmT.T),lower=1)
        self.dL_dpsi1 = self.dL_dpsi1.T

        dL_dpsi2 = -0.5 * self.likelihood.precision * backsub_both_sides(self.Lm, self.LQL - self.output_dim * np.eye(self.num_inducing))
        if self.has_uncertain_inputs:
            self.dL_dpsi2 = np.repeat(dL_dpsi2[None,:,:],self.batchsize,axis=0)
        else:
            self.dL_dpsi1 += 2.*np.dot(dL_dpsi2,self.psi1.T).T
            self.dL_dpsi2 = None

        # Compute dL_dKmm
        if do_Kmm_grad:
            tmp = np.dot(self.LQL,self.A) - backsub_both_sides(self.Lm,np.dot(self.q_u_expectation[0],self.psi1V.T),transpose='right')
            tmp += tmp.T
            tmp += -self.output_dim*self.B
            tmp += self.data_prop*self.LQL
            self.dL_dKmm = 0.5*backsub_both_sides(self.Lm,tmp)

        #Compute the gradient of the log likelihood wrt noise variance
        self.partial_for_likelihood =  -0.5*(self.batchsize*self.output_dim - np.sum(self.A*self.LQL))*self.likelihood.precision
        self.partial_for_likelihood +=  (0.5*self.output_dim*self.trace_K + 0.5 * self.likelihood.trYYT - np.sum(self.likelihood.Y*self.projected_mean))*self.likelihood.precision**2
Example #15
def test_mul():
    ## Test multiply method of a distributed matrix
    ms, ns = 5, 14

    gA = np.random.standard_normal((ms, ns)).astype(np.float64)
    gA = np.asfortranarray(gA)
    dA = core.DistributedMatrix.from_global_array(gA, rank=0)

    gB = np.random.standard_normal((ms, ns)).astype(np.float64)
    gB = np.asfortranarray(gB)
    dB = core.DistributedMatrix.from_global_array(gB, rank=0)

    dC = dA * dB
    gC = dC.to_global_array(rank=0)

    a = np.random.standard_normal(ns).astype(np.float64)
    comm.Bcast(a, root=0) # ensure all process have the same data
    dD = dA * a
    gD = dD.to_global_array(rank=0)

    alpha = 2.345
    dE = dA * alpha
    gE = dE.to_global_array(rank=0)

    if rank == 0:
        assert allclose(gA * gB, gC)
        assert allclose(gA * a, gD)
        assert allclose(gA * alpha, gE)
Example #16
def test_lassoMask():
    np.random.seed(0)
    print("test lassoMask")
##############################################
# Decomposition of a large number of signals
##############################################
# data generation
    X = np.asfortranarray(np.random.normal(size=(300,300)))
    # X=X./repmat(sqrt(sum(X.^2)),[size(X,1) 1]);
    X = np.asfortranarray(X / np.tile(np.sqrt((X*X).sum(axis=0)),(X.shape[0],1)),dtype= myfloat)
    D = np.asfortranarray(np.random.normal(size=(300,50)))
    D = np.asfortranarray(D / np.tile(np.sqrt((D*D).sum(axis=0)),(D.shape[0],1)),dtype= myfloat)
    mask = np.asfortranarray((X > 0))  # generating a binary mask
    param = {
        'lambda1' : 0.15, # not more than 20 non-zero coefficients
        'numThreads' : -1, # number of processors/cores to use; the default choice is -1
        # and uses all the cores of the machine
        'mode' : spams.PENALTY}        # penalized formulation
    tic = time.time()
    alpha = spams.lassoMask(X,D,mask,**param)
    tac = time.time()
    t = tac - tic
    print("%f signals processed per second\n" %(float(X.shape[1]) / t))

    return None
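The np.tile column normalization used in this and the other SPAMS snippets can be written with broadcasting instead. A minimal sketch that keeps the Fortran layout SPAMS expects (the dtype default is an assumption here, since myfloat is defined elsewhere in these test files):

import numpy as np

def normalize_columns_fortran(X, dtype=np.float64):
    # Divide each column by its Euclidean norm; broadcasting replaces
    # np.tile-ing the row of norms across all rows.
    norms = np.sqrt((X * X).sum(axis=0))
    return np.asfortranarray(X / norms, dtype=dtype)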
Example #17
def test_cd():
    np.random.seed(0)
    X = np.asfortranarray(np.random.normal(size = (64,100)))
    X = np.asfortranarray(X / np.tile(np.sqrt((X*X).sum(axis=0)),(X.shape[0],1)),dtype=myfloat)
    D = np.asfortranarray(np.random.normal(size = (64,100)))
    D = np.asfortranarray(D / np.tile(np.sqrt((D*D).sum(axis=0)),(D.shape[0],1)),dtype=myfloat)
    # parameters of the optimization procedure are chosen
    lambda1 = 0.015
    mode = spams.PENALTY
    tic = time.time()
    alpha = spams.lasso(X,D,lambda1 = lambda1,mode = mode,numThreads = 4)
    tac = time.time()
    t = tac - tic
    xd = X - D * alpha
    E = np.mean(0.5 * (xd * xd).sum(axis=0) + lambda1 * np.abs(alpha).sum(axis=0))
    print("%f signals processed per second for LARS" %(X.shape[1] / t))
    print('Objective function for LARS: %g' %E)
    tol = 0.001
    itermax = 1000
    tic = time.time()
#    A0 = ssp.csc_matrix(np.empty((alpha.shape[0],alpha.shape[1])))
    A0 = ssp.csc_matrix((alpha.shape[0],alpha.shape[1]),dtype=myfloat)
    alpha2 = spams.cd(X,D,A0,lambda1 = lambda1,mode = mode,tol = tol, itermax = itermax,numThreads = 4)
    tac = time.time()
    t = tac - tic
    print("%f signals processed per second for CD" %(X.shape[1] / t))
    xd = X - D * alpha2
    E = np.mean(0.5 * (xd * xd).sum(axis=0) + lambda1 * np.abs(alpha).sum(axis=0))
    print('Objective function for CD: %g' %E)
    print('With Random Design, CD can be much faster than LARS')

    return None
Example #18
def generate_itp_pn(lut):
    ndim=lut.ndim-1
    
    if ndim == 1: interpolator = interpolators.interpol_1pn
    elif ndim == 2: interpolator = interpolators.interpol_2pn
    elif ndim == 3: interpolator = interpolators.interpol_3pn
    elif ndim == 4: interpolator = interpolators.interpol_4pn
    elif ndim == 5: interpolator = interpolators.interpol_5pn
    elif ndim == 6: interpolator = interpolators.interpol_6pn
    elif ndim >= 7: interpolator = interpolators.interpol_npn
    else:
        print('Not implemented')
        return
    
    if ndim <= 6:
        if np.isfortran(lut): my_lut=lut
        else: my_lut=np.asfortranarray(lut)        
        def function(wo): 
            return interpolator(wo,my_lut)
    else: 
        # The LUT is flattened in C order regardless of its input layout
        flat_lut=np.asfortranarray(lut.reshape((-1,lut.shape[-1]), order='C'))
        shape=np.array(lut.shape)
        size=shape[:-1].prod()
        def function(wo):
            return interpolator(wo,flat_lut,shape[0:-1],shape[-1])
    
    return function
Example #19
def run():
    print_complex = get_print_complex()
    convolutionCPU = get_convolution_cpu()
    check_results = get_check_results()

    #data = np.ones((3,3)).astype('complex64')
    data = np.asfortranarray(np.random.randn(3,3).astype('complex64'))
    #kernel = np.ones((3,3)).astype('complex64')
    kernel = np.asfortranarray(np.random.randn(3,3).astype('complex64'))
    result = np.asfortranarray(np.zeros_like(data).astype('complex64'))

    convolutionCPU(_get_float2_ptr(result), _get_float2_ptr(data), _get_float2_ptr(kernel), data.shape[1], data.shape[0], kernel.shape[1], kernel.shape[0], 1, 6)

    print()
    print(kernel)
    print()
    print(data)
    print()

    s1 = np.array(data.shape)
    s2 = np.array(kernel.shape)

    print(result)
    print()
    print(fftconvolve(data.real, kernel.real, mode='full').astype('complex64'))
Example #20
    def test_singular_a(self):
        for b in [self.b_1dim, self.b_2dim]:
            for dtype in self.dtypes:
                a = np.asfortranarray(self.a_singular, dtype=dtype)
                b = np.asfortranarray(b, dtype=dtype)
                r = _numba_linalg_solve(a, b)
                ok_(r != 0)
Example #21
    def test_poisson_halide(self):
        """Halide Poisson norm test
        """
        if halide_installed():
            # Load image
            testimg_filename = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                            'data', 'angela.jpg')
            img = Image.open(testimg_filename)
            np_img = np.asfortranarray(im2nparray(img))

            # Convert to gray
            np_img = np.mean(np_img, axis=2)

            # Test problem
            v = np_img
            theta = 0.5

            mask = np.asfortranarray(np.random.randn(*list(np_img.shape)).astype(np.float32))
            mask = np.maximum(mask, 0.)
            b = np_img * np_img

            # Output
            output = np.zeros_like(v)

            tic()
            Halide('prox_Poisson.cpp').prox_Poisson(v, mask, b, theta, output)  # Call
            print('Running took: {0:.1f}ms'.format(toc()))

            # Reference
            output_ref = 0.5 * (v - theta + np.sqrt((v - theta) * (v - theta) + 4 * theta * b))
            output_ref[mask <= 0.5] = v[mask <= 0.5]

            self.assertItemsAlmostEqual(output, output_ref)
Example #22
File: warp.py  Project: PeterZs/ProxImaL
    def forward(self, inputs, outputs):
        """The forward operator.

        Reads from inputs and writes to outputs.
        """

        if self.implementation == Impl['halide']:

            # Halide implementation
            tmpin = np.asfortranarray(inputs[0].astype(np.float32))
            Halide('A_warp.cpp').A_warp(tmpin, self.Hinvf, self.tmpfwd)  # Call
            np.copyto(outputs[0], np.reshape(self.tmpfwd, self.shape))

        else:

            # CV2 version
            inimg = inputs[0]
            if len(self.H.shape) == 2:
                warpedInput = cv2.warpPerspective(np.asfortranarray(inimg), self.H.T,
                                                  inimg.shape[1::-1], flags=cv2.INTER_LINEAR,
                                                  borderMode=cv2.BORDER_CONSTANT, borderValue=0.)
                # Necessary due to array layout in opencv
                np.copyto(outputs[0], warpedInput)

            else:
                for j in range(self.H.shape[2]):
                    warpedInput = cv2.warpPerspective(np.asfortranarray(inimg),
                                                      self.H[:, :, j].T, inimg.shape[1::-1],
                                                      flags=cv2.INTER_LINEAR,
                                                      borderMode=cv2.BORDER_CONSTANT,
                                                      borderValue=0.)
                    # Necessary due to array layout in opencv

                    np.copyto(outputs[0][:, :, :, j], warpedInput)
Example #23
    def test_mask_halide(self):
        """Test mask lin op in halide.
        """
        if halide_installed():
            # Load image
            testimg_filename = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                            'data', 'angela.jpg')
            # opens the file using Pillow - it's not an array yet
            img = Image.open(testimg_filename)
            np_img = np.asfortranarray(im2nparray(img))

            # Test problem
            output = np.zeros_like(np_img)
            mask = np.asfortranarray(np.random.randn(*list(np_img.shape)).astype(np.float32))
            mask = np.maximum(mask, 0.)

            Halide('A_mask.cpp').A_mask(np_img, mask, output)  # Call
            output_ref = mask * np_img

            # Transpose
            output_trans = np.zeros_like(np_img)
            Halide('At_mask.cpp').At_mask(np_img, mask, output_trans)  # Call

            self.assertItemsAlmostEqual(output, output_ref)
            self.assertItemsAlmostEqual(output_trans, output_ref)
Example #24
File: api.py  Project: sashafrey/latex
    def runRandomWalking(self, w, nAlgs, nWalks = 1, nIters=-1, nErrorsLimit=-1, allowSimilar=False, pTransition=0.8,
                         randomSeed=0):
        RunRandomWalkingResult = collections.namedtuple('RunRandomWalkingResult', 'W isSource')
        nFeatures = w.shape[0]
        w0 = np.tile(w, (nWalks, 1))
        sessionStats = self.getStats()

        if sessionStats.nFeatures != nFeatures:
            raise Exception('sessionStats.nFeatures != w0.shape[1]')

        W = np.asfortranarray(np.zeros((nAlgs, nFeatures)).astype(np.float32))
        isSource = np.asfortranarray(np.zeros((nAlgs, 1)).astype(np.uint8))

        w0_p = w0.ctypes.data_as(self.lsPlugin.c_float_p)
        W_p = W.ctypes.data_as(self.lsPlugin.c_float_p)
        isSource_p = isSource.ctypes.data_as(self.lsPlugin.c_uint8_p)

        pTransition_p = (ctypes.c_float * 1)()
        pTransition_p[0] = pTransition
        #pTransition.ctypes.data_as(self.c_float_p)
        nAlgs = self.lsPlugin.dll.runRandomWalking(self.sessionId, w0_p, nWalks, nAlgs, nIters, nErrorsLimit,
                                                   allowSimilar, pTransition_p, randomSeed, W_p, isSource_p)
        self.lsPlugin.verifyCall(nAlgs)

        return RunRandomWalkingResult(W, isSource)
Example #25
    def setup(self):
        self.p = numpy.array([[27, 51],
                              [66, 85],
                              [77, 45]])

        self.p3 = numpy.array([[27, 51, 37],
                               [66, 85, 25],
                               [77, 45, 73]])

        self.space = numpy.array((100, 100))
        self.space3 = numpy.array((100, 100, 100))
        self.radii = numpy.array((5, 6, 7))

        self.g = nanshe.syn.data.generate_hypersphere_masks(
            self.space, self.p, self.radii
        )

        self.g = self.g.reshape((self.g.shape[0], -1))
        self.g = self.g.transpose()
        self.g = numpy.asmatrix(self.g)
        self.g = numpy.asfortranarray(self.g)

        self.g3 = nanshe.syn.data.generate_hypersphere_masks(
            self.space3, self.p3, self.radii
        )

        self.g3 = self.g3.reshape((self.g3.shape[0], -1))
        self.g3 = self.g3.transpose()
        self.g3 = numpy.asmatrix(self.g3)
        self.g3 = numpy.asfortranarray(self.g3)
Example #26
File: gp.py  Project: Dalar/GPy
    def _set_params(self, p):
        new_kern_params = p[:self.kern.num_params_transformed()]
        new_likelihood_params = p[self.kern.num_params_transformed():]
        old_likelihood_params = self.likelihood._get_params()

        self.kern._set_params_transformed(new_kern_params)
        self.likelihood._set_params_transformed(new_likelihood_params)

        self.K = self.kern.K(self.X)

        #Re fit likelihood approximation (if it is an approx), as parameters have changed
        if isinstance(self.likelihood, Laplace):
            self.likelihood.fit_full(self.K)

        self.K += self.likelihood.covariance_matrix

        self.Ki, self.L, self.Li, self.K_logdet = pdinv(self.K)

        # the gradient of the likelihood wrt the covariance matrix
        if self.likelihood.YYT is None:
            # alpha = np.dot(self.Ki, self.likelihood.Y)
            alpha, _ = dpotrs(self.L, self.likelihood.Y, lower=1)

            self.dL_dK = 0.5 * (tdot(alpha) - self.output_dim * self.Ki)
        else:
            # tmp = mdot(self.Ki, self.likelihood.YYT, self.Ki)
            tmp, _ = dpotrs(self.L, np.asfortranarray(self.likelihood.YYT), lower=1)
            tmp, _ = dpotrs(self.L, np.asfortranarray(tmp.T), lower=1)
            self.dL_dK = 0.5 * (tmp - self.output_dim * self.Ki)

        #Adding dZ_dK (0 for a non-approximate likelihood, compensates for
        #additional gradients of K when log-likelihood has non-zero Z term)
        self.dL_dK += self.likelihood.dZ_dK
Example #27
File: var.py  Project: dismalpy/dismalpy
    def endog(self, value):
        self._endog = np.array(value, order='A')

        # (T x M)
        if (self.nobs, self.k_endog) == self._endog.shape:
            self._endog = self._endog.T
        # (M x T)
        elif (self.k_endog, self.nobs) == self._endog.shape:
            pass
        else:
            raise ValueError('Invalid endogenous array shape. Required '
                             '(%d, %d) or (%d, %d). Got %s'
                             % (self.nobs, self.k_endog, self.k_endog,
                                self.nobs, str(self._endog.shape)))

        if not self._endog.flags['F_CONTIGUOUS']:
            self._endog = np.asfortranarray(self._endog)

        # Create a new lag matrix, shaped (k_ar, nobs) = (k_ar, T)
        self._lagged = np.asfortranarray(np.hstack([
            self.endog[:, self.order-i:-i].T
            for i in range(1, self.order+1)
        ]).T)

        # Set calculation flags
        self._recalculate = True
Example #28
    def test_matrix_multiply_ff(self):
        """matrix multiply two FORTRAN layout matrices"""
        a = np.asfortranarray(np.random.randn(M,N))
        b = np.asfortranarray(np.random.randn(N,K))
        res = gulinalg.matrix_multiply(a,b)
        ref = np.dot(a,b)
        assert_allclose(res, ref)
Example #29
def test_conjGrad():
    A = np.asfortranarray(np.random.normal(size = (5000,500)))
#*    np.random.seed(0)
#*    A = np.asfortranarray(np.random.normal(size = (10,5)))
    A = np.asfortranarray(np.dot(A.T,A),dtype=myfloat)
    b = np.ones((A.shape[1],),dtype=myfloat,order="F")
    x0 = b
    tol = 1e-4
    itermax = int(0.5 * len(b))

    tic = time.time()
    for i in range(0,20):
        y1 = np.linalg.solve(A,b)
    tac = time.time()
    print("  Time (numpy): ", tac - tic)
    x1 = np.abs(b - np.dot(A,y1))
    print("Mean error on b : %f" %(x1.sum() / b.shape[0]))

    tic = time.time()
    for i in range(0,20):
        y2 = spams.conjGrad(A,b,x0,tol,itermax)
#*        y2 = spams.conjGrad(A,b)
    tac = time.time()
    print("  Time (spams): ", tac - tic)
    x1 = np.dot(A,y2)
    x2 = np.abs(b - x1)
    print("Mean error on b : %f" %(x2.sum() / b.shape[0]))

    err = abs(y1 - y2)
    return err.max()
Example #30
def dictEval( X, D, param, lam=None, dsfactor=None, patchSize=None, patchFnGrp=None, kind='avg'):
    if dsfactor is not None:
        X_useme,dsz  = downsamplePatchList( X, patchSize, dsfactor, kind=kind )
        D_useme,Ddsz = downsamplePatchList( D, patchSize, dsfactor, kind=kind )

        if patchFnGrp:
            patchFnGrp.create_dataset('patchesDown', data=X_useme)
    else:
        X_useme = X
        D_useme = D

    if lam is None:
        lam = param['lambda1']

    alpha = spams.lasso( np.asfortranarray(X_useme), D = np.asfortranarray(D_useme), **param )
    Xre = ( D * alpha )

    if patchFnGrp:
        patchFnGrp.create_dataset('patchesRecon', data=Xre)

    xd = X - Xre 

    R = np.mean( (xd * xd).sum(axis=0))

    if lam > 0:
        print "   dictEval - lambda: ", lam
        R = R + lam * np.mean( np.abs(alpha).sum(axis=0))

    return R
Example #31
import pytest

from copy import deepcopy
import numpy as np

import tinybrain

image1x1x1 = np.array([[[[0]]]])
image1x1x1f = np.asfortranarray(image1x1x1)

image2x2x2 = np.array([[
    [[1], [1]],
    [[2], [2]],
], [
    [[1], [0]],
    [[0], [30]],
]])
image2x2x2f = np.asfortranarray(image2x2x2)

image3x3x3 = np.array([
    [  #z 0  1  2   
        [[1], [1], [1]],  # y=0
        [[1], [1], [1]],  # y=1      # x=0
        [[1], [1], [1]],  # y=2
    ],
    [
        [[2], [2], [2]],  # y=0 
        [[2], [2], [2]],  # y=1      # x=1
        [[2], [2], [2]],  # y=2
    ],
    [
Example #32
def enet_path(X,
              y,
              rho=0.5,
              eps=1e-3,
              n_alphas=100,
              alphas=None,
              precompute='auto',
              Xy=None,
              fit_intercept=True,
              normalize=False,
              copy_X=True,
              verbose=False,
              **params):
    """Compute Elastic-Net path with coordinate descent

    The Elastic Net optimization function is::

        1 / (2 * n_samples) * ||y - Xw||^2_2
        + alpha * rho * ||w||_1 + 0.5 * alpha * (1 - rho) * ||w||^2_2

    Parameters
    ----------
    X : numpy array of shape [n_samples, n_features]
        Training data. Pass directly as fortran contiguous data to avoid
        unnecessary memory duplication

    y : numpy array of shape [n_samples]
        Target values

    rho : float, optional
        float between 0 and 1 passed to ElasticNet (scaling between
        l1 and l2 penalties). rho=1 corresponds to the Lasso

    eps : float
        Length of the path. eps=1e-3 means that
        alpha_min / alpha_max = 1e-3

    n_alphas : int, optional
        Number of alphas along the regularization path

    alphas : numpy array, optional
        List of alphas where to compute the models.
        If None alphas are set automatically

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to 'auto' let us decide. The Gram
        matrix can also be passed as argument.

    Xy : array-like, optional
        Xy = np.dot(X.T, y) that can be precomputed. It is useful
        only when the Gram matrix is precomputed.

    fit_intercept : bool
        Fit or not an intercept

    normalize : boolean, optional
        If True, the regressors X are normalized

    copy_X : boolean, optional, default True
        If True, X will be copied; else, it may be overwritten.

    verbose : bool or integer
        Amount of verbosity

    params : kwargs
        keyword arguments passed to the Lasso objects

    Returns
    -------
    models : a list of models along the regularization path

    Notes
    -----
    See examples/plot_lasso_coordinate_descent_path.py for an example.

    See also
    --------
    ElasticNet
    ElasticNetCV
    """
    X = as_float_array(X, copy_X)

    X_init = X
    X, y, X_mean, y_mean, X_std = LinearModel._center_data(X,
                                                           y,
                                                           fit_intercept,
                                                           normalize,
                                                           copy=False)
    X = np.asfortranarray(X)  # make data contiguous in memory
    n_samples, n_features = X.shape

    if X_init is not X and hasattr(precompute, '__array__'):
        precompute = 'auto'
    if X_init is not X and Xy is not None:
        Xy = None

    if precompute is True or \
                ((precompute == 'auto') and (n_samples > n_features)):
        precompute = np.dot(X.T, X)

    if Xy is None:
        Xy = np.dot(X.T, y)

    n_samples = X.shape[0]
    if alphas is None:
        alpha_max = np.abs(Xy).max() / (n_samples * rho)
        alphas = np.logspace(np.log10(alpha_max * eps),
                             np.log10(alpha_max),
                             num=n_alphas)[::-1]
    else:
        alphas = np.sort(alphas)[::-1]  # make sure alphas are properly ordered
    coef_ = None  # init coef_
    models = []

    n_alphas = len(alphas)
    for i, alpha in enumerate(alphas):
        model = ElasticNet(alpha=alpha,
                           rho=rho,
                           fit_intercept=False,
                           precompute=precompute)
        model.set_params(**params)
        model.fit(X, y, coef_init=coef_, Xy=Xy)
        if fit_intercept:
            model.fit_intercept = True
            model._set_intercept(X_mean, y_mean, X_std)
        if verbose:
            if verbose > 2:
                print(model)
            elif verbose > 1:
                print('Path: %03i out of %03i' % (i, n_alphas))
            else:
                sys.stderr.write('.')
        coef_ = model.coef_.copy()
        models.append(model)
    return models
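Per the docstring note above, passing X already Fortran-contiguous means the np.asfortranarray(X) call inside enet_path is a no-op. A small caller-side sketch (data is synthetic, for illustration only):

import numpy as np

rng = np.random.RandomState(0)
X = np.asfortranarray(rng.randn(200, 50))  # column-major up front
y = rng.randn(200)
assert X.flags['F_CONTIGUOUS']
# models = enet_path(X, y, rho=0.5)  # no extra memory duplication inside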
Example #33
    def fit(self, X, y, Xy=None, coef_init=None):
        """Fit Elastic Net model with coordinate descent

        Parameters
        ----------
        X: ndarray, (n_samples, n_features)
            Data
        y: ndarray, (n_samples)
            Target
        Xy : array-like, optional
            Xy = np.dot(X.T, y) that can be precomputed. It is useful
            only when the Gram matrix is precomputed.
        coef_init: ndarray of shape n_features
            The initial coefficients to warm-start the optimization

        Notes
        -----

        Coordinate descent is an algorithm that considers each column of
        data at a time hence it will automatically convert the X input
        as a fortran contiguous numpy array if necessary.

        To avoid memory re-allocation it is advised to allocate the
        initial data in memory directly using that format.
        """
        # X and y must be of type float64
        X = np.asanyarray(X, dtype=np.float64)
        y = np.asarray(y, dtype=np.float64)

        n_samples, n_features = X.shape

        X_init = X
        X, y, X_mean, y_mean, X_std = self._center_data(X,
                                                        y,
                                                        self.fit_intercept,
                                                        self.normalize,
                                                        copy=self.copy_X)
        precompute = self.precompute
        if X_init is not X and hasattr(precompute, '__array__'):
            # recompute Gram
            # FIXME: it could be updated from precompute and X_mean
            # instead of recomputed
            precompute = 'auto'
        if X_init is not X and Xy is not None:
            Xy = None  # recompute Xy

        if coef_init is None:
            self.coef_ = np.zeros(n_features, dtype=np.float64)
        else:
            self.coef_ = coef_init

        alpha = self.alpha * self.rho * n_samples
        beta = self.alpha * (1.0 - self.rho) * n_samples

        X = np.asfortranarray(X)  # make data contiguous in memory

        # precompute if n_samples > n_features
        if hasattr(precompute, '__array__'):
            Gram = precompute
        elif precompute is True or \
               (precompute == 'auto' and n_samples > n_features):
            Gram = np.dot(X.T, X)
        else:
            Gram = None

        if Gram is None:
            self.coef_, self.dual_gap_, self.eps_ = \
                    cd_fast.enet_coordinate_descent(self.coef_, alpha, beta,
                                                    X, y, self.max_iter,
                                                    self.tol)
        else:
            if Xy is None:
                Xy = np.dot(X.T, y)
            self.coef_, self.dual_gap_, self.eps_ = \
                    cd_fast.enet_coordinate_descent_gram(self.coef_, alpha,
                                beta, Gram, Xy, y, self.max_iter, self.tol)

        self._set_intercept(X_mean, y_mean, X_std)

        if self.dual_gap_ > self.eps_:
            warnings.warn('Objective did not converge, you might want'
                          ' to increase the number of iterations')

        # return self for chaining fit and predict calls
        return self
Example #34
def compute_bench(samples_range, features_range):

    it = 0

    results = dict()
    lars = np.empty((len(features_range), len(samples_range)))
    lars_gram = lars.copy()
    omp = lars.copy()
    omp_gram = lars.copy()

    max_it = len(samples_range) * len(features_range)
    for i_s, n_samples in enumerate(samples_range):
        for i_f, n_features in enumerate(features_range):
            it += 1
            n_informative = n_features // 10
            print('====================')
            print('Iteration %03d of %03d' % (it, max_it))
            print('====================')
            # dataset_kwargs = {
            #     'n_train_samples': n_samples,
            #     'n_test_samples': 2,
            #     'n_features': n_features,
            #     'n_informative': n_informative,
            #     'effective_rank': min(n_samples, n_features) / 10,
            #     #'effective_rank': None,
            #     'bias': 0.0,
            # }
            dataset_kwargs = {
                'n_samples': 1,
                'n_components': n_features,
                'n_features': n_samples,
                'n_nonzero_coefs': n_informative,
                'random_state': 0
            }
            print "n_samples: %d" % n_samples
            print "n_features: %d" % n_features
            y, X, _ = make_sparse_coded_signal(**dataset_kwargs)
            X = np.asfortranarray(X)

            gc.collect()
            print("benching lars_path (with Gram):", end=' ')
            sys.stdout.flush()
            tstart = time()
            G = np.dot(X.T, X)  # precomputed Gram matrix
            Xy = np.dot(X.T, y)
            lars_path(X, y, Xy=Xy, Gram=G, max_iter=n_informative)
            delta = time() - tstart
            print("%0.3fs" % delta)
            lars_gram[i_f, i_s] = delta

            gc.collect()
            print("benching lars_path (without Gram):", end=' ')
            sys.stdout.flush()
            tstart = time()
            lars_path(X, y, Gram=None, max_iter=n_informative)
            delta = time() - tstart
            print("%0.3fs" % delta)
            lars[i_f, i_s] = delta

            gc.collect()
            print("benching orthogonal_mp (with Gram):", end=' ')
            sys.stdout.flush()
            tstart = time()
            orthogonal_mp(X, y, precompute_gram=True,
                          n_nonzero_coefs=n_informative)
            delta = time() - tstart
            print("%0.3fs" % delta)
            omp_gram[i_f, i_s] = delta

            gc.collect()
            print("benching orthogonal_mp (without Gram):", end=' ')
            sys.stdout.flush()
            tstart = time()
            orthogonal_mp(X, y, precompute_gram=False,
                          n_nonzero_coefs=n_informative)
            delta = time() - tstart
            print("%0.3fs" % delta)
            omp[i_f, i_s] = delta

    results['time(LARS) / time(OMP)\n (w/ Gram)'] = (lars_gram / omp_gram)
    results['time(LARS) / time(OMP)\n (w/o Gram)'] = (lars / omp)
    return results
Example #35
def np_asfortranarray(arr):
    return np.asfortranarray(arr)
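For context on why such a thin wrapper is cheap: np.asfortranarray only copies when the input is not already Fortran-contiguous. A quick check:

import numpy as np

a = np.zeros((3, 4), order='F')
assert np.asfortranarray(a) is a                 # already F-ordered: no copy
c = np.asfortranarray(np.zeros((3, 4)))          # C-ordered input: converted copy
assert c.flags['F_CONTIGUOUS']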
Example #36
def Forder(var):
    return np.asfortranarray(var.T, dtype=np.float64)
Example #37
from proximal.prox_fns import *
from proximal.algorithms import *

import cvxpy as cvx
import numpy as np
from scipy import ndimage

import argparse
import matplotlib.pyplot as plt
from PIL import Image
import cv2

# Load image
img = Image.open(
    './data/angela.jpg')  # opens the file using Pillow - it's not an array yet
x = np.asfortranarray(im2nparray(img))
x = np.mean(x, axis=2)
x = np.maximum(x, 0.0)

# Kernel
K = Image.open('./data/kernel_snake.png'
               )  # opens the file using Pillow - it's not an array yet
K = np.mean(np.asfortranarray(im2nparray(K)), axis=2)
K = np.maximum(cv2.resize(K, (15, 15), interpolation=cv2.INTER_LINEAR), 0)
K /= np.sum(K)

# Generate observation
sigma_noise = 0.01
b = ndimage.convolve(x, K,
                     mode='wrap') + sigma_noise * np.random.randn(*x.shape)
Example #38
def arma_innovations(endog,
                     ar_params=None,
                     ma_params=None,
                     sigma2=1,
                     normalize=False,
                     prefix=None):
    """
    Compute innovations using a given ARMA process.

    Parameters
    ----------
    endog : ndarray
        The observed time-series process, may be univariate or multivariate.
    ar_params : ndarray, optional
        Autoregressive parameters.
    ma_params : ndarray, optional
        Moving average parameters.
    sigma2 : ndarray, optional
        The ARMA innovation variance. Default is 1.
    normalize : bool, optional
        Whether or not to normalize the returned innovations. Default is False.
    prefix : str, optional
        The BLAS prefix associated with the datatype. Default is to find the
        best datatype based on given input. This argument is typically only
        used internally.

    Returns
    -------
    innovations : ndarray
        Innovations (one-step-ahead prediction errors) for the given `endog`
        series with predictions based on the given ARMA process. If
        `normalize=True`, then the returned innovations have been "whitened" by
        dividing through by the square root of the mean square error.
    innovations_mse : ndarray
        Mean square error for the innovations.
    """
    # Parameters
    endog = np.array(endog)
    squeezed = endog.ndim == 1
    if squeezed:
        endog = endog[:, None]

    ar_params = np.atleast_1d([] if ar_params is None else ar_params)
    ma_params = np.atleast_1d([] if ma_params is None else ma_params)

    nobs, k_endog = endog.shape
    ar = np.r_[1, -ar_params]
    ma = np.r_[1, ma_params]

    # Get BLAS prefix
    if prefix is None:
        prefix, dtype, _ = find_best_blas_type(
            [endog, ar_params, ma_params,
             np.array(sigma2)])
    dtype = prefix_dtype_map[prefix]

    # Make arrays contiguous for BLAS calls
    endog = np.asfortranarray(endog, dtype=dtype)
    ar_params = np.asfortranarray(ar_params, dtype=dtype)
    ma_params = np.asfortranarray(ma_params, dtype=dtype)
    sigma2 = dtype(sigma2).item()

    # Get the appropriate functions
    arma_transformed_acovf_fast = getattr(
        _arma_innovations, prefix + 'arma_transformed_acovf_fast')
    arma_innovations_algo_fast = getattr(_arma_innovations,
                                         prefix + 'arma_innovations_algo_fast')
    arma_innovations_filter = getattr(_arma_innovations,
                                      prefix + 'arma_innovations_filter')

    # Run the innovations algorithm for ARMA coefficients
    arma_acovf = arima_process.arma_acovf(ar, ma, sigma2=sigma2,
                                          nobs=nobs) / sigma2
    acovf, acovf2 = arma_transformed_acovf_fast(ar, ma, arma_acovf)
    theta, v = arma_innovations_algo_fast(nobs, ar_params, ma_params, acovf,
                                          acovf2)
    v = np.array(v)
    if (np.any(v < 0) or not np.isfinite(theta).all()
            or not np.isfinite(v).all()):
        # This is defensive code that is hard to hit
        raise ValueError(NON_STATIONARY_ERROR)

    # Run the innovations filter across each series
    u = []
    for i in range(k_endog):
        u_i = np.array(
            arma_innovations_filter(endog[:, i], ar_params, ma_params, theta))
        u.append(u_i)
    u = np.vstack(u).T
    if normalize:
        u /= v[:, None]**0.5

    # Post-processing
    if squeezed:
        u = u.squeeze()

    return u, v
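The BLAS prefix lookup above relies on SciPy's dtype matching; a standalone sketch of how find_best_blas_type (from scipy.linalg.blas) behaves, independent of the statsmodels internals used above:

import numpy as np
from scipy.linalg.blas import find_best_blas_type

prefix, dtype, prefer_fortran = find_best_blas_type((np.zeros(3, dtype=np.float64),))
# prefix == 'd' and dtype == np.float64 for double-precision real input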
Example #39
    def test_multiple(self):
        nodes = np.asfortranarray([[0.0, 1.0, 2.0, 3.0], [4.0, 4.5, 5.0, 5.5]])
        new_nodes = self._call_function_under_test(nodes)
        expected = np.asfortranarray([[0.0, 3.0], [4.0, 5.5]])
        self.assertEqual(expected, new_nodes)
Example #40
    def prep_array(array):
        return np.asfortranarray(array) if is_col_major else array
Example #41
    def test_linear(self):
        nodes = np.asfortranarray([[5.5, 5.5]])
        new_nodes = self._call_function_under_test(nodes)
        expected = np.asfortranarray([[5.5]])
        self.assertEqual(expected, new_nodes)
Example #42
    def test_no_reduce(self):
        nodes = np.asfortranarray([[0.0, -1.0, 1.0, -0.75],
                                   [2.0, 0.0, 1.0, 1.625]])
        new_nodes = self._call_function_under_test(nodes)
        self.assertIs(new_nodes, nodes)
Example #43
    def test_to_quadratic(self):
        nodes = np.asfortranarray([[3.0, 2.0, 1.0, 0.0], [0.0, 2.0, 2.0, 0.0]])
        was_reduced, new_nodes = self._call_function_under_test(nodes)
        self.assertTrue(was_reduced)
        expected = np.asfortranarray([[3.0, 1.5, 0.0], [0.0, 3.0, 0.0]])
        self.assertEqual(expected, new_nodes)
Example #44
    def test_single(self):
        nodes = np.asfortranarray([[0.0, 2.0, 4.0, 6.0], [0.0, 4.0, 6.0, 6.0]])
        new_nodes = self._call_function_under_test(nodes)
        expected = np.asfortranarray([[0.0, 3.0, 6.0], [0.0, 6.0, 6.0]])
        self.assertEqual(expected, new_nodes)
Example #45
    def test_to_cubic(self):
        nodes = np.asfortranarray([[0.0, 0.75, 2.0, 2.75, 2.0]])
        result = self._call_function_under_test(nodes)
        expected = np.asfortranarray([[0.0, 1.0, 3.0, 2.0]])
        self.assertEqual(result, expected)
Example #46
    def test_from_cubic_not_elevated(self):
        nodes = np.asfortranarray([[0.0, -1.0, 1.0, -0.75],
                                   [2.0, 0.0, 1.0, 1.625]])
        was_reduced, new_nodes = self._call_function_under_test(nodes)
        self.assertFalse(was_reduced)
        self.assertIs(new_nodes, nodes)
Example #47
    def test_to_constant(self):
        nodes = np.asfortranarray([[-2.0, -2.0], [1.0, 1.0]])
        result = self._call_function_under_test(nodes)
        expected = np.asfortranarray([[-2.0], [1.0]])
        self.assertEqual(result, expected)
Example #48
    def test_nodes_zero(self):
        nodes = np.asfortranarray([[0.0], [0.0]])
        result = self._call_function_under_test(nodes, nodes)
        self.assertEqual(result, 0.0)
Example #49
    def test_failure_on_invalid(self):
        nodes = np.asfortranarray([[0.0, -1.0, 1.0, -0.75],
                                   [2.0, 0.0, 1.0, 1.625]])
        point = np.asfortranarray([[-0.25], [1.375]])
        with self.assertRaises(ValueError):
            self._call_function_under_test(nodes, point)
Example #50
    def test_to_linear(self):
        nodes = np.asfortranarray([[0.0, 1.0, 2.0], [0.0, 2.0, 4.0]])
        result = self._call_function_under_test(nodes)
        expected = np.asfortranarray([[0.0, 2.0], [0.0, 4.0]])
        self.assertEqual(result, expected)
Example #51
    def test_quadratic(self):
        s = 0.5
        nodes = np.asfortranarray([[0.0, 0.5, 1.0], [0.0, 1.0, 0.0]])
        tangent_vec = self._get_tangent_vec(s, nodes)
        result = self._call_function_under_test(nodes, tangent_vec, s)
        self.assertEqual(result, -4.0)
Example #52
    def test_outside_right(self):
        # Newton's method pushes the value slightly to the right of ``1.0``.
        nodes = np.asfortranarray([[0.0, 1.0, 2.0], [0.0, 1.0, 0.0]])
        point = np.asfortranarray([[2.0], [0.0]])
        result = self._call_function_under_test(nodes, point)
        self.assertEqual(result, 1.0)
Example #53
    def test_linear(self):
        nodes = np.asfortranarray([[0.0, 3.0], [0.0, 4.0]])
        length = self._call_function_under_test(nodes)
        self.assertEqual(length, 5.0)
Example #54
    def test_no_match(self):
        nodes = np.asfortranarray([[0.0, 0.5, 1.0], [0.0, 1.0, 0.0]])
        point = np.asfortranarray([[0.5], [2.0]])
        self.assertIsNone(self._call_function_under_test(nodes, point))
Example #55
    def test_quadratic(self):
        nodes = np.asfortranarray([[0.0, 2.0, 1.0], [0.0, 3.0, 6.0]])
        size = self._call_function_under_test(nodes, 0.5)
        self.assertEqual(size, 3.25)
Example #56
    def test_it(self):
        nodes = np.asfortranarray([[0.0, 3.0], [1.0, 5.0]])
        result = self._call_function_under_test(nodes, 0.25, 0.75)
        self.assertEqual(result, np.asfortranarray([[2.25], [4.0]]))
Example #57
    def test_binomial_roundoff(self):
        s_vals = np.asfortranarray([0.5])
        degree = 55
        nodes = np.eye(degree + 1, order="F")

        expected = np.asfortranarray([
            1.0,
            55.0,
            1485.0,
            26235.0,
            341055.0,
            3478761.0,
            28989675.0,
            202927725.0,
            1217566350.0,
            6358402050.0,
            29248649430.0,
            119653565850.0,
            438729741450.0,
            1451182990950.0,
            4353548972850.0,
            11899700525790.0,
            29749251314475.0,
            68248282427325.0,
            144079707346575.0,
            280576272201225.0,
            505037289962205.0,
            841728816603675.0,
            1300853625660225.0,
            1866442158555975.0,
            2488589544741300.0,
            3085851035479212.0,
            3560597348629860.0 - 0.5,
            3824345300380220.0 - 0.5,
            3824345300380220.0 - 0.5,
            3560597348629860.0 - 0.5,
            3085851035479212.0 - 0.5,
            2488589544741300.0 - 0.5,
            1866442158555974.0 + 0.5,
            1300853625660225.0 - 0.5**2,
            841728816603675.0 - 0.5**3,
            505037289962205.0 - 0.5**4,
            280576272201225.0 - 0.5**4,
            144079707346575.0 - 0.5**5,
            68248282427325.0 - 0.5**6,
            29749251314475.0 - 0.5**7,
            11899700525790.0 - 0.5**8,
            4353548972850.0 - 3 * 0.5**11,
            1451182990950.0 - 0.5**11,
            438729741450.0 - 3 * 0.5**14,
            119653565850.0 - 3 * 0.5**16,
            29248649430.0 - 3 * 0.5**18,
            6358402050.0 - 3 * 0.5**20,
            1217566350.0 - 0.5**21,
            202927725.0 - 3 * 0.5**25,
            28989675.0 - 0.5**26,
            3478761.0 - 0.5**29,
            341055.0 - 3 * 0.5**34,
            26235.0 - 0.5**36,
            1485.0 - 0.5**40,
            55.0 - 5 * 0.5**47,
            1.0,
        ], )
        evaluated = self._call_function_under_test(nodes, s_vals)
        binomial_coefficients = evaluated.flatten() * 2.0**degree
        self.assertEqual(expected, binomial_coefficients)
Example #58
    def test_degree_zero(self):
        nodes = np.asfortranarray([[0.0], [0.0]])
        length = self._call_function_under_test(nodes)
        self.assertEqual(length, 0.0)
Example #59
    def test_quadratic(self):
        nodes = np.asfortranarray([[0.0, 4.0, 7.0], [1.0, 6.0, 3.0]])
        expected_l = np.asfortranarray([[0.0, 2.0, 3.75], [1.0, 3.5, 4.0]])
        expected_r = np.asfortranarray([[3.75, 5.5, 7.0], [4.0, 4.5, 3.0]])
        self._helper(nodes, expected_l, expected_r)
Example #60
    def test_linear(self):
        nodes = np.asfortranarray([[0.0, 3.0], [0.0, -4.0]])
        size = self._call_function_under_test(nodes, 0.25)
        self.assertEqual(size, 0.25 * 5.0)