Example #1
File: coo.py Project: sugiki/scipy
    def getnnz(self, axis=None):
        """Get the count of explicitly-stored values (nonzeros)

        Parameters
        ----------
        axis : None, 0, or 1
            Select between the number of values across the whole matrix, in
            each column, or in each row.
        """
        if axis is None:
            nnz = len(self.data)
            if nnz != len(self.row) or nnz != len(self.col):
                raise ValueError('row, column, and data array must all be the '
                                 'same length')

            if np.rank(self.data) != 1 or np.rank(self.row) != 1 or \
               np.rank(self.col) != 1:
                raise ValueError('row, column, and data arrays must have '
                                 'rank 1')

            return int(nnz)

        if axis < 0:
            axis += 2
        if axis == 0:
            return _compat_bincount(downcast_intp_index(self.col),
                                    minlength=self.shape[1])
        elif axis == 1:
            return _compat_bincount(downcast_intp_index(self.row),
                                    minlength=self.shape[0])
        else:
            raise ValueError('axis out of bounds')
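Note: every example on this page relies on np.rank, which was deprecated in NumPy 1.9 and removed in later releases. Its modern equivalent is np.ndim (the number of array dimensions), not np.linalg.matrix_rank (the linear-algebra rank). A minimal sketch of the distinction:

import numpy as np

a = np.zeros((3, 4))
print(np.ndim(a))                # 2 -- what np.rank(a) used to return
print(np.ndim(5.0))              # 0 -- works on scalars as well
print(np.linalg.matrix_rank(a))  # 0 -- linear-algebra rank of a zero matrix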
Example #2
    def check_format(self, full_check=True):
        """check whether the matrix format is valid

        Parameters
        ==========

            - full_check : {bool}
                - True  - rigorous check, O(N) operations : default
                - False - basic check, O(1) operations

        """
        # use _swap to determine proper bounds
        major_name,minor_name = self._swap(('row','column'))
        major_dim,minor_dim = self._swap(self.shape)

        # index arrays should have integer data types
        if self.indptr.dtype.kind != 'i':
            warn("indptr array has non-integer dtype (%s)"
                    % self.indptr.dtype.name)
        if self.indices.dtype.kind != 'i':
            warn("indices array has non-integer dtype (%s)"
                    % self.indices.dtype.name)

        # only support 32-bit ints for now
        self.indptr = np.asarray(self.indptr, dtype=np.intc)
        self.indices = np.asarray(self.indices, dtype=np.intc)
        self.data = to_native(self.data)

        # check array shapes
        if np.rank(self.data) != 1 or np.rank(self.indices) != 1 or np.rank(self.indptr) != 1:
            raise ValueError('data, indices, and indptr should be rank 1')

        # check index pointer
        if (len(self.indptr) != major_dim + 1):
            raise ValueError("index pointer size (%d) should be (%d)" %
                                (len(self.indptr), major_dim + 1))
        if (self.indptr[0] != 0):
            raise ValueError("index pointer should start with 0")

        # check index and data arrays
        if (len(self.indices) != len(self.data)):
            raise ValueError("indices and data should have the same size")
        if (self.indptr[-1] > len(self.indices)):
            raise ValueError("Last value of index pointer should be less than "
                                "the size of index and data arrays")

        self.prune()

        if full_check:
            # check format validity (more expensive)
            if self.nnz > 0:
                if self.indices.max() >= minor_dim:
                    raise ValueError("%s index values must be < %d" %
                                        (minor_name,minor_dim))
                if self.indices.min() < 0:
                    raise ValueError("%s index values must be >= 0" %
                                        minor_name)
                if np.diff(self.indptr).min() < 0:
                    raise ValueError("index pointer values must form a "
                                        "non-decreasing sequence")
Example #3
def calc( absphot_vals_target, absphot_errs_target, absphot_vals_comparisons, absphot_errs_comparisons ):
    """
    Calculates relative fluxes given target and comparison flux time values. Also calculates the
    propagated formal uncertainty using the formal errors on each of the flux values.

    The required inputs are:
      ** absphot_vals_target - N-length np.array containing the absolute fluxes of the target star.
      ** absphot_errs_target - N-length np.array containing the error bars on the absolute fluxes of
         the target star.
      ** absphot_vals_comparisons - NxM np.array containing the absolute fluxes of M comparison stars.
      ** absphot_errs_comparisons - NxM np.array containing the error bars on the absolute fluxes of
         M comparison stars.

    All error calculation is done using standard uncertainty propagation assuming independence
    between points and quadrature sums.

    The function is defined in this way so that it's possible to have more flexibility in
    experimenting with different combinations of comparison stars elsewhere (eg. in other routines)
    if necessary.
    """
    
    # If there's only one comparison, just make sure everything is in the correct format:
    if ( np.rank( absphot_vals_comparisons )==1 ):
        absphot_vals_comparisons = np.reshape( absphot_vals_comparisons, [ len( absphot_vals_comparisons ), 1 ] )
    if ( np.rank( absphot_errs_comparisons )==1 ):
        absphot_errs_comparisons = np.reshape( absphot_errs_comparisons, [ len( absphot_errs_comparisons ), 1 ] )
    comparisons_sum_vals = np.sum( absphot_vals_comparisons, axis=1 )
    comparisons_sum_errs_sq = np.sum( absphot_errs_comparisons**2., axis=1 )
    comparisons_sum_errs = np.sqrt( comparisons_sum_errs_sq )
    relphot_vals = absphot_vals_target.flatten() / comparisons_sum_vals.flatten()
    relphot_errs = relphot_vals * np.sqrt( ( absphot_errs_target.flatten() / absphot_vals_target.flatten() )**2. \
                                        + ( comparisons_sum_errs.flatten() / comparisons_sum_vals.flatten() )**2. )
    return relphot_vals, relphot_errs
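A hypothetical usage sketch for calc, with synthetic fluxes for one target and two comparison stars (shapes follow the docstring; this assumes a NumPy old enough to still provide np.rank):

import numpy as np

np.random.seed(0)
n = 100                                          # time samples
target_vals = 1.0 + 0.01 * np.random.randn(n)
target_errs = np.full(n, 0.01)
comp_vals = 1.0 + 0.01 * np.random.randn(n, 2)   # two comparison stars
comp_errs = np.full((n, 2), 0.01)

rel_vals, rel_errs = calc(target_vals, target_errs, comp_vals, comp_errs)
print(rel_vals.shape, rel_errs.shape)            # (100,) (100,)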
Example #4
def _format_for_gam2(count, time, pos):
    assert np.rank(count) == np.rank(time) == (np.rank(pos) - 1)
    assert count.shape[0] == time.shape[0] == pos.shape[0]
    assert (count.shape[1] + 1) == time.shape[1] == pos.shape[1]

    y = count.flatten()[:,None]
    npt = y.size
    tax, spax = 1, -1
    
    # don't use real time as won't compare equivalent portions of trials
    # t  = edge2cen(time, axis=tax) # (ntrial, nbin)
    # subtract offset to get all relative times
    #t = (t - t[:,:1]).flatten()[:,None]
    
    # instead use bin numbers
    ntrial, nbin = count.shape
    t = np.tile(np.arange(nbin, dtype=float)[None], (ntrial, 1))
    t = t.flatten()[:,None]

    # also create trial numbers for pulling out appropriate later on
    # these don't correspond to original trial numbers because original trials
    # have been permuted before getting here
    tr = np.tile(np.arange(ntrial), (nbin, 1)).T.flatten()
    
    d  = kin.get_dir(pos, tax=tax, spax=spax).reshape(npt, 3)
    p  = edge2cen(pos, axis=tax).reshape(npt, 3)
    v  = kin.get_vel(pos, time, tax=tax, spax=spax).reshape(npt, 3)
    sp = kin.get_speed(pos, time, tax=tax, spax=spax).flatten()[:,None]

    # q is a second direction set-of-columns for deviance calculation
    q  = kin.get_dir(pos, tax=tax, spax=spax).reshape(npt, 3)
    return np.concatenate([y,t,d,p,v,sp,q], axis=1), tr
Example #5
	def __init__(self,init_pos,init_measurement=[[]],init_weight=1,movement_weight=1,measurement_weight=1):
		if rank(init_pos)==0:
			init_pos=[init_pos]
		self.num_dim=len(init_pos)
		while (rank(init_measurement)<2):
			init_measurement=[init_measurement]
		if init_measurement==[[]]:
			self.num_landmarks=0
		else:
			self.num_landmarks=len(init_measurement[0])
		self.matrix_dim=self.num_dim+self.num_landmarks
		self.omega=zeros([self.matrix_dim,self.matrix_dim])
		self.xi=zeros([self.matrix_dim,1])
		self.init_weight=init_weight
		self.movement_weight=movement_weight
		self.measurement_weight=measurement_weight
		# initial position
		for i in range(self.num_dim):
			self.omega[i][i]+=self.init_weight
			self.xi[i][0]=init_pos[i]
			for j in range(self.num_landmarks):
				if init_measurement[i][j] is not None:
					self.omega[i][i]+=measurement_weight
					self.omega[i][self.num_dim+j]-=measurement_weight
					self.omega[self.num_dim+j][i]-=measurement_weight
					self.omega[self.num_dim+j][self.num_dim+j]+=measurement_weight
					self.xi[i][0]-=measurement_weight*init_measurement[i][j]
					self.xi[self.num_dim+j][0]+=measurement_weight*init_measurement[i][j]
Example #6
def orientation_product(T, Bb):
    """Computes the product of a tensor and a vector.

    Assumptions:
    None

    Source:
    N/A

    Inputs:
    T         [-] 3-dimensional array with rotation matrix
                  patterned along dimension zero
    Bb        [-] 3-dimensional vector

    Outputs:
    C         [-] transformed vector

    Properties Used:
    N/A
    """

    assert np.rank(T) == 3

    if np.rank(Bb) == 3:
        C = np.einsum('aij,ajk->aik', T, Bb)
    elif np.rank(Bb) == 2:
        C = np.einsum('aij,aj->ai', T, Bb)
    else:
        raise Exception('bad B rank')

    return C
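A standalone sketch of what the two einsum contractions compute: a batch of n rotation matrices (patterned along dimension zero) applied to a batch of n vectors.

import numpy as np

n = 4
theta = np.linspace(0.0, np.pi / 2, n)
T = np.zeros((n, 3, 3))                      # n rotations about the z-axis
T[:, 0, 0] = np.cos(theta); T[:, 0, 1] = -np.sin(theta)
T[:, 1, 0] = np.sin(theta); T[:, 1, 1] = np.cos(theta)
T[:, 2, 2] = 1.0

Bb = np.tile([1.0, 0.0, 0.0], (n, 1))        # n copies of the x unit vector
C = np.einsum('aij,aj->ai', T, Bb)           # the rank-2 branch above
print(np.allclose(C[:, 0], np.cos(theta)))   # True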
Example #7
def coordinate_reverse_transform(old_coord_data, iapp):
	# Translate, then transform
	transformed = np.array([[1.245218544034379,-0.7978622303790213],[-0.3779055518316757,1.4298048589659018]])
	transformed = linalg.inv(transformed)

	if len(old_coord_data)==2 and np.rank(old_coord_data)==1:
		old_coord_data = np.dot(transformed, old_coord_data)
		old_coord_data[0] = old_coord_data[0] + 0.4 - 2*iapp
		old_coord_data[1] = old_coord_data[1] + 0.15 - iapp
		return old_coord_data

	elif old_coord_data.shape[1] == 2 and np.rank(old_coord_data)==2:
		for i in range(len(old_coord_data)):
			old_coord_data[i] = np.dot(transformed, old_coord_data[i])
		old_coord_data[:,0] = old_coord_data[:,0] + 0.4 - 2*iapp
		old_coord_data[:,1] = old_coord_data[:,1] + 0.15 - iapp
		return old_coord_data

	elif old_coord_data.shape[0] == 2 and np.rank(old_coord_data)==2:
		for i in range(len(old_coord_data[0])):
			old_coord_data[:,i] = np.dot(transformed, old_coord_data[:,i])
		old_coord_data[0] = old_coord_data[0] + 0.4 - 2*iapp
		old_coord_data[1] = old_coord_data[1] + 0.15 - iapp
		return old_coord_data

	else:
		raise Exception("couldn't find appropriate manipulation for", old_coord_data.shape)
Example #8
    def check_format(self, full_check=True):
        """check whether the matrix format is valid

            *Parameters*:
                full_check:
                    True  - rigorous check, O(N) operations : default
                    False - basic check, O(1) operations

        """
        M, N = self.shape
        R, C = self.blocksize

        # index arrays should have integer data types
        if self.indptr.dtype.kind != 'i':
            warn("indptr array has non-integer dtype (%s)" \
                    % self.indptr.dtype.name )
        if self.indices.dtype.kind != 'i':
            warn("indices array has non-integer dtype (%s)" \
                    % self.indices.dtype.name )

        # only support 32-bit ints for now
        self.indptr = np.asarray(self.indptr, np.intc)
        self.indices = np.asarray(self.indices, np.intc)
        self.data = to_native(self.data)

        # check array shapes
        if np.rank(self.indices) != 1 or np.rank(self.indptr) != 1:
            raise ValueError, "indices, and indptr should be rank 1"
        if np.rank(self.data) != 3:
            raise ValueError, "data should be rank 3"

        # check index pointer
        if (len(self.indptr) != M / R + 1):
            raise ValueError, \
                "index pointer size (%d) should be (%d)" % \
                 (len(self.indptr), M/R + 1)
        if (self.indptr[0] != 0):
            raise ValueError, "index pointer should start with 0"

        # check index and data arrays
        if (len(self.indices) != len(self.data)):
            raise ValueError, "indices and data should have the same size"
        if (self.indptr[-1] > len(self.indices)):
            raise ValueError, \
                  "Last value of index pointer should be less than "\
                  "the size of index and data arrays"

        self.prune()

        if full_check:
            #check format validity (more expensive)
            if self.nnz > 0:
                if self.indices.max() >= N / C:
                    print "max index", self.indices.max()
                    raise ValueError, "column index values must be < %d" % (N /
                                                                            C)
                if self.indices.min() < 0:
                    raise ValueError, "column index values must be >= 0"
                if diff(self.indptr).min() < 0:
                    raise ValueError, "index pointer values must form a " \
                                      "non-decreasing sequence"
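The indptr/indices invariants checked here can be exercised through SciPy's public constructors; a minimal sketch using the CSR format (the blocked BSR case is analogous), assuming a reasonably recent scipy.sparse:

import numpy as np
from scipy.sparse import csr_matrix

# A valid 2x3 CSR triple: indptr has n_rows + 1 entries, starts at 0,
# and is non-decreasing; row 0 holds entries 0..1, row 1 holds entry 2.
data = np.array([1.0, 2.0, 3.0])
indices = np.array([0, 2, 1])
indptr = np.array([0, 2, 3])
m = csr_matrix((data, indices, indptr), shape=(2, 3))
m.check_format(full_check=True)  # raises ValueError if the triple is malformed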
Example #9
def find_duplicates_others(source_array, other_array):
    """find indices of duplicate values in src_array against other_array
    source_array - numpy array to be checked
    other_array - numpy array to be checked against, must have a compatible shape[0]
                   with src_array
    """

    if other_array is None or len(other_array) == 0:
        return zeros(source_array.shape, dtype="int32")

    if other_array.shape[0] != source_array.shape[0]:
        raise ValueError, "Arrays have incompatible shapes"
    source_array_rank = rank(source_array)
    if rank(source_array) < rank(other_array):
        src_array = source_array[:, newaxis]
        oth_array = other_array
    elif rank(source_array) > rank(other_array):
        src_array = source_array
        oth_array = other_array[:, newaxis]
    else:
        src_array = source_array
        oth_array = other_array

    is_duplicates = equal(src_array, oth_array)
    duplicate_indicator = sum(is_duplicates, axis=1)

#    if duplicate_indicator.ndim > source_array_rank:
#        reshape(duplicate_indicator, shape=(src_array.size,))
    return duplicate_indicator
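The broadcast comparison at the core of this function, as a standalone sketch: count, per row, how many entries of other_array match the source value.

import numpy as np

source = np.array([1, 2, 3])
other = np.array([[1, 1, 4],
                  [5, 2, 2],
                  [7, 8, 9]])
# source[:, None] has shape (3, 1) and broadcasts against (3, 3)
counts = np.sum(source[:, None] == other, axis=1)
print(counts)  # [2 2 0]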
Example #10
    def load_h5(self):

        # Read from file

        #with gfile.Open(self.filename) as h5_file:
        f = h5py.File(self.filename, 'r')

        dataT = f[u'/valD']
        targetT = f[u'/valL']
        print(np.rank(dataT))
        if np.rank(dataT) == 2:
            print(dataT.shape)
            self.is_long = True
            self.n_seg = dataT.shape[0] // self.config.segsize
            dataT = dataT[:self.n_seg * self.config.segsize, :]
            targetT = targetT[:self.n_seg * self.config.segsize, :]
            self.num_batches = np.floor(
                targetT.shape[0] /
                (self.config.eval_nseg_atonce * self.config.segsize))
            print(dataT.shape)

            self.batch_size = 1
            dataT = np.expand_dims(dataT, 0)
            targetT = np.expand_dims(targetT, 0)
        else:
            self.is_long = False
            self.batch_size = self.config.batch_size
            self.num_batches = dataT.shape[0] // self.batch_size

        print(dataT.shape)
        print(targetT.shape)

        return Dataset(data=dataT, target=targetT)
Example #11
def project_scores_to_var_space(score, weight, data):
    '''
    Project reduced scores, via reduced weights, up to neuron space    
    
    Parameters
    ----------
    score : ndarray
      shape (npc, nobs), i.e. (nscore, ntask [* nrep] * nbin)
    weight : ndarray
      shape (npc, nvar), i.e. (nscore, nunit)
    data : ndarray
      shape (nvar, nobs), data from which to get mean
      
    Returns
    -------
    projected : ndarray
      shape (nvar, nobs), i.e. (nunit, ntask * nbin)
    '''
    assert np.rank(score) == np.rank(weight) == np.rank(data) == 2
    assert score.shape[0] == weight.shape[0] # npc
    assert score.shape[1] == data.shape[1]   # nobs
    assert weight.shape[1] == data.shape[0]  # nvar
    
    # take average over observations
    mean = stats.nanmean(data, axis=1)
    return (np.dot(weight.T, score) + mean[:,None])
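The projection itself is one matrix product plus the per-variable mean; a minimal standalone sketch with made-up shapes:

import numpy as np

npc, nvar, nobs = 2, 5, 10
rng = np.random.default_rng(1)
score = rng.standard_normal((npc, nobs))     # reduced scores
weight = rng.standard_normal((npc, nvar))    # reduced weights
mean = rng.standard_normal(nvar)             # per-variable (per-unit) mean

projected = weight.T @ score + mean[:, None]
print(projected.shape)                       # (5, 10) -- back in variable space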
Example #12
def orientation_product(T,Bb):
    """Computes the product of a tensor and a vector.

    Assumptions:
    None

    Source:
    N/A

    Inputs:
    T         [-] 3-dimensional array with rotation matrix
                  patterned along dimension zero
    Bb        [-] 3-dimensional vector

    Outputs:
    C         [-] transformed vector

    Properties Used:
    N/A
    """            
    
    assert np.rank(T) == 3
    
    if np.rank(Bb) == 3:
        C = np.einsum('aij,ajk->aik', T, Bb )
    elif np.rank(Bb) == 2:
        C = np.einsum('aij,aj->ai', T, Bb )
    else:
        raise Exception , 'bad B rank'
        
    return C
Example #13
	def __init__(self,init_pos,init_meas,\
	init_weight=1,move_weight=1,meas_weight=1):
		"""
		sets up properties for the algorithm
		sets initial position and takes measurements there
		"""
		# allows for a variety of formatted input
		while rank(init_pos)<1:
			init_pos=[init_pos]
		while rank(init_meas)<1:
			init_meas=[init_meas]
		if rank(init_meas)==1:
			for i in range(len(init_meas)):
				if init_meas[i] is not None:
					init_meas[i]=[init_meas[i]]
		# set up properties for the algorithm
		self.num_dim=len(init_pos)
		self.num_landmarks=len(init_meas)
		self.matrix_dim=self.num_dim+self.num_landmarks
		self.omega=zeros([self.matrix_dim,self.matrix_dim])
		self.xi=zeros([self.matrix_dim,1])
		self.init_weight=init_weight
		self.move_weight=move_weight
		self.meas_weight=meas_weight
		# sets initial position
		self.setPosition(init_pos)
		# takes measurements at initial position
		self.measure(init_meas)
Example #14
    def check_format(self, full_check=True):
        """check whether the matrix format is valid

        Parameters
        ==========

            - full_check : {bool}
                - True  - rigorous check, O(N) operations : default
                - False - basic check, O(1) operations

        """
        # use _swap to determine proper bounds
        major_name,minor_name = self._swap(('row','column'))
        major_dim,minor_dim = self._swap(self.shape)

        # index arrays should have integer data types
        if self.indptr.dtype.kind != 'i':
            warn("indptr array has non-integer dtype (%s)"
                    % self.indptr.dtype.name)
        if self.indices.dtype.kind != 'i':
            warn("indices array has non-integer dtype (%s)"
                    % self.indices.dtype.name)

        # only support 32-bit ints for now
        self.indptr = np.asarray(self.indptr, dtype=np.intc)
        self.indices = np.asarray(self.indices, dtype=np.intc)
        self.data = to_native(self.data)

        # check array shapes
        if np.rank(self.data) != 1 or np.rank(self.indices) != 1 or np.rank(self.indptr) != 1:
            raise ValueError('data, indices, and indptr should be rank 1')

        # check index pointer
        if (len(self.indptr) != major_dim + 1):
            raise ValueError("index pointer size (%d) should be (%d)" %
                                (len(self.indptr), major_dim + 1))
        if (self.indptr[0] != 0):
            raise ValueError("index pointer should start with 0")

        # check index and data arrays
        if (len(self.indices) != len(self.data)):
            raise ValueError("indices and data should have the same size")
        if (self.indptr[-1] > len(self.indices)):
            raise ValueError("Last value of index pointer should be less than "
                                "the size of index and data arrays")

        self.prune()

        if full_check:
            # check format validity (more expensive)
            if self.nnz > 0:
                if self.indices.max() >= minor_dim:
                    raise ValueError("%s index values must be < %d" %
                                        (minor_name,minor_dim))
                if self.indices.min() < 0:
                    raise ValueError("%s index values must be >= 0" %
                                        minor_name)
                if np.diff(self.indptr).min() < 0:
                    raise ValueError("index pointer values must form a "
                                        "non-decreasing sequence")
Example #15
def cumtrapz(var,z,inv=False):
    
    varint = np.zeros((var.shape[0],var.shape[1],var.shape[2]))
    
    if np.rank(z)==0:
    
        if inv:
            varint[:,:,1:] = integrate.cumtrapz(var*z,axis=2)
        else:
            varint[:,:,:-1] = integrate.cumtrapz(var[:,:,::-1]*z[:,:,::-1],axis=2)[:,:,::-1]
            
    elif np.rank(z)==1:
    
        for i in range(var.shape[0]):
            for j in range(var.shape[1]):
                var[i,j,:] = var[i,j,:]*z
                
        if inv:
            varint[:,:,1:] = integrate.cumtrapz(var,axis=2)
        else:
            varint[:,:,:-1] = integrate.cumtrapz(var[:,:,::-1],axis=2)[:,:,::-1]           
            
    elif np.rank(z)==3:
    
        if inv:
            varint[:,:,1:] = integrate.cumtrapz(var*z,axis=2)
        else:
            varint[:,:,:-1] = integrate.cumtrapz(var[:,:,::-1]*z[:,:,::-1],axis=2)[:,:,::-1]
            
    return varint
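For reference, the call underneath is SciPy's cumulative trapezoidal integration; a minimal sketch, assuming SciPy >= 1.6 where cumtrapz is exposed as cumulative_trapezoid:

import numpy as np
from scipy.integrate import cumulative_trapezoid  # cumtrapz in older SciPy

x = np.linspace(0.0, 1.0, 11)
y = np.ones((2, 3, 11))                  # integrand on a (2, 3, 11) grid
I = cumulative_trapezoid(y, x, axis=-1, initial=0)
print(I[0, 0])                           # [0.  0.1 0.2 ... 1. ]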
Example #16
def coordinate_transform(old_coord_data, iapp):
	# Translate, then transform
	transformed = np.array([[1.245218544034379,-0.7978622303790213],[-0.3779055518316757,1.4298048589659018]])

	if len(old_coord_data)==2 and np.rank(old_coord_data)==1:
		new_coord = np.zeros([2,1])
		#print new_coord
		#print old_coord_data
		new_coord[0] = old_coord_data[0] - 0.4 + 2*iapp
		new_coord[1] = old_coord_data[1] - 0.15 + iapp
		new_coord_final = np.dot(transformed, np.array([new_coord[0], new_coord[1]]))
		return new_coord_final

	elif old_coord_data.shape[1] == 2 and np.rank(old_coord_data)==2:
		old_coord_data[:,0] = old_coord_data[:,0] - 0.4 + 2*iapp
		old_coord_data[:,1] = old_coord_data[:,1] - 0.15 + iapp
		for i in range(len(old_coord_data)):
			old_coord_data[i] = np.dot(transformed, old_coord_data[i])
		return old_coord_data

	elif old_coord_data.shape[0] == 2 and np.rank(old_coord_data)==2:
		old_coord_data[0] = old_coord_data[0] - 0.4 + 2*iapp
		old_coord_data[1] = old_coord_data[1] - 0.15 + iapp
		for i in range(len(old_coord_data[0])):
			old_coord_data[:,i] = np.dot(transformed, old_coord_data[:,i])
		return old_coord_data			

	else:
		raise Exception("couldn't find appropriate manipulation for", old_coord_data.shape)
Example #17
def find_duplicates_others(source_array, other_array):
    """find indices of duplicate values in src_array against other_array
    source_array - numpy array to be checked
    other_array - numpy array to be checked against, must have a compatible shape[0]
                   with src_array
    """

    if other_array is None or len(other_array) == 0:
        return zeros(source_array.shape, dtype="int32")

    if other_array.shape[0] != source_array.shape[0]:
        raise ValueError, "Arrays have incompatible shapes"
    source_array_rank = rank(source_array)
    if rank(source_array) < rank(other_array):
        src_array = source_array[:, newaxis]
        oth_array = other_array
    elif rank(source_array) > rank(other_array):
        src_array = source_array
        oth_array = other_array[:, newaxis]
    else:
        src_array = source_array
        oth_array = other_array

    is_duplicates = equal(src_array, oth_array)
    duplicate_indicator = sum(is_duplicates, axis=1)

    #    if duplicate_indicator.ndim > source_array_rank:
    #        reshape(duplicate_indicator, shape=(src_array.size,))
    return duplicate_indicator
Example #18
def cube_array_search(k_face_array, k_faces):
    """
    Find the row indices (of s) corresponding to the
    cubes stored in the rows of cube array v.
    It is assumed that the rows of s are sorted in
    lexicographical order.

    Example:

      k_face_array = array([[0,0,0],[0,0,1],[0,1,0],[1,0,1]])
      k_faces = array([[0,1,0],[0,0,1]])
      cube_array_searchsorted(k_face_array,k_faces)

    Returns:

      array([2,1])

    """
    if rank(k_face_array) != 2 or rank(k_faces) != 2:
        raise ValueError, 'expected rank 2 arrays'

    if k_face_array.shape[1] != k_faces.shape[1]:
        raise ValueError, 'number of columns must agree'

    # a dense array used to lookup k_face_array row indices
    lookup_grid_dimensions = k_face_array.max(axis=0) + 1

    lookup_grid = empty(lookup_grid_dimensions, dtype=k_faces.dtype)
    lookup_grid[:] = -1
    lookup_grid[hsplit(k_face_array, k_face_array.shape[1])] = arange(
        k_face_array.shape[0], dtype=k_faces.dtype).reshape((-1, 1))
    row_indices = lookup_grid[hsplit(k_faces, k_faces.shape[1])].reshape((-1))

    return row_indices
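The docstring's example, run as a standalone check of the lookup-grid idea (plain numpy; rank/empty/hsplit/arange come from a star-import of numpy in the original):

import numpy as np

k_face_array = np.array([[0,0,0],[0,0,1],[0,1,0],[1,0,1]])
k_faces = np.array([[0,1,0],[0,0,1]])

dims = k_face_array.max(axis=0) + 1
lookup = np.full(dims, -1)
# each row of k_face_array becomes a coordinate into the dense lookup grid
lookup[tuple(k_face_array.T)] = np.arange(k_face_array.shape[0])
print(lookup[tuple(k_faces.T)])  # [2 1], matching the docstring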
Example #19
    def getnnz(self, axis=None):
        """Get the count of explicitly-stored values (nonzeros)

        Parameters
        ----------
        axis : None, 0, or 1
            Select between the number of values across the whole matrix, in
            each column, or in each row.
        """
        if axis is None:
            nnz = len(self.data)
            if nnz != len(self.row) or nnz != len(self.col):
                raise ValueError('row, column, and data array must all be the '
                                 'same length')

            if np.rank(self.data) != 1 or np.rank(self.row) != 1 or \
               np.rank(self.col) != 1:
                raise ValueError('row, column, and data arrays must have '
                                 'rank 1')

            return int(nnz)

        if axis < 0:
            axis += 2
        if axis == 0:
            return _compat_bincount(downcast_intp_index(self.col),
                                    minlength=self.shape[1])
        elif axis == 1:
            return _compat_bincount(downcast_intp_index(self.row),
                                    minlength=self.shape[0])
        else:
            raise ValueError('axis out of bounds')
Example #20
def cube_array_search(k_face_array,k_faces):
    """
    Find the row indices (of s) corresponding to the
    cubes stored in the rows of cube array v.
    It is assumed that the rows of s are sorted in
    lexicographical order.

    Example:

      k_face_array = array([[0,0,0],[0,0,1],[0,1,0],[1,0,1]])
      k_faces = array([[0,1,0],[0,0,1]])
      cube_array_searchsorted(k_face_array,k_faces)

    Returns:

      array([2,1])

    """
    if rank(k_face_array) != 2 or rank(k_faces) != 2:
        raise ValueError,'expected rank 2 arrays'

    if k_face_array.shape[1] != k_faces.shape[1]:
        raise ValueError,'number of columns must agree'

    # a dense array used to lookup k_face_array row indices 
    lookup_grid_dimensions = k_face_array.max(axis=0) + 1
    
    lookup_grid = empty(lookup_grid_dimensions,dtype=k_faces.dtype)
    lookup_grid[:] = -1
    lookup_grid[hsplit(k_face_array,k_face_array.shape[1])] = arange(k_face_array.shape[0],dtype=k_faces.dtype).reshape((-1,1))
    row_indices = lookup_grid[hsplit(k_faces,k_faces.shape[1])].reshape((-1))

    return row_indices
Example #21
def coordinate_transform(old_coord_data, iapp=None):
	# Translate, then transform
	assert (type(iapp) is float or type(iapp) is np.float64 or iapp == None)
	saddle = saddle_point(iapp)
	eigenvectors = get_eigenvectors(saddle, iapp)

	eigenvectors[:,0] = eigenvectors[:,0]/linalg.norm(eigenvectors[:,0])
	eigenvectors[:,1] = eigenvectors[:,1]/linalg.norm(eigenvectors[:,1])

	raw_matrix = eigenvectors
	transformed = linalg.inv(raw_matrix)

	if len(old_coord_data)==2 and np.rank(old_coord_data)==1:
		new_coord = np.zeros([2,1])
		new_coord[0] = old_coord_data[0] - saddle[0]
		new_coord[1] = old_coord_data[1] - saddle[1]
		new_coord_final = np.dot(transformed, np.array([new_coord[0], new_coord[1]]))
		return new_coord_final


	elif old_coord_data.shape[0] == 2 and np.rank(old_coord_data)==2:
		old_coord_data[0] = old_coord_data[0] - saddle[0]
		old_coord_data[1] = old_coord_data[1] - saddle[1]
		for i in range(len(old_coord_data[0])):
			old_coord_data[:,i] = np.dot(transformed, old_coord_data[:,i])
		return old_coord_data

	elif old_coord_data.shape[1] == 2 and np.rank(old_coord_data)==2:
		old_coord_data[:,0] = old_coord_data[:,0] - saddle[0]
		old_coord_data[:,1] = old_coord_data[:,1] - saddle[1]
		for i in range(len(old_coord_data)):
			old_coord_data[i] = np.dot(transformed, old_coord_data[i])
		return old_coord_data
	else:
		raise Exception("couldn't find appropriate manipulation for", old_coord_data.shape)
Example #22
def get_vel(pos, time, tax=0, spax=-1):
    ''' Get instantaneous velocity

    Parameters
    ----------
    time : array_like

    pos : array_like

    tax : int, optional
      time axis, defaults to 0
      has to be suitable for both pos and time,
      so probably needs to be +ve, i.e. indexed from beginning
    spax : int, optional
      space axis in pos, defaults to -1, i.e. last axis
    '''
    dp = np.diff(pos, axis=tax)
    dt = np.diff(time, axis=tax)

    if np.rank(dp) != np.rank(dt):
        if spax < 0:
            spax = len(pos.shape) + spax
        dts = [slice(None) for x in dt.shape]
        dts.insert(spax, None)
    else:
        dts = [slice(None) for x in dt.shape]
    return dp / dt[dts]
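The common case this handles: pos is (ntime, 3) while time is (ntime,), so dt needs a trailing axis inserted before the division can broadcast. A standalone sketch:

import numpy as np

time = np.linspace(0.0, 1.0, 5)                        # (5,)
pos = np.column_stack([time, 2 * time, np.zeros(5)])   # (5, 3)

dp = np.diff(pos, axis=0)                              # (4, 3)
dt = np.diff(time, axis=0)                             # (4,)
vel = dp / dt[:, None]                                 # dt broadcast over the space axis
print(vel[0])                                          # [1. 2. 0.]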
Example #23
    def pack(self, tod):
        """
        Convert a Tod that includes all the detectors into another
        Tod that contains only the valid ones, as selected by the
        detector mask

        Parameters
        ----------
        tod : Tod
              rank-2 Tod to be packed.

        Returns
        -------
        packed_tod : Tod
              This will be a new view object if possible; otherwise, it will
              be a copy.

        See Also
        --------
        unpack: inverse method

        """
        if np.rank(tod) != np.rank(self.instrument.detector.removed) + 1:
            raise ValueError('The input Tod is not unpacked.')
        ndetectors = self.instrument.detector.size
        nvalids = self.get_ndetectors()
        nsamples = tod.shape[-1]

        newshape = (nvalids, nsamples)
        if tod.shape[0:-1] != self.instrument.shape:
            raise ValueError("The detector mask has a shape '" + \
                str(self.instrument.shape) + "' incompatible with the unpacke" \
                "d input '" + str(tod.shape[0:-1]) + "'.")

        # return a view if all detectors are valid
        if nvalids == ndetectors:
            return tod.reshape((ndetectors, nsamples))

        # otherwise copy the detector timelines one by one
        mask = self.instrument.detector.removed.ravel()
        ptod = Tod.empty(newshape,
                         nsamples=tod.nsamples,
                         unit=tod.unit,
                         derived_units=tod.derived_units,
                         dtype=tod.dtype,
                         mask=np.empty(newshape, dtype='int8'))
        rtod = tod.reshape((ndetectors, nsamples))

        i = 0
        for iall in range(mask.size):
            if mask[iall] != 0:
                continue
            ptod[i, :] = rtod[iall, :]
            if tod.mask is not None:
                ptod.mask[i, :] = rtod.mask[iall, :]
            else:
                ptod.mask[i, :] = 0
            i += 1
        return ptod
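The copy loop at the end is equivalent to boolean-mask indexing on the flattened detector axis; a minimal sketch of that idea with a plain ndarray:

import numpy as np

tod = np.arange(12.0).reshape(4, 3)            # 4 detectors, 3 samples
removed = np.array([False, True, False, True]) # detector mask
packed = tod[~removed]                         # keep only the valid detectors
print(packed.shape)                            # (2, 3)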
Example #24
    def _sample_by_agent_and_stratum(self, index1, index2, stratum, prob_array,
                                     chosen_choice_index,
                                     strata_sample_setting):
        """Agent-by-agent and stratum-by-stratum stratified sampling, suitable for a 2d prob_array and/or sample sizes that vary across agents.
        This method is slower than _sample_by_stratum; for simpler stratified sampling use _sample_by_stratum instead."""

        rank_of_prob = rank(prob_array)
        rank_of_strata = rank(strata_sample_setting)

        J = self.__determine_sampled_index_size(strata_sample_setting,
                                                rank_of_strata)
        sampled_index = zeros((index1.size, J), dtype=DTYPE) - 1
        self._sampling_probability = zeros((index1.size, J), dtype=float32)
        self._stratum_id = ones((index1.size, J), dtype=DTYPE) * NO_STRATUM_ID

        for i in range(index1.size):
            if rank_of_strata == 3:
                strata_sample_pairs = strata_sample_setting[i, :]
            else:
                strata_sample_pairs = strata_sample_setting

            if rank_of_prob == 2:
                prob = prob_array[i, :]
            else:
                prob = prob_array

            j = 0
            for (this_stratum, this_size) in strata_sample_pairs:
                if this_size <= 0: continue
                index_not_in_stratum = where(stratum != this_stratum)[0]
                this_prob = copy.copy(prob)

                this_prob[index_not_in_stratum] = 0.0
                this_prob = normalize(this_prob)

                if nonzerocounts(this_prob) < this_size:
                    logger.log_warning(
                        "weight array doesn't have enough non-zero counts, use sample with replacement"
                    )


#                chosen_index_to_index2 = where(index2 == chosen_choice_index[i])[0]
#exclude_index passed to probsample_noreplace needs to be indexed to index2
                this_sampled_index = probsample_noreplace(
                    index2,
                    sample_size=this_size,
                    prob_array=this_prob,
                    exclude_index=chosen_choice_index[i],
                    return_index=True)
                sampled_index[i, j:j + this_size] = this_sampled_index

                self._sampling_probability[
                    i, j:j + this_size] = this_prob[this_sampled_index]
                self._stratum_id[i, j:j + this_size] = ones(
                    (this_sampled_index.size, ), dtype=DTYPE) * this_stratum

                j += this_size

        return index2[sampled_index]
Example #25
    def check_format(self, full_check=True):
        """check whether the matrix format is valid

            *Parameters*:
                full_check:
                    True  - rigorous check, O(N) operations : default
                    False - basic check, O(1) operations

        """
        M,N = self.shape
        R,C = self.blocksize

        # index arrays should have integer data types
        if self.indptr.dtype.kind != 'i':
            warn("indptr array has non-integer dtype (%s)" \
                    % self.indptr.dtype.name )
        if self.indices.dtype.kind != 'i':
            warn("indices array has non-integer dtype (%s)" \
                    % self.indices.dtype.name )

        # only support 32-bit ints for now
        self.indptr  = np.asarray(self.indptr, np.intc)
        self.indices = np.asarray(self.indices, np.intc)
        self.data    = to_native(self.data)

        # check array shapes
        if np.rank(self.indices) != 1 or np.rank(self.indptr) != 1:
            raise ValueError,"indices, and indptr should be rank 1"
        if np.rank(self.data) != 3:
            raise ValueError,"data should be rank 3"

        # check index pointer
        if (len(self.indptr) != M/R + 1 ):
            raise ValueError, \
                "index pointer size (%d) should be (%d)" % \
                 (len(self.indptr), M/R + 1)
        if (self.indptr[0] != 0):
            raise ValueError,"index pointer should start with 0"

        # check index and data arrays
        if (len(self.indices) != len(self.data)):
            raise ValueError,"indices and data should have the same size"
        if (self.indptr[-1] > len(self.indices)):
            raise ValueError, \
                  "Last value of index pointer should be less than "\
                  "the size of index and data arrays"

        self.prune()

        if full_check:
            #check format validity (more expensive)
            if self.nnz > 0:
                if self.indices.max() >= N/C:
                    print "max index",self.indices.max()
                    raise ValueError, "column index values must be < %d" % (N/C)
                if self.indices.min() < 0:
                    raise ValueError, "column index values must be >= 0"
                if diff(self.indptr).min() < 0:
                    raise ValueError, "index pointer values must form a " \
                                      "non-decreasing sequence"
Example #26
    def check_format(self, full_check=True):
        """check whether the matrix format is valid

            *Parameters*:
                full_check:
                    True  - rigorous check, O(N) operations : default
                    False - basic check, O(1) operations

        """
        M, N = self.shape
        R, C = self.blocksize

        # index arrays should have integer data types
        if self.indptr.dtype.kind != 'i':
            warn("indptr array has non-integer dtype (%s)" %
                 self.indptr.dtype.name)
        if self.indices.dtype.kind != 'i':
            warn("indices array has non-integer dtype (%s)" %
                 self.indices.dtype.name)

        idx_dtype = get_index_dtype((self.indices, self.indptr))
        self.indptr = np.asarray(self.indptr, dtype=idx_dtype)
        self.indices = np.asarray(self.indices, dtype=idx_dtype)
        self.data = to_native(self.data)

        # check array shapes
        if np.rank(self.indices) != 1 or np.rank(self.indptr) != 1:
            raise ValueError("indices, and indptr should be rank 1")
        if np.rank(self.data) != 3:
            raise ValueError("data should be rank 3")

        # check index pointer
        if (len(self.indptr) != M // R + 1):
            raise ValueError("index pointer size (%d) should be (%d)" %
                             (len(self.indptr), M // R + 1))
        if (self.indptr[0] != 0):
            raise ValueError("index pointer should start with 0")

        # check index and data arrays
        if (len(self.indices) != len(self.data)):
            raise ValueError("indices and data should have the same size")
        if (self.indptr[-1] > len(self.indices)):
            raise ValueError("Last value of index pointer should be less than "
                             "the size of index and data arrays")

        self.prune()

        if full_check:
            # check format validity (more expensive)
            if self.nnz > 0:
                if self.indices.max() >= N // C:
                    raise ValueError(
                        "column index values must be < %d (now max %d)" %
                        (N // C, self.indices.max()))
                if self.indices.min() < 0:
                    raise ValueError("column index values must be >= 0")
                if np.diff(self.indptr).min() < 0:
                    raise ValueError("index pointer values must form a "
                                     "non-decreasing sequence")
Example #27
def fftconvolve(
    in1,
    in2,
    mode="full",
    preprocessed_input=None
    ):
    """Convolve two N-dimensional arrays using FFT.

    Very similar to scipy.signal.fftconvolve, but with some performance 
    customizations to the padding, and optional preprocessing.

    scipy.signal.fftconvolve currently pads to a power of 2 for fast
    FFTs; this is often suboptimal, since numpy's FFTs are fast for
    array sizes that are products of small primes. Our version tries
    to guess a good padding size.

    Often, we find ourselves convolving many different in1's with the
    same in2. The 'preprocessed_input' argument lets us save
    computation time by passing a preprocessed version of in2.
    """
    in1 = np.asarray(in1)
    in2 = np.asarray(in2)

    if np.rank(in1) == np.rank(in2) == 0:  # scalar inputs
        return in1 * in2
    elif not in1.ndim == in2.ndim:
        raise ValueError("in1 and in2 should have the same rank")
    elif in1.size == 0 or in2.size == 0:  # empty arrays
        return array([])

    if preprocessed_input is None:
        preprocessed_input = preprocess_input_2(in1, in2)
    (in2_preprocessed,
     in2_preprocessed_is_complex,
     size,
     fsize,
     s1,
     s2
     ) = preprocessed_input

    complex_result = (np.issubdtype(in1.dtype, np.complex) or
                      in2_preprocessed_is_complex)

    fslice = tuple([slice(0, int(sz)) for sz in size])
    if not complex_result:
        ret = np.fft.irfftn(np.fft.rfftn(in1, fsize) * in2_preprocessed, fsize
                            )[fslice].copy()
        ret = ret.real
    else:
        ret = np.fft.ifftn(np.fft.fftn(in1, fsize) * in2_preprocessed
                           )[fslice].copy()

    if mode == "full":
        return ret
    elif mode == "same":
        return _centered(ret, s1)
    elif mode == "valid":
        return _centered(ret, s1 - s2 + 1)
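The "good padding size" the docstring alludes to is available in SciPy as next_fast_len; a minimal sketch, assuming SciPy >= 1.4 where scipy.fft is available:

import numpy as np
from scipy.fft import next_fast_len

n = 1000 + 1024 - 1                  # full convolution length of the two inputs
fsize = next_fast_len(n)             # next fast FFT size >= n (e.g. 2025 rather than 2048)
spec = np.fft.rfft(np.ones(1000), fsize)
print(fsize >= n, spec.shape)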
Example #28
    def SI(self):
        """
        Return the quantity in SI units. If the quantity has
        no units, the quantity itself is returned; otherwise, a
        shallow copy is returned.

        Example
        -------
        >>> print(Quantity(1., 'km').SI)
        1000.0 m
        """
        if len(self._unit) == 0:
            return self

        factor = Quantity(1., '')
        for key, val in self._unit.items():

            # check if the unit is a local derived unit
            newfactor = _check_du(self, key, val, self.derived_units)

            # check if the unit is a global derived unit
            if newfactor is None:
                newfactor = _check_du(self, key, val, units)
                
            # if the unit is not derived, we add it to the dictionary
            if newfactor is None:            
                _multiply_unit_inplace(factor._unit, key, val)
                continue

            # factor may be broadcast
            factor = factor * newfactor

        if np.rank(self) == 0 or np.rank(factor) == 0:
            result = self * factor.magnitude
            result._unit = factor._unit
            return result

        # make sure that the unit factor can be broadcast
        if any([d1 not in (1,d2)
                for (d1,d2) in zip(self.shape[0:factor.ndim], factor.shape)]):
            raise ValueError("The derived unit '" + key + "' has a shape '" + \
                str(newfactor.shape) + "' which is incompatible with the dime" \
                "nsion(s) along the first axes '" + str(self.shape) + "'.")

        # Python's broadcast operates by adding slow dimensions. Since we
        # need to use a unique conversion factor along the fast dimensions
        # (ex: second axis in an array tod[detector,time]), we need to perform
        # our own broadcast by adding fast dimensions
        if np.any(self.shape[0:factor.ndim] != factor.shape):
            broadcast = np.hstack([factor.shape,
                                  np.ones(self.ndim-factor.ndim,int)])
            result = self * np.ones(broadcast)
        else:
            result = self.copy()
        result.T[:] *= factor.magnitude.T
        result._unit = factor._unit

        return result
Example #29
def add_column(cur_vars, more_vars):
    if np.rank(more_vars) == 1:
        more_vars.shape = list(more_vars.shape) + [1,]
    if cur_vars is not None: # must have same number of tasks
        assert np.rank(more_vars) == 2
        assert cur_vars.shape[0] == more_vars.shape[0]
        return np.concatenate((cur_vars, more_vars), axis=-1)
    else:
        return more_vars
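np.column_stack does the 1-D promotion and concatenation in one call for already-aligned arrays; a minimal sketch:

import numpy as np

cur = np.zeros((4, 2))
more = np.ones(4)                    # 1-D column to append
out = np.column_stack((cur, more))   # promotes more to (4, 1), then concatenates
print(out.shape)                     # (4, 3)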
Example #30
    def check_format(self, full_check=True):
        """check whether the matrix format is valid

            *Parameters*:
                full_check:
                    True  - rigorous check, O(N) operations : default
                    False - basic check, O(1) operations

        """
        M,N = self.shape
        R,C = self.blocksize

        # index arrays should have integer data types
        if self.indptr.dtype.kind != 'i':
            warn("indptr array has non-integer dtype (%s)"
                    % self.indptr.dtype.name)
        if self.indices.dtype.kind != 'i':
            warn("indices array has non-integer dtype (%s)"
                    % self.indices.dtype.name)

        idx_dtype = get_index_dtype((self.indices, self.indptr))
        self.indptr = np.asarray(self.indptr, dtype=idx_dtype)
        self.indices = np.asarray(self.indices, dtype=idx_dtype)
        self.data = to_native(self.data)

        # check array shapes
        if np.rank(self.indices) != 1 or np.rank(self.indptr) != 1:
            raise ValueError("indices, and indptr should be rank 1")
        if np.rank(self.data) != 3:
            raise ValueError("data should be rank 3")

        # check index pointer
        if (len(self.indptr) != M//R + 1):
            raise ValueError("index pointer size (%d) should be (%d)" %
                                (len(self.indptr), M//R + 1))
        if (self.indptr[0] != 0):
            raise ValueError("index pointer should start with 0")

        # check index and data arrays
        if (len(self.indices) != len(self.data)):
            raise ValueError("indices and data should have the same size")
        if (self.indptr[-1] > len(self.indices)):
            raise ValueError("Last value of index pointer should be less than "
                                "the size of index and data arrays")

        self.prune()

        if full_check:
            # check format validity (more expensive)
            if self.nnz > 0:
                if self.indices.max() >= N//C:
                    raise ValueError("column index values must be < %d (now max %d)" % (N//C, self.indices.max()))
                if self.indices.min() < 0:
                    raise ValueError("column index values must be >= 0")
                if np.diff(self.indptr).min() < 0:
                    raise ValueError("index pointer values must form a "
                                        "non-decreasing sequence")
Example #31
    def getnnz(self):
        nnz = len(self.data)
        if nnz != len(self.row) or nnz != len(self.col):
            raise ValueError('row, column, and data array must all be the same length')

        if np.rank(self.data) != 1 or np.rank(self.row) != 1 or np.rank(self.col) != 1:
            raise ValueError('row, column, and data arrays must have rank 1')

        return nnz
Example #32
File: coo.py Project: 317070/scipy
    def getnnz(self):
        nnz = len(self.data)
        if nnz != len(self.row) or nnz != len(self.col):
            raise ValueError('row, column, and data array must all be the same length')

        if np.rank(self.data) != 1 or np.rank(self.row) != 1 or np.rank(self.col) != 1:
            raise ValueError('row, column, and data arrays must have rank 1')

        return int(nnz)
Example #33
    def SwitchToData(self, name):
        '''Usage:  SwitchToData(name)
        
        Switch to viewing the data named <name>.'''

        self._pdbg(3, '->SwitchToData')

        if name is None:
            self.mainframe.fig.clf()
            self.curname = None
        else:
            
            # working out if should keep same x-y position/zoom
            # as long as some old image is present...
            if not self.curname is None:
                old_lims = self.mainframe.GetLims()
                self._pdbg( 3, "in SwitchToData: old lims" + str( old_lims ) )
            else:
                self._pdbg(3, "in SwitchToData: no old lims")
                old_lims = None

            # for coding convenience
            data = self.ims[name]

            # for colour-table preservation throughout stack
            self.imin = data.min()
            self.imax = data.max()

            # if 2-D set maximum z to be 0
            if numpy.rank(data) == 2:
                zmax = 0
                # if 3-D go to same (current) plane or max available
            elif numpy.rank(data) == 3:
                if self.zpos > data.shape[2]:
                    self.zpos = data.shape[2] - 1
                zmax = data.shape[2] - 1
        
            # for later reference
            self.curname = name

            # make sure current item selected in Data window is correct
            if self.dataframe.GetCurrentItemText() != name:
                self.dataframe.SelectItem(name)

            # update maximum for z-slider in Views window
            self.viewsframe.zslider.SetMax( zmax )

            # continue working out if old x-y position
            # is appropriate for new data
            keepaxes = False
            if not old_lims is None:
                if numpy.all( numpy.array(old_lims).max(1) < data.shape[0:2] ):
                    keepaxes = True

            # update the screen image
            self.UpdateImage(keepaxes)
Example #34
    def pack(self, tod):
        """
        Convert a Tod that includes all the detectors into another
        Tod that contains only the valid ones, as selected by the
        detector mask

        Parameters
        ----------
        tod : Tod
              rank-2 Tod to be packed.

        Returns
        -------
        packed_tod : Tod
              This will be a new view object if possible; otherwise, it will
              be a copy.

        See Also
        --------
        unpack: inverse method

        """
        if np.rank(tod) != np.rank(self.instrument.detector.removed)+1:
            raise ValueError('The input Tod is not unpacked.')
        ndetectors = self.instrument.detector.size
        nvalids = self.get_ndetectors()
        nsamples = tod.shape[-1]

        newshape = (nvalids, nsamples)
        if tod.shape[0:-1] != self.instrument.shape:
            raise ValueError("The detector mask has a shape '" + \
                str(self.instrument.shape) + "' incompatible with the unpacke" \
                "d input '" + str(tod.shape[0:-1]) + "'.")

        # return a view if all detectors are valid
        if nvalids == ndetectors:
            return tod.reshape((ndetectors, nsamples))

        # otherwise copy the detector timelines one by one
        mask = self.instrument.detector.removed.ravel()
        ptod = Tod.empty(newshape, nsamples=tod.nsamples, unit=tod.unit,
                         derived_units=tod.derived_units, dtype=tod.dtype, 
                         mask=np.empty(newshape, dtype='int8'))
        rtod = tod.reshape((ndetectors, nsamples))
        
        i = 0
        for iall in range(mask.size):
            if mask[iall] != 0:
                 continue
            ptod[i,:] = rtod[iall,:]
            if tod.mask is not None:
                ptod.mask[i,:] = rtod.mask[iall,:]
            else:
                ptod.mask[i,:] = 0
            i += 1
        return ptod
Example #35
def rho2v(var_rho):

    if np.rank(var_rho)==1:
        var_v = 0.5*(var_rho[1:]+var_rho[:-1])
    elif np.rank(var_rho)==2:
        var_v = rho2v_2d(var_rho)
    else:
        var_v = rho2v_3d(var_rho)

    return var_v
Example #36
def rho2u(var_rho):

    if np.rank(var_rho)==1:
        var_u = 0.5*(var_rho[1:]+var_rho[:-1])
    elif np.rank(var_rho)==2:       
        var_u = rho2u_2d(var_rho)
    else:
        var_u = rho2u_3d(var_rho)

    return var_u
Example #37
def _add_to_array(array, to_add):
    assert array is None or isinstance(array, np.ndarray)
    if array is not None:
        assert np.rank(array) == np.rank(to_add)

    if array is None:
        array = to_add
    else:
        array = np.hstack((array, to_add))
    return array
Example #38
    def _sample_by_agent_and_stratum(
        self, index1, index2, stratum, prob_array, chosen_choice_index, strata_sample_setting
    ):
        """Agent-by-agent and stratum-by-stratum stratified sampling, suitable for a 2d prob_array and/or sample sizes that vary across agents.
        This method is slower than _sample_by_stratum; for simpler stratified sampling use _sample_by_stratum instead."""

        rank_of_prob = rank(prob_array)
        rank_of_strata = rank(strata_sample_setting)

        J = self.__determine_sampled_index_size(strata_sample_setting, rank_of_strata)
        sampled_index = zeros((index1.size, J), dtype="int32") - 1
        self._sampling_probability = zeros((index1.size, J), dtype=float32)
        self._stratum_id = ones((index1.size, J), dtype="int32") * NO_STRATUM_ID

        for i in range(index1.size):
            if rank_of_strata == 3:
                strata_sample_pairs = strata_sample_setting[i, :]
            else:
                strata_sample_pairs = strata_sample_setting

            if rank_of_prob == 2:
                prob = prob_array[i, :]
            else:
                prob = prob_array

            j = 0
            for (this_stratum, this_size) in strata_sample_pairs:
                if this_size <= 0:
                    continue
                index_not_in_stratum = where(stratum != this_stratum)[0]
                this_prob = copy.copy(prob)

                this_prob[index_not_in_stratum] = 0.0
                this_prob = normalize(this_prob)

                if nonzerocounts(this_prob) < this_size:
                    logger.log_warning("weight array doesn't have enough non-zero counts, use sample with replacement")

                #                chosen_index_to_index2 = where(index2 == chosen_choice_index[i])[0]
                # exclude_index passed to probsample_noreplace needs to be indexed to index2
                this_sampled_index = probsample_noreplace(
                    index2,
                    sample_size=this_size,
                    prob_array=this_prob,
                    exclude_index=chosen_choice_index[i],
                    return_index=True,
                )
                sampled_index[i, j : j + this_size] = this_sampled_index

                self._sampling_probability[i, j : j + this_size] = this_prob[this_sampled_index]
                self._stratum_id[i, j : j + this_size] = ones((this_sampled_index.size,), dtype="int32") * this_stratum

                j += this_size

        return index2[sampled_index]
Example #39
	def __init__(self,signal,shot,tree=None,connection=None,nomds=False):

		# Save object values
		self.signal     = signal
		self.shot       = shot
		self.zdata      = -1
		self.xdata      = -1
		self.ydata      = -1
		self.zunits     = ''
		self.xunits     = ''
		self.yunits     = ''
		self.rank       = -1
		self.connection = connection

		## Retrieve Data
		t0 = time.time()
		found = 0

		# Create the MDSplus connection (thin) if not passed in
		if self.connection is None:
			self.connection = MDSplus.Connection('atlas.gat.com')

		# Retrieve data from MDSplus (thin)
		if nomds == False:
			try:
				if tree != None:
					tag    = self.signal
					fstree = tree
				else:
					tag    = self.connection.get('findsig("'+self.signal+'",_fstree)').value
					fstree = self.connection.get('_fstree').value

				self.connection.openTree(fstree,shot)
				self.zdata  = self.connection.get('_s = '+tag).data()
				self.zunits = self.connection.get('units_of(_s)').data()
				self.rank   = numpy.rank(self.zdata)
				self.xdata  = self.connection.get('dim_of(_s)').data()
				self.xunits = self.connection.get('units_of(dim_of(_s))').data()
				if self.xunits == '' or self.xunits == ' ':
					self.xunits = self.connection.get('units(dim_of(_s))').data()
				if self.rank > 1:
					self.ydata  = self.connection.get('dim_of(_s,1)').data()
					self.yunits = self.connection.get('units_of(dim_of(_s,1))').data()
					if self.yunits == '' or self.yunits == ' ':
						self.yunits = self.connection.get('units(dim_of(_s,1))').data()

				found = 1

				# MDSplus seems to return 2-D arrays transposed.  Change them back.
				if numpy.rank(self.zdata) == 2: self.zdata = numpy.transpose(self.zdata)
				if numpy.rank(self.ydata) == 2: self.ydata = numpy.transpose(self.ydata)
				if numpy.rank(self.xdata) == 2: self.xdata = numpy.transpose(self.xdata)

			except Exception, e:
				print '   Signal not in MDSplus: %s' % (signal,)
Example #40
    def cube_array(self):
        """
        Return a cube array that represents this mesh's bitmap
        """
        cubes = vstack(self.bitmap.nonzero()).transpose()
        cubes = hstack(
            (cubes,
             zeros((cubes.shape[0], rank(self.bitmap)), dtype=cubes.dtype) +
             arange(rank(self.bitmap))))

        return cubes
Example #41
def _condition_inputs(data, kernel):
    data, kernel = np.asarray(data), np.asarray(kernel)
    if np.rank(data) == 0:
        data.shape = (1, )
    if np.rank(kernel) == 0:
        kernel.shape = (1, )
    if np.rank(data) > 1 or np.rank(kernel) > 1:
        raise ValueError("arrays must be 1D")
    if len(data) < len(kernel):
        data, kernel = kernel, data
    return data, kernel
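np.atleast_1d is the idiomatic way to do the scalar-to-1D promotion performed here; a minimal sketch:

import numpy as np

data, kernel = np.atleast_1d(5.0), np.atleast_1d([1.0, 2.0])
print(data.shape, kernel.shape)  # (1,) (2,)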
Example #42
def _condition_inputs(data, kernel):
    data, kernel = num.asarray(data), num.asarray(kernel)
    if num.rank(data) == 0:
        data.shape = (1,)
    if num.rank(kernel) == 0:
        kernel.shape = (1,)
    if num.rank(data) > 1 or num.rank(kernel) > 1:
        raise ValueError("arrays must be 1D")
    if len(data) < len(kernel):
        data, kernel = kernel, data
    return data, kernel
Example #43
0
    def kernel(output, x, *args):
        kwargs = {}
        for (slot, _), arg in zip(expression._user_args, args):
            kwargs[slot] = arg
        X = numpy.dot(X_remap.T, x)

        for i in range(len(output)):
            # Pass a slice for the scalar case but just the
            # current vector in the VFS case. This ensures the
            # eval method has a Dolfin compatible API.
            expression.eval(output[i:i+1, ...] if numpy.rank(output) == 1 else output[i, ...],
                            X[i:i+1, ...] if numpy.rank(X) == 1 else X[i, ...], **kwargs)
Example #44
0
    def Kpred(A, P, Sigma_e, x):

        if np.rank(A) != 2 or not isinstance(A, np.ndarray):
            raise TypeError('A should be a rank-two ndarray')
        if np.rank(Sigma_e) != 2 or not isinstance(Sigma_e, np.ndarray):
            raise TypeError('Sigma_e should be a rank-two ndarray')
        if np.rank(P) != 2 or not isinstance(P, np.ndarray):
            raise TypeError('P should be a rank-two ndarray')
        if not isinstance(x, np.ndarray):
            raise TypeError('x should be an ndarray')

        # predict state
        x_ = pb.dot(A, x)
        # predict state covariance matrix
        P_ = dots(A, P, A.T) + Sigma_e
        return x_, P_
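A minimal prediction-step sketch, assuming pb.dot is numpy.dot and dots(A, P, A.T) is the chained product A·P·Aᵀ (both helpers are defined elsewhere in the original code):

import numpy as np

A = np.array([[1.0, 1.0],
              [0.0, 1.0]])        # state transition
P = np.eye(2)                     # current state covariance
Sigma_e = 0.1 * np.eye(2)         # process noise covariance
x = np.array([[0.0], [1.0]])      # state as a column vector

x_pred = np.dot(A, x)                          # -> [[1.], [1.]]
P_pred = np.dot(np.dot(A, P), A.T) + Sigma_e   # -> [[2.1, 1.], [1., 1.1]]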
Example #45
0
def orientation_product(T, Bb):

    assert np.rank(T) == 3

    if np.rank(Bb) == 3:
        C = np.einsum('aij,ajk->aik', T, Bb)
    elif np.rank(Bb) == 2:
        C = np.einsum('aij,aj->ai', T, Bb)
    else:
        raise Exception('bad B rank')

    return C
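Shape-wise, T is a stack of 3x3 rotation matrices and Bb is either a matching stack of matrices or of vectors; a small illustration:

import numpy as np

T = np.tile(np.eye(3), (5, 1, 1))    # (5, 3, 3): five identity rotations
v = np.ones((5, 3))                  # (5, 3):    five row vectors

np.einsum('aij,aj->ai', T, v)        # rotate each vector  -> shape (5, 3)
np.einsum('aij,ajk->aik', T, T)      # compose each pair   -> shape (5, 3, 3)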
Example #48
0
def _estimate_count_from_rate(rate, bin_edges):
    '''
    Convert a `rate` (in Hz) to a count, given window edges `bin_edges`.
    '''
    assert rate.shape[-1] == bin_edges.shape[-1] - 1

    window = np.diff(bin_edges, axis=-1)

    if np.rank(rate) == np.rank(bin_edges) + 1:
        # rate has one extra dimension; insert an axis at -2 so the
        # window broadcasts across it
        window = window[..., None, :]
    return rate * window
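A hand-checked example: a constant 2 Hz rate over two 0.5 s windows should give one expected count per window (run with the np.rank shim on modern NumPy):

import numpy as np

rate = np.array([2.0, 2.0])             # Hz, one value per window
bin_edges = np.array([0.0, 0.5, 1.0])   # seconds
_estimate_count_from_rate(rate, bin_edges)   # -> array([1., 1.])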
Example #49
0
def convolve(in1, in2, mode='full'):
    """
    Convolve two N-dimensional arrays.

    Convolve in1 and in2 with output size determined by mode.

    Parameters
    ----------
    in1 : array
        First input.
    in2 : array
        Second input. Should have the same number of dimensions as in1.
    mode : str {'valid', 'same', 'full'}
        A string indicating the size of the output:

        ``valid`` : the output consists only of those elements that do not
           rely on the zero-padding.

        ``same`` : the output is the same size as ``in1`` centered
           with respect to the 'full' output.

        ``full`` : the output is the full discrete linear cross-correlation
           of the inputs. (Default)


    Returns
    -------
    out : array
        An N-dimensional array containing a subset of the discrete linear
        cross-correlation of in1 with in2.

    """
    volume = asarray(in1)
    kernel = asarray(in2)

    if rank(volume) == rank(kernel) == 0:
        return volume * kernel
    elif not volume.ndim == kernel.ndim:
        raise ValueError("in1 and in2 should have the same rank")

    slice_obj = [slice(None, None, -1)] * len(kernel.shape)

    if mode == 'valid':
        for d1, d2 in zip(volume.shape, kernel.shape):
            if not d1 >= d2:
                raise ValueError(
                    "in1 should have at least as many items as in2 in "
                    "every dimension for valid mode.")
    if np.iscomplexobj(kernel):
        return correlate(volume, kernel[slice_obj].conj(), mode)
    else:
        return correlate(volume, kernel[slice_obj], mode)
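The snippet generalizes 1-D convolution to N dimensions by flipping the kernel and delegating to correlate; for intuition about the three modes, NumPy's own 1-D convolve behaves the same way (values worked out by hand):

import numpy as np

a = np.array([1.0, 2.0, 3.0])
b = np.array([0.0, 1.0, 0.5])

np.convolve(a, b, mode='full')    # -> [0. , 1. , 2.5, 4. , 1.5]
np.convolve(a, b, mode='same')    # -> [1. , 2.5, 4. ]
np.convolve(a, b, mode='valid')   # -> [2.5]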
Example #50
0
def isshape(x):
    """Is x a valid 2-tuple of dimensions?
    """
    try:
        # Assume it's a tuple of matrix dimensions (M, N)
        (M, N) = x
    except Exception:
        return False
    else:
        if isintlike(M) and isintlike(N):
            if np.rank(M) == 0 and np.rank(N) == 0:
                return True
        return False
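Expected behaviour, for illustration:

isshape((3, 4))      # True
isshape((3.5, 4))    # False -- 3.5 is not int-like
isshape((1, 2, 3))   # False -- unpacking into (M, N) fails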
Example #51
0
    def expand_rows(self, rows):
        """ Makes a 1-D array the right size. Often used after a mission is initialized to size out the vectors to the
            right size.
        
            Assumptions:
            Doesn't expand initials or numerics
    
            Source:
            N/A
    
            Inputs:
            rows   [int]
    
            Outputs:
            None
    
            Properties Used:
            None
        """

        # store
        self._size = rows

        for k, v in self.items():

            # don't expand initials or numerics
            if k in ('initials', 'numerics'):
                continue

            # recursion
            elif isinstance(v, Conditions):
                v.expand_rows(rows)
            # need arrays here
            elif np.rank(v) == 2:
                self[k] = np.resize(v, [rows, v.shape[1]])
Example #52
0
    def _is_action_bounded(action_component):
        if not isinstance(action_component, FloatBox):
            return False
        if isinstance(action_component.low, (list, np.ndarray)) and np.rank(action_component.low) > 0:
            # TODO: we need to properly split the action space in case *some* of the dimensions are bounded!
            if (not np.all(np.isinf(action_component.low)) and np.any(np.isinf(action_component.low))) or \
                    (not np.all(np.isinf(action_component.high)) and np.any(np.isinf(action_component.high))):
                raise RLGraphError("Actions with a mix of unbounded and bounded dimensions are not supported yet! "
                                   "The boundaries are low={} high={}.".format(action_component.low, action_component.high))
            low = action_component.low[0]
            high = action_component.high[0]
        else:
            low = action_component.low
            high = action_component.high
        # Unbounded.
        if low == float("-inf") and high == float("inf"):
            return False
        # Bounded.
        elif low != float("-inf") and high != float("inf"):
            return True
        # TODO: Semi-bounded -> Exponential distribution.
        else:
            raise RLGraphError(
                "Semi-bounded action spaces are not supported yet! You passed in low={} high={}.".
                format(action_component.low, action_component.high)
            )
Example #53
0
    def do_pack(D):
        for v in D.values():
            # type checking
            if isinstance(v, dict):
                do_pack(v)  # recursion!
                continue
            elif not isinstance(v, valid_types):
                continue
            elif np.rank(v) > 2:
                continue
            # make column vectors
            v = atleast_2d_col(v)
            # handle output type
            if vector:
                # unravel into 1d vector
                v = v.ravel(order='F')
            else:
                # check array size; set once from the first array
                size[0] = size[0] or v.shape[0]
                if v.shape[0] != size[0]:
                    # warn('array size mismatch, skipping. all values in data must have '
                    #      'same number of rows for array packing', RuntimeWarning)
                    continue
            # dump to list
            M.append(v)
Example #54
0
def ce(input_signal, target_signal):
    """ 
    ce(input_signal, target_signal)-> cross-entropy
    Compute cross-entropy loss function

    Returns the negative log-likelyhood of the target_signal labels as predicted by
    the input_signal values.

    Parameters:
        - input_signal : array
        - target_signal : array
    """
    check_signal_dimensions(input_signal, target_signal)

    if np.rank(target_signal) > 1 and target_signal.shape[1] > 1:
        error = mdp.numx.sum(-mdp.numx.log(input_signal[target_signal == 1]))

        if mdp.numx.isnan(error):
            inp = input_signal[target_signal == 1]
            inp[inp == 0] = float(np.finfo(input_signal.dtype).tiny)
            error = -mdp.numx.sum(mdp.numx.log(inp))
    else:
        error = -mdp.numx.sum(mdp.numx.log(input_signal[target_signal == 1]))
        error -= mdp.numx.sum(
            mdp.numx.log(1 - input_signal[target_signal == 0]))

        if mdp.numx.isnan(error):
            inp = input_signal[target_signal == 1]
            inp[inp == 0] = float(np.finfo(input_signal.dtype).tiny)
            error = -mdp.numx.sum(mdp.numx.log(inp))
            inp = 1 - input_signal[target_signal == 0]
            inp[inp == 0] = float(np.finfo(input_signal.dtype).tiny)
            error -= mdp.numx.sum(mdp.numx.log(inp))

    return error
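For the multi-class (one-hot) branch, the loss reduces to the negative log of the probability assigned to each true class; a hand-checked sketch in plain NumPy:

import numpy as np

probs = np.array([[0.7, 0.2, 0.1],
                  [0.1, 0.8, 0.1]])    # predicted class probabilities
onehot = np.array([[1, 0, 0],
                   [0, 1, 0]])         # true labels, one-hot
-np.sum(np.log(probs[onehot == 1]))    # -> -(log 0.7 + log 0.8) ≈ 0.580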
Example #55
0
def var_regression_matrix(H, x, model, sigma=1):
    """
    Compute the variance of the 'regression error'.
    
    Parameters
    ----------
    H : 2d-array
        The regression matrix
    x : 2d-array
        The coordinates to calculate the regression error variance at.
    model : str
        A string of tokens that define the regression model (e.g. 
        '1 x1 x2 x1*x2')
    sigma : scalar
        An estimate of the variance (default: 1).
    
    Returns
    -------
    var : scalar
        The variance of the regression error, evaluated at ``x``.
        
    """
    x = np.atleast_2d(x)
    H = np.atleast_2d(H)

    if x.shape[0] == 1:
        x = x.T

    # np.rank returns the number of dimensions, not the linear-algebra rank;
    # the intended check here is on the matrix rank of H
    if np.linalg.matrix_rank(H) < (np.dot(H.T, H)).shape[0]:
        raise ValueError("model and DOE don't suit together")

    x_mod = build_regression_matrix(x, model)
    var = sigma**2 * np.dot(np.dot(x_mod.T, np.linalg.inv(np.dot(H.T, H))),
                            x_mod)
    return var
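The distinction matters because the legacy np.rank counted dimensions rather than computing the linear-algebra rank:

import numpy as np

H = np.ones((4, 2))           # 4x2 matrix with identical columns
np.ndim(H)                    # -> 2 (what np.rank returned)
np.linalg.matrix_rank(H)      # -> 1 (the actual matrix rank)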
Example #56
0
def list_of_frames(img_name):
    """Return the list of frames for an image file.

    Details are as described in the imgimport_intelligent docstring.

    """

    img = Image.open(img_name)
    imglist = []

    try:
        for i in xrange(8):
            if img.mode == 'I':
                imdata = np.asarray(img, dtype=np.int16)
            else:
                imdata = np.asarray(img, dtype=np.float32)
            # fix 3-channel TIFF images by packing the channels into one value
            if np.rank(imdata) == 3:
                imdata = (imdata[:, :, 0] + 256 * imdata[:, :, 1]
                          + 65536 * imdata[:, :, 2])
            imglist.append(imdata)
            img.seek(i + 1)  # next frame
    except EOFError:
        pass

    if not imglist:
        raise ImportError('No frames detected in file %s' % img_name)
    else:
        return imglist
Example #57
0
def myTW(T, Td, P):
    td_lt_t = Td < T
    t_ge_cold = T >= 100.0

    typical = td_lt_t & t_ge_cold

    satVP = zeros(T.shape, dtype=T.dtype)
    satVP[typical] = calcSatVP(T[typical])

    norm_svp = satVP <= 10.0
    sane = typical & norm_svp

    veryCold = td_lt_t & ~t_ge_cold
    td_satvp_bad = ~(td_lt_t & norm_svp)  # includes NaN cells

    # P may be a scalar (rank 0) or an array; select elements accordingly
    if rank(P) == 0:
        sel = lambda val, mask: val
    else:
        sel = lambda val, mask: val[mask]

    TW = empty(T.shape, dtype=T.dtype)
    TW[td_satvp_bad] = (T[td_satvp_bad] + Td[td_satvp_bad]) / 2
    TW[veryCold] = T[veryCold]
    TW[sane] = calcTW(T[sane], Td[sane], sel(P, sane), satVP[sane])

    return TW
Example #58
0
    def compute_corner_simplex(self):
        if rank(self.indices) < 2:
            self.corner_simplex = Simplex(self.indices)
        else:
            corner_value = self.indices.min()
            corner_index = (self.indices == corner_value).nonzero()

            rest = self.indices[[
                tuple(x) for x in bitwise_xor(
                    eye(rank(self.indices), dtype=int), array(corner_index))
            ]]

            parity = sum(corner_index)[0] % 2

            self.corner_simplex = Simplex([corner_value] + rest.tolist(),
                                          parity)
Example #59
0
    def _addCustomData(self, value, name, **kwargs):
        '''
        The custom data will be added as a comment line in the form::

            #C name : value

        .. note:: non-scalar values (or names/values containing an end-of-line) will not be written
        '''
        if self.filename is None:
            self.info('Custom data "%s" will not be stored in SPEC file. Reason: uninitialized file', name)
            return
        if numpy.rank(value) > 0:  # ignore non-scalars
            self.info('Custom data "%s" will not be stored in SPEC file. Reason: value is non-scalar', name)
            return
        v = str(value)
        if '\n' in v or '\n' in name:  # ignore if the name or the value's string representation contains an end-of-line
            self.info('Custom data "%s" will not be stored in SPEC file. Reason: unsupported format', name)
            return

        fileWasClosed = self.fd is None or self.fd.closed
        if fileWasClosed:
            try:
                self.fd = open(self.filename, 'a')
            except Exception:
                self.info('Custom data "%s" will not be stored in SPEC file. Reason: cannot open file', name)
                return
        self.fd.write('#C %s : %s\n' % (name, v))
        self.fd.flush()
        if fileWasClosed:
            self.fd.close()  # leave the file descriptor as found