Example #1
    def intersect(self, spec):
        """Intersect with the region specification.

        'spec' is a region specification of the form defined in the grid module.

        Returns (mask, indexspecs) where
        'mask' is the mask of the result grid AFTER self and the region spec are intersected.
        'indexspecs' is a dictionary of index specifications suitable for slicing a
          variable with the given grid.
        """

        ni, nj = self.shape
        index = self.getIndex()
        latspec = spec[CoordTypeToLoc[LatitudeType]]
        lonspec = spec[CoordTypeToLoc[LongitudeType]]
        latlin = numpy.ravel(numpy.ma.filled(self._lataxis_))
        lonlin = numpy.ravel(numpy.ma.filled(self._lonaxis_))
        points = bindex.intersectHorizontalGrid(latspec, lonspec, latlin, lonlin, index)
        if len(points)==0:
            raise CDMSError('No data in the specified region, longitude=%s, latitude=%s' % (repr(lonspec), repr(latspec)))

        fullmask = numpy.ones(ni*nj)
        numpy.put(fullmask, points, 0)
        fullmask = numpy.reshape(fullmask, (ni,nj))
        
        iind = points // nj
        jind = points - iind*nj
        imin, imax, jmin, jmax = (min(iind), max(iind)+1, min(jind), max(jind)+1)
        submask = fullmask[imin:imax, jmin:jmax]

        yid = self.getAxis(0).id
        xid = self.getAxis(1).id
        indexspecs = {yid:slice(imin,imax), xid:slice(jmin,jmax)}

        return submask, indexspecs
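
The index arithmetic above packs (row, column) cells into flat indices and unpacks them again. A minimal standalone sketch of that bookkeeping, with a toy 3x4 grid and invented intersection points:

import numpy

ni, nj = 3, 4
points = numpy.array([5, 6, 9, 10])   # flat indices of cells inside the region (invented)
fullmask = numpy.ones(ni * nj)
numpy.put(fullmask, points, 0)        # 0 marks cells that intersect the region
fullmask = numpy.reshape(fullmask, (ni, nj))
iind = points // nj                   # row index of each selected cell
jind = points - iind * nj             # column index of each selected cell
print(iind.min(), iind.max() + 1, jind.min(), jind.max() + 1)   # 1 3 1 3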
Example #2
 def _writeFunc(self):
     nrows = 0
     arr = np.ndarray(shape=(self.write_rows, self.totalCols), dtype=np.float32)
     np_write_arr = [i for i in range(self.totalCols)]
     while nrows < self.totalRows:
         # remaining rows < write_rows, only write in as many as are extra
         if nrows + self.write_rows >= self.totalRows:
             rem = self.totalRows - nrows
             arr.resize((rem, self.totalCols))
             try:
                 for row in range(rem):
                     np.put(arr[row], np_write_arr, self.input_pipe.recv())
             except EOFError:
                 print("Pipe closed unexpectedly")
                 self.stop()
         else:
             # write in as many rows as write_rows indicates
             try:
                 for row in range(self.write_rows):
                     np.put(arr[row], np_write_arr, self.input_pipe.recv())
             except EOFError:
                 print("Pipe closed unexpectedly")
                 self.stop()
         # write out rows
         self.dataset.GetRasterBand(1).WriteArray(arr, 0, nrows)
         nrows+=self.write_rows
         self.pb.step(self.write_rows)
         self.rt.update()
     # write out remaining lines
     self.dataset.FlushCache()
     print("Output %s written to disk" % self.file_name)
Example #3
    def transform(self, func, *args, **kwargs):
        """
        Call function producing a like-indexed Series on each group and return
        a Series with the transformed values

        Parameters
        ----------
        func : function
            To apply to each group. Should return a Series with the same index

        Example
        -------
        >>> grouped.transform(lambda x: (x - x.mean()) / x.std())

        Returns
        -------
        transformed : Series
        """
        result = self.obj.copy()

        for name, group in self:
            group.name = name
            res = func(group, *args, **kwargs)
            indexer = self.obj.index.get_indexer(group.index)
            np.put(result, indexer, res)

        return result
Example #4
    def _select_mono(self, chunk):
        keep_monomorphic = self.keep_monomorphic

        gts = chunk[GT_FIELD]
        if is_dataset(gts):
            gts = gts[:]

        shape = gts.shape

        # we count how many different alleles are per row
        # we do it adding a complex part to each number. The complex part is
        # related with the row. Then we use unique
        weight = 1j * numpy.arange(0, shape[0])
        weight = numpy.repeat(weight, shape[1] * shape[2]).reshape(shape)
        b = gts + weight
        _, ind = numpy.unique(b, return_index=True)
        b = numpy.zeros_like(gts)
        c = numpy.ones_like(gts)
        numpy.put(b, ind, c.flat[ind])
        c = numpy.sum(b, axis=(2, 1))

        # we remove the missing values from the count
        rows_with_missing = numpy.any(gts == -1, axis=(1, 2))
        c -= rows_with_missing

        if keep_monomorphic:
            selected_rows = (c <= 2)
        else:
            selected_rows = (c == 2)
        return selected_rows
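
A minimal standalone sketch (not part of the original module) of the complex-weight trick described in the comments above: adding a row-specific imaginary part keeps values from different rows distinct, so numpy.unique plus numpy.put can count the distinct genotype values per row.

import numpy

gts = numpy.array([[[0, 0], [0, 0]],    # monomorphic row -> 1 distinct value
                   [[0, 1], [1, 1]]])   # polymorphic row -> 2 distinct values
shape = gts.shape
weight = 1j * numpy.arange(0, shape[0])
weight = numpy.repeat(weight, shape[1] * shape[2]).reshape(shape)
b = gts + weight
_, ind = numpy.unique(b, return_index=True)
b = numpy.zeros_like(gts)
numpy.put(b, ind, 1)                    # flag the first occurrence of each distinct value
print(numpy.sum(b, axis=(2, 1)))        # [1 2]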
Example #5
 def basic_mutation(self_individual, individual):
     """Performs a basic mutation where one value in the chromosome is replaced by another valid value."""
     idx = numpy.random.randint(0, len(individual.genotype))
     value = numpy.random.uniform(low=-100.0, high=100.0)
     numpy.put(individual.genotype, [idx], [value])
     individual.fitness = individual.fitness_evaluator.evaluate(individual)
     return individual
Example #6
def load_csv(fname, items_num, columns_used=None, memmap=False, dtype=np.float32):
    #
    #   Load csv file with features. Last column is treated as Label.
    #
    #   Parameters:
    #       fname -- file name
    #       items_num -- number of rows (data items) to read from the file
    #       columns_used -- specify indices of used features
    #       memmap -- if True then numpy array is mapped onto disk (not supported yet)
    #
    with open(fname, 'r', newline='') as csv_file:
        reader = csv.reader(csv_file, delimiter=',')
        header = next(reader)

        if columns_used:
            feature_dim = len(columns_used)
            columns_used = np.asarray(columns_used)
        else:
            feature_dim = len(header)-1

        features = np.zeros((items_num, feature_dim), dtype=dtype)
        results = np.zeros((items_num, 1), dtype=np.int32)

        for i in range(items_num):
            line = np.asarray(next(reader), dtype=dtype)

            if columns_used is not None:
                np.put(features[i], np.arange(0, feature_dim), line[columns_used])
            else:
                np.put(features[i], np.arange(0, feature_dim), line[:-1])

            results[i] = int(line[-1])

        return features, results
Example #7
def _inverse_permutation(p):
    """inverse permutation p"""
    n = p.size
    s = np.zeros(n, dtype=np.int32)
    i = np.arange(n, dtype=np.int32)
    np.put(s, p, i)  # s[p] = i
    return s
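
A short, self-contained usage sketch of the same pattern: scattering 0..n-1 into the positions given by p yields the inverse, so applying p and then its inverse restores the original order.

import numpy as np

p = np.array([2, 0, 3, 1], dtype=np.int32)
s = np.zeros(p.size, dtype=np.int32)
np.put(s, p, np.arange(p.size, dtype=np.int32))   # s[p] = 0..n-1
print(s)       # [1 3 0 2]
print(p[s])    # [0 1 2 3] -- p composed with its inverse is the identity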
Example #8
  def getPattern(self, idx, sparseBinaryForm=False, cat=None):
    """Gets a training pattern either by index or category number.

    @param idx Index of the training pattern

    @param sparseBinaryForm If true, returns a list of the indices of the
        non-zero bits in the training pattern

    @param cat If not None, get the first pattern belonging to category cat. If
        this is specified, idx must be None.

    @return The training pattern with specified index
    """
    if cat is not None:
      assert idx is None
      idx = self._categoryList.index(cat)

    if not self.useSparseMemory:
      pattern = self._Memory[idx]
      if sparseBinaryForm:
        pattern = pattern.nonzero()[0]

    else:
      (nz, values) = self._Memory.rowNonZeros(idx)
      if not sparseBinaryForm:
        pattern = numpy.zeros(self._Memory.nCols())
        numpy.put(pattern, nz, 1)
      else:
        pattern = nz

    return pattern
Example #9
  def getPattern(self, idx, sparseBinaryForm=False, cat=None):
    """Return a training pattern either by index or category number

    Parameters:
    ------------------------------------------------------------------------
    idx:                Index of the training pattern
    sparseBinaryForm:   If true, return only a list of the non-zeros in the
                          training pattern
    cat:                If not None, get the first pattern belonging to category
                          cat. If this is specified, idx must be None

    """

    if cat is not None:
      assert idx is None
      idx = self._categoryList.index(cat)

    if not self.useSparseMemory:
      pattern = self._Memory[idx]
      if sparseBinaryForm:
        pattern = pattern.nonzero()[0]

    else:
      (nz, values) = self._Memory.rowNonZeros(idx)
      if not sparseBinaryForm:
        pattern = numpy.zeros(self._Memory.nCols())
        numpy.put(pattern, nz, 1)
      else:
        pattern = nz

    return pattern
Example #10
    def dimensions(self, box):
        """Set unitcell with (*A*, *B*, *C*, *alpha*, *beta*, *gamma*)

        .. versionadded:: 0.9.0
        """
        # note that we can re-use self._ts_order with put!
        np.put(self._unitcell, self._ts_order, box)
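
The setter above relies on np.put scattering the box components into a differently ordered internal buffer. A toy illustration with an invented storage order (the real _ts_order of the format is not shown here):

import numpy as np

_ts_order = [0, 2, 5, 4, 3, 1]                  # assumed order, for illustration only
_unitcell = np.zeros(6)
box = [10.0, 20.0, 30.0, 90.0, 90.0, 120.0]     # (A, B, C, alpha, beta, gamma)
np.put(_unitcell, _ts_order, box)               # _unitcell[_ts_order[i]] = box[i]
print(_unitcell)                                # [ 10. 120.  20.  90.  90.  30.]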
Example #11
	def _process(self, X, column, model_class):
		# Remove values that are in mask
		mask = np.array(self._get_mask(X)[:, column].T)[0]
		mask_indices = np.where(mask==True)[0]
		X_data = np.delete(X, mask_indices, 0)

		# Instantiate the model
		model = model_class()

		# Slice out the column to predict and delete the column.
		y_data = X_data[:, column]
		X_data = np.delete(X_data, column, 1)

		# Split training and test data
		X_train, X_test, y_train, y_test = train_test_split(X_data, y_data, test_size=0.33, random_state=42)

		# Fit the model
		model.fit(X_train, y_train)

		# Score the model
		scores = model.score(X_test, y_test)

		# Predict missing vars
		X_predict = np.delete(X, column, 1)
		y = model.predict(X_predict)

		# Replace values in X with their predictions
		predict_indices = np.where(mask==False)[0]
		np.put(X, predict_indices, np.take(y, predict_indices))
	
		# Return model and scores
		return (model, scores)
Example #12
from numpy import resize, put, arange, flatnonzero

def koskinon(n):
    flags = resize((0,1,0,0,0,1), (n**2,))
    put(flags, (0,2,3), 1)
    for i in arange(5,n,2):
        if flags[i]:
            flags[i*i::i] = 0
    return flatnonzero(flags)[2:]
Example #13
def sortedlist(leng):
    counter=0
    aray=np.random.randint(1,1000,leng)
    for i in range(0,leng):
        ini=0
        ini1=1
        for i in aray:
            i2=aray[ini1]
            if i>i2:
                np.put(aray,ini1,i)
                np.put(aray,ini,i2)
                counter=counter+1
                print(aray)
                ini1=ini1+1
                ini=ini+1
                if ini1==len(aray):
                    break
            else:
                ini1=ini1+1
                ini=ini+1
                if ini1==len(aray):
                    break
    
    print("the number of shifts that occurred is:", counter - 1)
    return(aray)
    
Example #14
def cluster_sanity(sres):

    def clusters_intersect(c1, c2):
        s1 = set( c1.voxels )
        s2 = set( c2.voxels )
        return len(s1.intersection(s2)) > 0

    mn_pt = 1e10
    mx_nt = -1e10
    g, m = calc_grid_and_map(sres.vox_idx)
    img = np.zeros(g)
    for i, clist in enumerate((sres.ptail_clusters, sres.ntail_clusters)):
        for t in range(sres.t.shape[1]):
            for f in range(sres.t.shape[2]):
                np.put(img, m, sres.t[:,t,f])
                c_tf = clist[t][f]
                for c in c_tf:
                    cvals = np.take(img, c.voxels)
                    if i==1 and cvals.max() > mx_nt:
                        mx_nt = cvals.max()
                    if i==0 and cvals.min() < mn_pt:
                        mn_pt = cvals.min()
                    
                if len(c_tf) > 1:
                    for c1, c2 in zip(c_tf[:-1], c_tf[1:]):
                        assert not clusters_intersect(c1, c2), \
                               'Cluster intersection at tf=(%d,%d)'%(t,f)
    print('estimated ntail cutoff: %1.3f, estimated ptail cutoff: %1.3f' % (mx_nt, mn_pt))
Example #15
 def fit_final_model(self):
     final_model = RandomForestClassifier(n_estimators = self.ntrees, criterion = self.criterion)
     ws = np.zeros(len(self.y))
     np.put(ws, np.nonzero(self.y == 1)[0], self.params["weight"])
     np.put(ws, np.nonzero(self.y == 0)[0], 1 - self.params["weight"])
     final_model.fit(self.X[:, self.params["var_subset"]], self.y, sample_weight = ws)
     return final_model
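
A small sketch of the class-weighting pattern used above, with toy labels and an assumed weight value:

import numpy as np

y = np.array([1, 0, 0, 1, 0])
weight = 0.8                                    # assumed value of params["weight"]
ws = np.zeros(len(y))
np.put(ws, np.nonzero(y == 1)[0], weight)       # positive samples get `weight`
np.put(ws, np.nonzero(y == 0)[0], 1 - weight)   # negative samples get the complement
print(ws)                                       # [0.8 0.2 0.2 0.8 0.2]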
Example #16
def clustercolors(X, labels, means):
	"""
	Put colors into the means given
	Parameters
	----------
	X : (n,d) ndarray
	labels : (d,) ndarray
	means : (n,k) ndarray

	Return
	------
	clustereddata (d,n) ndarray with colored by means data
	"""
	n,d = np.shape(X)
	nm,k = np.shape(means)
	clustereddata = np.zeros((n,d))
#	print clustereddata[:,labels == 0].shape
	print(means[:,0])
	for temp in range(k):
#		clustereddata[:,labels == temp] = means[:,temp].flatten()
		print(np.where(labels == temp))
		ind = np.where(labels == temp)
		for temp2 in range(n):
			np.put(clustereddata[temp2,:],ind,means[temp2,temp])

	return clustereddata
Example #17
def tip_distances(a, bound_indices, tip_indices):
    """Sets each tip to its distance from the root."""
    for i, s in bound_indices:
        i += s
    mask = zeros(len(a))
    put(mask, tip_indices, 1)
    a *= mask[:,newaxis]
Example #18
def gaussian_profile(N, phase, fwhm):
    """
    gaussian_profile(N, phase, fwhm):
        Return a gaussian pulse profile with 'N' bins and
        an integrated 'flux' of 1 unit.
            'N' = the number of points in the profile
            'phase' = the pulse phase (0-1)
            'fwhm' = the gaussian pulse's full width at half-max
        Note:  The FWHM of a gaussian is approx 2.35482 sigma
    """
    sigma = fwhm / 2.35482
    mean = phase % 1.0
    phsval = np.arange(N, dtype='d') / float(N)
    if (mean < 0.5):
        phsval = np.where(np.greater(phsval, mean+0.5),
                           phsval-1.0, phsval)
    else:
        phsval = np.where(np.less(phsval, mean-0.5),
                           phsval+1.0, phsval)
    try:
        zs = (phsval-mean)/sigma
        okzinds = np.compress(np.fabs(zs)<20.0, np.arange(N))
        okzs = np.take(zs, okzinds)
        retval = np.zeros(N, 'd')
        np.put(retval, okzinds, np.exp(-0.5*(okzs)**2.0)/(sigma*np.sqrt(2*np.pi)))
        return retval
    except OverflowError:
        log.warning("Problem in gaussian prof:  mean = %f  sigma = %f" % \
              (mean, sigma))
        return np.zeros(N, 'd')
Example #19
def acceleration_operator(function, speed, diff_order):
    diff1 = numeric_diff(function, diff_order)
    diff2 = numeric_mydiff(diff1, diff_order)
    result = np.zeros(function.size)
    np.put(result, np.arange(1, diff2.size), diff2)
    result = result*float(speed**2)
    return result
Example #20
    def optimizer_array(self, p):
        """
        Make sure the optimizer copy does not get touched, thus, we only want to
        set the values *inside* not the array itself.

        Also we want to update param_array in here.
        """
        f = None
        if self.has_parent() and self.constraints[__fixed__].size != 0:
            f = np.ones(self.size).astype(bool)
            f[self.constraints[__fixed__]] = FIXED
        elif self._has_fixes():
            f = self._fixes_
        if f is None:
            self.param_array.flat = p
            [np.put(self.param_array, ind, c.f(self.param_array.flat[ind]))
             #py3 fix
             #for c, ind in self.constraints.iteritems() if c != __fixed__]
             for c, ind in self.constraints.items() if c != __fixed__]
        else:
            self.param_array.flat[f] = p
            [np.put(self.param_array, ind[f[ind]], c.f(self.param_array.flat[ind[f[ind]]]))
             #py3 fix
             #for c, ind in self.constraints.iteritems() if c != __fixed__]
             for c, ind in self.constraints.items() if c != __fixed__]
        #self._highest_parent_.tie.propagate_val()

        self._optimizer_copy_transformed = False
        self.trigger_update()
Example #21
def G1DConnBiasedMutateWeights(genome, **args):
	## args['mutation_conns'] takes the list of indices of connections to be mutated 
	if not args['mutation_conns']:
		mutation_conn_indices=rand_sample(numpy.array(len(genome)-1),int(args['pmut']*len(genome)))
		#		print "Indices of mutation weights:"
		#		print mutation_conn_indices
	else:
		mutation_conn_indices = args['mutation_conns']
		#		print "Indices of mutation weights:"
		#		print mutation_conn_indices

	mu = genome.getParam("gauss_mu")
	sigma = genome.getParam("gauss_sigma")

	new_mutation_list=[]
	for it in mutation_conn_indices:
		final_value = genome['weight'][it] + rand_gauss(mu, sigma)

		#final_value = min(final_value, genome.getParam("rangemax", Consts.CdefRangeMax))
		#final_value = max(final_value, genome.getParam("rangemin", Consts.CdefRangeMin))

		new_mutation_list.append(final_value)
	
	numpy.put(genome['weight'],[mutation_conn_indices],[new_mutation_list])
		
	return genome
Example #22
    def intersect(self, spec):
        """Intersect with the region specification.

        'spec' is a region specification of the form defined in the grid module.

        Returns (mask, indexspecs) where
        'mask' is the mask of the result grid AFTER self and the region spec are intersected.
        'indexspecs' is a dictionary of index specifications suitable for slicing a
          variable with the given grid.
        """

        ncell = self.shape
        index = self.getIndex()
        latspec = spec[CoordTypeToLoc[LatitudeType]]
        lonspec = spec[CoordTypeToLoc[LongitudeType]]
        latlin = numpy.ma.filled(self._lataxis_)
        lonlin = numpy.ma.filled(self._lonaxis_)
        lonlin = numpy.ma.where(numpy.ma.greater_equal(lonlin,360.0), lonlin-360.0, lonlin)
        points = bindex.intersectHorizontalGrid(latspec, lonspec, latlin, lonlin, index)
        if len(points)==0:
            raise CDMSError('No data in the specified region, longitude=%s, latitude=%s' % (repr(lonspec), repr(latspec)))

        fullmask = numpy.ones(ncell)
        numpy.put(fullmask, points, 0)
        
        imin, imax  = (min(points), max(points)+1)
        submask = fullmask[imin:imax]

        cellid = self.getAxis(0).id
        indexspecs = {cellid:slice(imin,imax)}

        return submask, indexspecs
Example #23
    def __init__(self, data) :

        if type(data) == type('') :
            print('file name:', data)
            data = datafunc.PyVectorDataSet(data, idColumn = 0, headerRow = True, hint = 'csv')

        self.data = data
        self.idDict = misc.list2dict(data.labels.patternID,
                                     range(len(data)))

        print(numpy.shape(data.X))
        self.mean = numpy.mean(data.X, 1)
        self.std = numpy.std(data.X, 1)
        eps = 1e-5
        I = numpy.nonzero(numpy.less(self.std, eps))[0]
        print('num zeros:', len(I))
        numpy.put(self.std, I, 1)
        
        self.numCorrelations = 10000
        correlations = numpy.zeros(self.numCorrelations, numpy.float)
        
        for i in range(self.numCorrelations) :
            i1 = random.randrange(0, len(data))
            i2 = random.randrange(0, len(data))
            correlations[i] = self._corrcoef(i1, i2)
        self.meanCorrelation = numpy.mean(correlations)
        self.numCorrelations = 1000        
Example #24
 def shift(x):
     x_shape = np.shape(x)        
     total_elements = x_shape[0] * x_shape[1]
     elements_to_roll = total_elements - (x_shape[1] * time_step)
     x = np.roll(AA(x, dtype=PRECISION_TO_TYPE[precision]), elements_to_roll)
     np.put(x, range(elements_to_roll, total_elements), default_value)
     return x
Example #25
    def _untransform_params(self, x):
        """
        The transformation required for _set_params_transformed.

        This moves the vector x seen by the optimiser (unconstrained) to the
        valid parameter vector seen by the model

        Note:
          - This function is separate from _set_params_transformed for downstream flexibility
        """
        # work out how many places are fixed, and where they are. tricky logic!
        fix_places = self.fixed_indices + [t[1:] for t in self.tied_indices]
        if len(fix_places):
            fix_places = np.hstack(fix_places)
            Nfix_places = fix_places.size
        else:
            Nfix_places = 0

        free_places = np.setdiff1d(np.arange(Nfix_places + x.size, dtype=int), fix_places)

        # put the models values in the vector xx
        xx = np.zeros(Nfix_places + free_places.size, dtype=np.float64)

        xx[free_places] = x
        [np.put(xx, i, v) for i, v in zip(self.fixed_indices, self.fixed_values)]
        [np.put(xx, i, v) for i, v in [(t[1:], xx[t[0]]) for t in self.tied_indices] ]

        [np.put(xx, i, t.f(xx[i])) for i, t in zip(self.constrained_indices, self.constraints)]
        if hasattr(self, 'debug'):
            stop # @UndefinedVariable

        return xx
Example #26
def testAntisymmetric(matrix):
    size = matrix.shape
    if size[0] != size[1]:
        return False
    if size[0] == size[1]:

        inputArray = numpy.array(matrix)
        transposeArray = inputArray.T
        transposeMatrix = numpy.matrix(transposeArray)
        identityArray = numpy.identity(size[0])
        identityMatrix = numpy.matrix(identityArray)
        finalProduct = numpy.arange(size[0] ** 2)
        topVal = size[0] ** 2
        counter = 0
        
        while (counter < topVal):
            replaceVal = finalProduct.item(counter)
            if matrix.item(counter) == 1 and transposeMatrix.item(counter) == 1:
                numpy.put(finalProduct, [replaceVal], [1])
            else:
                numpy.put(finalProduct, [replaceVal], [0])
            counter += 1
            
        finalMatrix = numpy.matrix(finalProduct)
                
        
        if lessThanOrEqual(finalMatrix, identityMatrix, size[0]):
            return True
        return False
Example #27
    def _add_ids(self, ids):
        n = len(ids)
        if n == 0:
            return

        id_max = max(ids)
        id_max_old = len(self._inds)-1
        n_array_old = len(self)

        ids_existing = np.take(ids, np.flatnonzero(np.less(ids, id_max_old)))
        # print '  ids',ids,'id_max_old',id_max_old,'ids_existing',ids_existing

        # check here if ids are still available
        # if np.sometrue(  np.not_equal( np.take(self._inds, ids_existing), -1)  ):
        #    print 'WARNING in create_ids: some ids already in use',ids_existing
        #    return np.zeros(0,int)

        # extend index map with -1 as necessary
        if id_max > id_max_old:
            # print 'ext',-1*ones(id_max-id_max_old)
            self._inds = np.concatenate((self._inds, -1*np.ones(id_max-id_max_old, int)))

        # assign n new indexes to new ids
        ind_new = np.arange(n_array_old, n_array_old+n, dtype=np.int32)

        # print 'ind_new',ind_new
        np.put(self._inds, ids, ind_new)

        # print '  concat ids..',self._ids,ids
        self._ids = np.concatenate((self._ids, ids))
Example #28
    def expand( self, prof, mask, default ):
        """
        Expand profile to have a value also for masked positions.

        :param prof: input profile
        :type  prof: list OR array
        :param mask: atom mask
        :type  mask: [int]
        :param default: default value
        :type  default: any
        
        :return: profile
        :rtype: list OR array
        """
        if mask is not None:

            ## optimized variant for arrays
            if isinstance( prof, N.ndarray ):
                p = N.resize( prof, (len(mask), ) )
                p[:] = default
                N.put( p, N.nonzero( mask )[0], prof )
                return p

            p = [ default ] * len( mask )
            prof.reverse()
            for i in N.nonzero( mask )[0]:
                p[i] = prof.pop()
            return p

        return prof
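
A toy illustration of the array branch above (values invented): the short profile is scattered back into the positions where the mask is non-zero, and every other position keeps the default.

import numpy as N

mask = N.array([1, 0, 1, 1, 0])
prof = N.array([10., 20., 30.])
p = N.resize(prof, (len(mask),))
p[:] = -1                                  # default value
N.put(p, N.nonzero(mask)[0], prof)
print(p)                                   # [10. -1. 20. 30. -1.]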
Example #29
    def python_metropolis(self):
        """Implementation of the Metropolis algorithm."""
        energy = cy_potts_model.calculate_lattice_energy(self.lattice, self.lattice_size, self.bond_energy)
        magnetization = self.potts_order_parameter()
        for t in range(self.sweeps):
            # Measurement every sweep.
            np.put(self.energy_history, t, energy)
            np.put(self.magnetization_history, t, magnetization)
            for k in range(self.lattice_size**2):
                states = [0, 1, 2]
                # Pick a random location on the lattice.
                rand_y = np.random.randint(0, self.lattice_size)
                rand_x = np.random.randint(0, self.lattice_size)

                spin = self.lattice[rand_y, rand_x]  # Get spin at the random location.
                # Remove the state that the spin at the random location currently occupies.
                states.remove(spin)
                temp_lattice = copy.deepcopy(self.lattice)
                random_new_spin = np.random.choice(states)
                temp_lattice[rand_y, rand_x] = random_new_spin
                assert temp_lattice[rand_y, rand_x] != self.lattice[rand_y, rand_x]
                new_energy = cy_potts_model.calculate_lattice_energy(temp_lattice, self.lattice_size, self.bond_energy)
                energy_delta = new_energy - energy

                # Energy may always be lowered.
                if energy_delta <= 0:
                    acceptance_probability = 1
                # Energy is increased with probability proportional to Boltzmann distribution.
                else:
                    acceptance_probability = np.exp(-self.beta * energy_delta)
                if np.random.random() <= acceptance_probability:
                    # Flip the spin and change the energy.
                    self.lattice[rand_y, rand_x] = random_new_spin
                    energy += energy_delta
                    magnetization = self.potts_order_parameter()
Example #30
 def test_get_strain_state_dict(self):
     strain_inds = [(0,), (1,), (2,), (1, 3), (1, 2, 3)]
     vecs = {}
     strain_states = []
     for strain_ind in strain_inds:
         ss = np.zeros(6)
         np.put(ss, strain_ind, 1)
         strain_states.append(tuple(ss))
         vec = np.zeros((4, 6))
         rand_values = np.random.uniform(0.1, 1, 4)
         for i in strain_ind:
             vec[:, i] = rand_values
         vecs[strain_ind] = vec
     all_strains = [Strain.from_voigt(v).zeroed() for vec in vecs.values()
                    for v in vec]
     random.shuffle(all_strains)
     all_stresses = [Stress.from_voigt(np.random.random(6)).zeroed()
                     for s in all_strains]
     strain_dict = {k.tostring():v for k,v in zip(all_strains, all_stresses)}
     ss_dict = get_strain_state_dict(all_strains, all_stresses, add_eq=False)
     # Check length of ss_dict
     self.assertEqual(len(strain_inds), len(ss_dict))
     # Check sets of strain states are correct
     self.assertEqual(set(strain_states), set(ss_dict.keys()))
     for strain_state, data in ss_dict.items():
         # Check correspondence of strains/stresses
         for strain, stress in zip(data["strains"], data["stresses"]):
             self.assertArrayAlmostEqual(Stress.from_voigt(stress), 
                                         strain_dict[Strain.from_voigt(strain).tostring()])
Example #31
# Compute z-values from q-values
for hemi in hemis:
    for test in tests:
        results[hemi]['q_z({0})'.format(test)] = norm.ppf(1 - results[hemi]['q({0})'.format(test)])

# Stack all these up, reinsert medial wall, and save
for hemi in hemis:
    for test in tests:
        stack = []
        subtests = ['mean({0})'.format(test), 't({0})'.format(test),
                    'p({0})'.format(test), 'z({0})'.format(test),
                    'q({0})'.format(test), 'q_z({0})'.format(test)]
        for subtest in subtests:
            result = np.zeros(40962)
            np.put(result, cortical_vertices[hemi], results[hemi][subtest])
            stack.append(result)
        stack = np.vstack(stack)
        assert stack.shape == (6, 40962)
        mv.niml.write('group_diff_{0}.{1}.niml.dset'.format(test, hemi), stack)

# Compute proportion greater per subject
proportions = {}
for test in tests:
    proportions[test] = {}
    proportions[test]['all'] = (np.sum(np.hstack((
                                    runwise_diffs['lh']['mean({0})'.format(test)],
                                    runwise_diffs['rh']['mean({0})'.format(test)])) > 0, axis=1) /
                                float(len(cortical_vertices['lh']) + len(cortical_vertices['rh'])))
    proportions[test]['mean'] = np.mean(proportions[test]['all'])
    proportions[test]['t-value'], proportions[test]['p-value'] = ttest_1samp(proportions[test]['all'], .5)
Example #32
    def make_weights(self):
        self.lat_ctr = self.lat
        self.lon_ctr = self.lon[self.lon.shape[0] // 2]
        print('lat ctr, lon ctr', self.lat_ctr, self.lon_ctr)
        lon1 = self.lon[np.abs(self.lon_ctr - self.lon) < self.params.lon_r]
        if self.lat.shape[0] % 2 == 0:
            indla = self.lat.shape[0] // 2
        else:
            indla = self.lat.shape[0] // 2 + 1
        Gauss_wts = np.full([lon1.shape[0], lon1.shape[0], indla], np.nan)
        lat_wts = np.full([lon1.shape[0], indla], np.nan)
        lon_wts = lon1

        print(' in make_weights', self.latd, indla, lat_wts.shape)
        # compute gaussian weights for the first half of the latitude grid
        for jj, la in enumerate(self.latd[0:indla]):
            print(
                'lat center = {:3.3f}, lon center = {:1.3f}'.format(
                    la, self.lon_ctr), jj, lat_wts.shape)
            lat1 = self.lat[np.abs(la - self.lat) < self.params.lat_r]
            #print(self.params.lat_r,lat1)
            np.put(lat_wts[:, jj], range(lat1.shape[0]), lat1)
            dd = np.array([
                distance.distance((lat1[j], lon1[i]), (la, self.lon_ctr)).m
                for j in range(len(lat1)) for i in range(len(lon1))
            ]).reshape(len(lat1), len(lon1))
            Gauss_wts[range(lat1.shape[0]), :,
                      jj] = 1 / (2 * np.pi * self.params.sigma**2) * np.exp(
                          -dd**2 / (2 * self.params.sigma**2))
            #dd_wts[range(lat1.shape[0]),:,jj] = dd
#        plt.figure(56);plt.clf()
#        plt.subplot(211)
#        plt.imshow(lat_wts,vmin=-80,vmax=80);plt.colorbar()
# concatenate with a mirror image for the second hemisphere
        if self.lat.shape[0] % 2 == 0:
            lat_wts = np.concatenate(
                [lat_wts, -1 * np.flipud(np.fliplr(lat_wts))], axis=1)
            Gauss_wts = np.concatenate([
                Gauss_wts,
                np.moveaxis(
                    np.array([
                        np.flipud(Gauss_wts[:, :, jj])
                        for jj in range(indla - 1, -1, -1)
                    ]), 0, -1)
            ],
                                       axis=2)

        else:  # if lat vector has a length that is odd, HAS NOT BEEN CHECKED!!!!!
            print('you are in uncharted terr')
            lat_wts = np.concatenate(
                [lat_wts, -1 * np.flipud(np.fliplr(lat_wts[0:-1]))], axis=1)
            Gauss_wts = np.concatenate([
                Gauss_wts,
                np.moveaxis(
                    np.array([
                        np.flipud(Gauss_wts[:, :, jj])
                        for jj in range(indla - 2, -1, -1)
                    ]), 0, 1)
            ],
                                       axis=2)
            #dd_wts    = np.concatenate([dd_wts,np.moveaxis(np.array([np.flipud(dd_wts[:,:,jj]) for jj in range(indla-2,-1,-1)]),0,1)],axis=2)
#        print(Gauss_wts.shape,dd.shape)
#        plt.figure(56)
#        plt.subplot(212)
#        plt.imshow(lat_wts,vmin=-80,vmax=80);plt.colorbar()
#        plt.figure(55);plt.clf()
#        plt.imshow(Gauss_wts[:,:,90]);plt.colorbar()
#        plt.figure(54);plt.clf()
#        plt.imshow(dd);plt.colorbar()

#        class Object(object):
#            pass
#        Gauss = Object()
#        Gauss.wts = Gauss_wts
#        Gauss.lat_wts = lat_wts
#        Gauss.lon_wts = lon_wts
#        Gauss.lat_ctr = self.lat_ctr
#        Gauss.lon_ctr = self.lon_ctr
#        return  Gauss
        self.wts = Gauss_wts
        self.lat_wts = lat_wts
        self.lon_wts = lon_wts
        return self
Example #33
def normalizeAlpha(df, taxaDict, mySet, factor):
    df2 = df.reset_index()
    taxaID = [
        'kingdomid', 'phylaid', 'classid', 'orderid', 'familyid', 'genusid',
        'speciesid'
    ]

    countDF = pd.DataFrame()
    if factor in ['min', '10th percentile', '25th percentile', 'median']:
        countDF = df2[taxaID].reset_index(drop=True)
        col_totals = np.array(df.sum(axis=0))

        reads = 0
        if factor == 'min':
            reads = int(np.min(col_totals))
        elif factor == '10th percentile':
            reads = int(np.percentile(col_totals, 10))
        elif factor == '25th percentile':
            reads = int(np.percentile(col_totals, 25))
        elif factor == 'median':
            reads = int(np.median(col_totals))

        for i in mySet:
            arr = asarray(df[i].T)
            cols = shape(arr)

            sample = arr.astype(dtype=np.float64)
            myLambda = 0.1

            #Lidstone's approximation
            prob = (sample + myLambda) / (sample.sum() + cols[0] * myLambda)

            final = np.zeros(cols)
            for x in range(reads):
                sub = np.random.mtrand.choice(range(sample.size),
                                              size=1,
                                              replace=True,
                                              p=prob)
                temp = np.zeros(cols)
                np.put(temp, sub, 1)
                final = np.add(final, temp)

            tempDF = pd.DataFrame(final, columns=[i])
            countDF = countDF.join(tempDF)
    elif factor == 'none':
        countDF = df2.reset_index(drop=True)

    relabundDF = countDF[taxaID]
    binaryDF = countDF[taxaID]
    diversityDF = countDF[taxaID]
    for i in mySet:
        relabundDF[i] = countDF[i].div(countDF[i].sum(axis=1), axis=0)
        binaryDF[i] = countDF[i].apply(lambda x: 1 if x != 0 else 0)
        diversityDF[i] = relabundDF[i].apply(lambda x: -1 * x * math.log(x)
                                             if x > 0 else 0)

    rowsList = []
    namesDF = pd.DataFrame()
    normDF = pd.DataFrame()
    for key in taxaDict:
        taxaList = taxaDict[key]

        if isinstance(taxaList, str):
            if key == 'Kingdom':
                qs1 = Kingdom.objects.filter(kingdomid=taxaList).values(
                    'kingdomid', 'kingdomName')
                namesDF = pd.DataFrame.from_records(
                    qs1, columns=['kingdomid', 'kingdomName'])
                namesDF.rename(columns={
                    'kingdomid': 'taxa_id',
                    'kingdomName': 'taxa_name'
                },
                               inplace=True)
            elif key == 'Phyla':
                qs1 = Phyla.objects.filter(phylaid=taxaList).values(
                    'phylaid', 'phylaName')
                namesDF = pd.DataFrame.from_records(
                    qs1, columns=['phylaid', 'phylaName'])
                namesDF.rename(columns={
                    'phylaid': 'taxa_id',
                    'phylaName': 'taxa_name'
                },
                               inplace=True)
            elif key == 'Class':
                qs1 = Class.objects.filter(classid=taxaList).values(
                    'classid', 'className')
                namesDF = pd.DataFrame.from_records(
                    qs1, columns=['classid', 'className'])
                namesDF.rename(columns={
                    'classid': 'taxa_id',
                    'className': 'taxa_name'
                },
                               inplace=True)
            elif key == 'Order':
                qs1 = Order.objects.filter(orderid=taxaList).values(
                    'orderid', 'orderName')
                namesDF = pd.DataFrame.from_records(
                    qs1, columns=['orderid', 'orderName'])
                namesDF.rename(columns={
                    'orderid': 'taxa_id',
                    'orderName': 'taxa_name'
                },
                               inplace=True)
            elif key == 'Family':
                qs1 = Family.objects.filter(familyid=taxaList).values(
                    'familyid', 'familyName')
                namesDF = pd.DataFrame.from_records(
                    qs1, columns=['familyid', 'familyName'])
                namesDF.rename(columns={
                    'familyid': 'taxa_id',
                    'familyName': 'taxa_name'
                },
                               inplace=True)
            elif key == 'Genus':
                qs1 = Genus.objects.filter(genusid=taxaList).values(
                    'genusid', 'genusName')
                namesDF = pd.DataFrame.from_records(
                    qs1, columns=['genusid', 'genusName'])
                namesDF.rename(columns={
                    'genusid': 'taxa_id',
                    'genusName': 'taxa_name'
                },
                               inplace=True)
            elif key == 'Species':
                qs1 = Species.objects.filter(speciesid=taxaList).values(
                    'speciesid', 'speciesName')
                namesDF = pd.DataFrame.from_records(
                    qs1, columns=['speciesid', 'speciesName'])
                namesDF.rename(columns={
                    'speciesid': 'taxa_id',
                    'speciesName': 'taxa_name'
                },
                               inplace=True)
        else:
            if key == 'Kingdom':
                qs1 = Kingdom.objects.filter(kingdomid__in=taxaList).values(
                    'kingdomid', 'kingdomName')
                namesDF = pd.DataFrame.from_records(
                    qs1, columns=['kingdomid', 'kingdomName'])
                namesDF.rename(columns={
                    'kingdomid': 'taxa_id',
                    'kingdomName': 'taxa_name'
                },
                               inplace=True)
            elif key == 'Phyla':
                qs1 = Phyla.objects.filter(phylaid__in=taxaList).values(
                    'phylaid', 'phylaName')
                namesDF = pd.DataFrame.from_records(
                    qs1, columns=['phylaid', 'phylaName'])
                namesDF.rename(columns={
                    'phylaid': 'taxa_id',
                    'phylaName': 'taxa_name'
                },
                               inplace=True)
            elif key == 'Class':
                qs1 = Class.objects.filter(classid__in=taxaList).values(
                    'classid', 'className')
                namesDF = pd.DataFrame.from_records(
                    qs1, columns=['classid', 'className'])
                namesDF.rename(columns={
                    'classid': 'taxa_id',
                    'className': 'taxa_name'
                },
                               inplace=True)
            elif key == 'Order':
                qs1 = Order.objects.filter(orderid__in=taxaList).values(
                    'orderid', 'orderName')
                namesDF = pd.DataFrame.from_records(
                    qs1, columns=['orderid', 'orderName'])
                namesDF.rename(columns={
                    'orderid': 'taxa_id',
                    'orderName': 'taxa_name'
                },
                               inplace=True)
            elif key == 'Family':
                qs1 = Family.objects.filter(familyid__in=taxaList).values(
                    'familyid', 'familyName')
                namesDF = pd.DataFrame.from_records(
                    qs1, columns=['familyid', 'familyName'])
                namesDF.rename(columns={
                    'familyid': 'taxa_id',
                    'familyName': 'taxa_name'
                },
                               inplace=True)
            elif key == 'Genus':
                qs1 = Genus.objects.filter(genusid__in=taxaList).values(
                    'genusid', 'genusName')
                namesDF = pd.DataFrame.from_records(
                    qs1, columns=['genusid', 'genusName'])
                namesDF.rename(columns={
                    'genusid': 'taxa_id',
                    'genusName': 'taxa_name'
                },
                               inplace=True)
            elif key == 'Species':
                qs1 = Species.objects.filter(speciesid__in=taxaList).values(
                    'speciesid', 'speciesName')
                namesDF = pd.DataFrame.from_records(
                    qs1, columns=['speciesid', 'speciesName'])
                namesDF.rename(columns={
                    'speciesid': 'taxa_id',
                    'speciesName': 'taxa_name'
                },
                               inplace=True)

        if key == 'Kingdom':
            rank = 'Kingdom'
            field = 'kingdomid'
        elif key == 'Phyla':
            rank = 'Phyla'
            field = 'phylaid'
        elif key == 'Class':
            rank = 'Class'
            field = 'classid'
        elif key == 'Order':
            rank = 'Order'
            field = 'orderid'
        elif key == 'Family':
            rank = 'Family'
            field = 'familyid'
        elif key == 'Genus':
            rank = 'Genus'
            field = 'genusid'
        elif key == 'Species':
            rank = 'Species'
            field = 'speciesid'

        for i in mySet:
            groupReads = countDF.groupby(field)[i].sum()
            groupAbund = relabundDF.groupby(field)[i].sum()
            groupRich = binaryDF.groupby(field)[i].sum()
            groupDiversity = diversityDF.groupby(field)[i].sum()
            if isinstance(taxaList, str):
                myDict = {}
                myDict['sampleid'] = i
                myDict['rank'] = rank
                myDict['taxa_id'] = taxaList
                myDict['count'] = groupReads[taxaList]
                myDict['total'] = groupReads.sum()
                myDict['rel_abund'] = groupAbund[taxaList]
                myDict['rich'] = groupRich[taxaList]
                myDict['diversity'] = groupDiversity[taxaList]
                rowsList.append(myDict)
            else:
                for j in taxaList:
                    myDict = {}
                    myDict['sampleid'] = i
                    myDict['rank'] = rank
                    myDict['taxa_id'] = j
                    myDict['count'] = groupReads[j]
                    myDict['total'] = groupReads.sum()
                    myDict['rel_abund'] = groupAbund[j]
                    myDict['rich'] = groupRich[j]
                    myDict['diversity'] = groupDiversity[j]
                    rowsList.append(myDict)
        DF1 = pd.DataFrame(rowsList,
                           columns=[
                               'sampleid', 'rank', 'taxa_id', 'count', 'total',
                               'rel_abund', 'rich', 'diversity'
                           ])
        DF1 = DF1.merge(namesDF, on='taxa_id', how='outer')
        DF1 = DF1[[
            'sampleid', 'rank', 'taxa_id', 'taxa_name', 'count', 'total',
            'rel_abund', 'rich', 'diversity'
        ]]
        if normDF.empty:
            normDF = DF1
        else:
            normDF = normDF.append(DF1)
    return normDF
Example #34
    def _apply_second_pass(
        self,
        waveforms,
        telid,
        selected_gain_channel,
        charge_1stpass,
        pulse_time_1stpass,
        correction,
    ):
        """
        Follow steps from 2 to 7.

        Parameters
        ----------
        waveforms : array of shape (N_pixels, N_samples)
            DL0-level waveforms of one event.
        telid : int
            Index of the telescope.
        selected_gain_channel: array of shape (N_channels, N_pixels)
            Array containing the index of the selected gain channel for each
            pixel (0 for low gain, 1 for high gain).
        charge_1stpass : array of shape N_pixels
            Pixel charges reconstructed with the 1st pass, but not corrected.
        pulse_time_1stpass : array of shape N_pixels
            Pixel-wise pulse times reconstructed with the 1st pass.
        correction: array of shape N_pixels
            Charge correction from 1st pass.

        Returns
        -------
        charge : array_like
            Integrated charge per pixel.
            Note that in the case of a very bright full-camera image this can
            coincide with the 1st pass information.
            Also in the case of very dim images the 1st pass will be recycled,
            but in this case the resulting image should be discarded
            from further analysis.
            Shape: (n_pix)
        pulse_time : array_like
            Samples in which the waveform peak has been recognized.
            Same specifications as above.
            Shape: (n_pix)
        """
        # STEP 2

        # Apply correction to 1st pass charges
        charge_1stpass = charge_1stpass * correction

        # Set thresholds for core-pixels depending on telescope
        core_th = self.core_threshold.tel[telid]
        # Boundary thresholds will be half of core thresholds.

        # Preliminary image cleaning with simple two-level tail-cut
        camera_geometry = self.subarray.tel[telid].camera.geometry
        mask_1 = tailcuts_clean(
            camera_geometry,
            charge_1stpass,
            picture_thresh=core_th,
            boundary_thresh=core_th / 2,
            keep_isolated_pixels=False,
            min_number_picture_neighbors=1,
        )
        image_1 = charge_1stpass.copy()
        image_1[~mask_1] = 0

        # STEP 3

        # find all islands using this cleaning
        num_islands, labels = number_of_islands(camera_geometry, mask_1)
        if num_islands == 0:
            image_2 = image_1.copy()  # no islands = image unchanged
        else:
            # ...find the biggest one
            mask_biggest = largest_island(labels)
            image_2 = image_1.copy()
            image_2[~mask_biggest] = 0

        # Indexes of pixels that will need the 2nd pass
        nonCore_pixels_ids = np.where(image_2 < core_th)[0]
        nonCore_pixels_mask = image_2 < core_th

        # STEP 4

        # if the resulting image has less then 3 pixels
        # or there are more than 3 pixels but all contain a number of
        # photoelectrons above the core threshold
        if np.count_nonzero(image_2) < 3:
            # we return the 1st pass information
            # NOTE: In this case, the image was not bright enough!
            # We should label it as "bad and NOT use it"
            return charge_1stpass, pulse_time_1stpass
        elif len(nonCore_pixels_ids) == 0:
            # Since all reconstructed charges are above the core threshold,
            # there is no need to perform the 2nd pass.
            # We return the 1st pass information.
            # NOTE: In this case, even if this is 1st pass information,
            # the image is actually very bright! We should label it as "good"!
            return charge_1stpass, pulse_time_1stpass

        # otherwise we proceed by parametrizing the image
        hillas = hillas_parameters(camera_geometry, image_2)

        # STEP 5

        # linear fit of pulse time vs. distance along major image axis
        # using only the main island surviving the preliminary
        # image cleaning
        # WARNING: in case of outliers, the fit can perform better if
        # it is a robust algorithm.
        timing = timing_parameters(camera_geometry, image_2,
                                   pulse_time_1stpass, hillas)

        # get projected distances along main image axis
        long, _ = camera_to_shower_coordinates(
            camera_geometry.pix_x,
            camera_geometry.pix_y,
            hillas.x,
            hillas.y,
            hillas.psi,
        )

        # get the predicted times as a linear relation
        predicted_pulse_times = (timing.slope * long[nonCore_pixels_ids] +
                                 timing.intercept)

        predicted_peaks = np.zeros(len(predicted_pulse_times))

        # Convert time in ns to sample index using the sampling rate from
        # the readout.
        # Approximate the value obtained to nearest integer, then cast to
        # int64 otherwise 'extract_around_peak' complains.
        sampling_rate = self.sampling_rate[telid]
        np.rint(predicted_pulse_times.value * sampling_rate, predicted_peaks)
        predicted_peaks = predicted_peaks.astype(np.int64)

        # Due to the fit these peak indexes can now be also outside of the
        # readout window, so later we check for this.

        # STEP 6

        # select only the waveforms correspondent to the non-core pixels
        # of the main island survived from the 1st pass image cleaning
        nonCore_waveforms = waveforms[nonCore_pixels_ids]

        # Build 'width' and 'shift' arrays that adapt on the position of the
        # window along each waveform

        # Now the definition of peak_index is really the peak.
        # We have to add 2 samples each side, so the shift will always
        # be (-)2, while width will always end 4 samples to the right.
        # This "always" refers to a 5-samples window of course
        window_widths = np.full_like(predicted_peaks, 4, dtype=np.int64)
        window_shifts = np.full_like(predicted_peaks, 2, dtype=np.int64)

        # BUT, if the resulting 5-samples window falls outside of the readout
        # window then we take the first (or last) 5 samples
        window_widths[predicted_peaks < 0] = 4
        window_shifts[predicted_peaks < 0] = 0
        window_widths[predicted_peaks > (waveforms.shape[1] - 1)] = 4
        window_shifts[predicted_peaks > (waveforms.shape[1] - 1)] = 4

        # Now we can also (re)define the pathological predicted times
        # (we needed them first to define the corresponding widths
        # and shifts)

        # set sample to 0 (beginning of the waveform) if predicted time
        # falls before
        predicted_peaks[predicted_peaks < 0] = 0
        # set sample to max-1 (first sample has index 0)
        # if predicted time falls after
        predicted_peaks[predicted_peaks > (waveforms.shape[1] - 1)] = (
            waveforms.shape[1] - 1)

        # re-calibrate non-core pixels using the fixed 5-samples window
        charge_noCore, pulse_times_noCore = extract_around_peak(
            nonCore_waveforms,
            predicted_peaks,
            window_widths,
            window_shifts,
            self.sampling_rate[telid],
        )

        # Modify integration correction factors only for non-core pixels
        correction_2ndPass = self._calculate_correction(
            telid,
            window_widths,
            window_shifts,
            selected_gain_channel[nonCore_pixels_ids],
        )
        np.put(correction, [nonCore_pixels_ids], correction_2ndPass)

        # STEP 7

        # Combine core and non-core pixels in the final output

        # this is the biggest cluster from the cleaned image
        # it contains the core pixels (which we leave untouched)
        # plus possibly some non-core pixels
        charge_2ndpass = image_2.copy()
        # Now we overwrite the charges of all non-core pixels in the camera
        # plus all those pixels which didn't survive the preliminary
        # cleaning.
        # We apply also their corrections.
        charge_2ndpass[
            nonCore_pixels_mask] = charge_noCore * correction_2ndPass

        # Same approach for the pulse times
        pulse_time_2npass = pulse_time_1stpass  # core + non-core pixels
        pulse_time_2npass[
            nonCore_pixels_mask] = pulse_times_noCore  # non-core pixels

        return charge_2ndpass, pulse_time_2npass
Example #35
 def dimensions(self, box):
     x, y, _ = triclinic_vectors(box)
     np.put(self._unitcell, self._ts_order_x, x)
     np.put(self._unitcell, self._ts_order_y, y)
Example #36
def _int_to_tag(tag_int, tag_vocab_size):
    # creates the one-hot vector
    a = np.empty(tag_vocab_size)
    a.fill(0)
    np.put(a, tag_int, 1)
    return a
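
A quick usage sketch of the same one-hot pattern (sizes invented):

import numpy as np

one_hot = np.zeros(5)
np.put(one_hot, 2, 1)    # equivalent to what _int_to_tag(2, 5) returns
print(one_hot)           # [0. 0. 1. 0. 0.]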
Example #37
#Now describe the DM - this is for the GUI only, not the RTC.
#The format is: ndms, N for each DM, actuator numbers...
#Where ndms is the number of DMs, N is the number of linear actuators for each, and the actuator numbers are then an array of size NxN with entries -1 for unused actuators, or the actuator number that will set this actuator in the DMC array.
dmDescription = numpy.zeros((8 * 8 + 2 * 2 + 2 + 1, ), numpy.int16)
dmDescription[0] = 2  #2 DMs
dmDescription[1] = 2  #1st DM has 2 linear actuators
dmDescription[2] = 8  #2nd DM has 8 linear actuators
tmp = dmDescription[3:7]
tmp[:] = -1
tmp[:2] = [52, 53]  #tip/tilt
tmp = dmDescription[7:]
tmp[:] = -1
tmp.shape = 8, 8
dmflag = tel.Pupil(8, 4, 0).fn.ravel()
numpy.put(tmp, dmflag.nonzero()[0], numpy.arange(52))

control = {
    "switchRequested":
    0,  #this is the only item in a currently active buffer that can be changed...
    "pause": 0,
    "go": 1,
    #"DMgain":0.25,
    #"staticTerm":None,
    "maxClipped": nacts,
    "refCentroids": None,
    #"dmControlState":0,
    #"gainReconmxT":None,#numpy.random.random((ncents,nacts)).astype("f"),#reconstructor with each row i multiplied by gain[i].
    #"dmPause":0,
    #"reconMode":"closedLoop",
    #"applyPxlCalibration":0,
Example #38
def get_strain_state_dict(strains,
                          stresses,
                          eq_stress=None,
                          tol=1e-10,
                          add_eq=True,
                          sort=True):
    """
    Creates a dictionary of voigt-notation stress-strain sets
    keyed by "strain state", i. e. a tuple corresponding to
    the non-zero entries in ratios to the lowest nonzero value,
    e.g. [0, 0.1, 0, 0.2, 0, 0] -> (0,1,0,2,0,0)
    This allows strains to be collected in stencils as to
    evaluate parameterized finite difference derivatives

    Args:
        strains (Nx3x3 array-like): strain matrices
        stresses (Nx3x3 array-like): stress matrices
        eq_stress (Nx3x3 array-like): equilibrium stress
        tol (float): tolerance for sorting strain states
        add_eq (bool): flag for whether to add eq_strain
            to stress-strain sets for each strain state
        sort (bool): flag for whether to sort strain states

    Returns:
        OrderedDict with strain state keys and dictionaries
        with stress-strain data corresponding to strain state
    """
    # Recast stress/strains
    vstrains = np.array([Strain(s).zeroed(tol).voigt for s in strains])
    vstresses = np.array([Stress(s).zeroed(tol).voigt for s in stresses])
    # Collect independent strain states:
    independent = {
        tuple(np.nonzero(vstrain)[0].tolist())
        for vstrain in vstrains
    }
    strain_state_dict = OrderedDict()
    if add_eq:
        if eq_stress is not None:
            veq_stress = Stress(eq_stress).voigt
        else:
            veq_stress = find_eq_stress(strains, stresses).voigt

    for n, ind in enumerate(independent):
        # match strains with templates
        template = np.zeros(6, dtype=bool)
        np.put(template, ind, True)
        template = np.tile(template, [vstresses.shape[0], 1])
        mode = (template == (np.abs(vstrains) > 1e-10)).all(axis=1)
        mstresses = vstresses[mode]
        mstrains = vstrains[mode]
        # Get "strain state", i.e. ratio of each value to minimum strain
        min_nonzero_ind = np.argmin(np.abs(np.take(mstrains[-1], ind)))
        min_nonzero_val = np.take(mstrains[-1], ind)[min_nonzero_ind]
        strain_state = mstrains[-1] / min_nonzero_val
        strain_state = tuple(strain_state)

        if add_eq:
            # add zero strain state
            mstrains = np.vstack([mstrains, np.zeros(6)])
            mstresses = np.vstack([mstresses, veq_stress])
        # sort strains/stresses by strain values
        if sort:
            mstresses = mstresses[mstrains[:, ind[0]].argsort()]
            mstrains = mstrains[mstrains[:, ind[0]].argsort()]
        strain_state_dict[strain_state] = {
            "strains": mstrains,
            "stresses": mstresses
        }
    return strain_state_dict
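
A minimal sketch of the strain-state normalization and the np.put template construction described in the docstring, using the docstring's own example vector:

import numpy as np

vstrain = np.array([0, 0.1, 0, 0.2, 0, 0])
ind = np.nonzero(vstrain)[0]                    # indices (1, 3)
template = np.zeros(6, dtype=bool)
np.put(template, ind, True)                     # marks the non-zero voigt components
print(template)                                 # [False  True False  True False False]
print(tuple(vstrain / vstrain[ind].min()))      # ratios (0, 1, 0, 2, 0, 0), matching the docstring example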
Example #39
def getTopReco(scoreNarray, n, removeIndexList):
    np.put(scoreNarray, removeIndexList, [-1])
    indices = np.argpartition(scoreNarray, -n)[-n:]
    indices = indices[np.argsort(-scoreNarray[indices])]
    scores = scoreNarray[indices]
    return list(zip(indices, scores))
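
A short usage sketch (scores and excluded indices invented): np.put masks already-seen items with -1 so they cannot re-enter the top-n selection.

import numpy as np

scores = np.array([0.1, 0.9, 0.4, 0.7, 0.3])
np.put(scores, [1], [-1])                    # exclude item 1 from the recommendations
top = np.argpartition(scores, -2)[-2:]
top = top[np.argsort(-scores[top])]
print(list(zip(top, scores[top])))           # item 3 (0.7) first, then item 2 (0.4)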
Example #40
    print(index, value)

# In[146]:

for index in np.ndindex(Z.shape):
    print(index, Z[index])

# #### Putting values at random positions in a 2D array

# In[149]:

n = 5
p = 3
Z = np.zeros((n, n))
position = np.random.choice(range(n * n), p, replace=False)
print(position)
np.put(Z, position, 1)
print(Z)

# #### Finding the value closest to a target

# In[150]:

Z = np.random.uniform(0, 1, 10)
print("Z배열\n", Z)
target = 0.5
print("목표 값과의 차이\n", np.abs(Z - target))
print("가장 근접한 값의 위치\n", np.abs(Z - target).argmin())
m = Z[np.abs(Z - target).argmin()]
print("그 값\n", m)
Beispiel #41
0
    def get_input_representation(self, words, pos, state):

        input_repr = np.zeros(6)

        if len(state.stack) >= 3:

            for i in range(1, 4):

                word_ind = state.stack[-i]

                word = words[word_ind]

                if word is None:
                    inx = self.word_vocab['<ROOT>']

                    np.put(input_repr, [i - 1], [inx])

                elif pos[word_ind] == 'CD':
                    inx = self.word_vocab['<CD>']

                    np.put(input_repr, [i - 1], [inx])

                elif pos[word_ind] == 'NNP':
                    inx = self.word_vocab['<NNP>']

                    np.put(input_repr, [i - 1], [inx])

                else:

                    if word.lower() in self.word_vocab:

                        inx = self.word_vocab[word.lower()]

                        np.put(input_repr, [i - 1], [inx])

                    else:
                        inx = self.word_vocab['<UNK>']

                        np.put(input_repr, [i - 1], [inx])

        else:

            c = 0

            for i in range(1, len(state.stack) + 1):

                word_ind = state.stack[-i]

                word = words[word_ind]

                if word is None:
                    inx = self.word_vocab['<ROOT>']
                    np.put(input_repr, [i - 1], [inx])

                elif pos[word_ind] == 'CD':
                    inx = self.word_vocab['<CD>']
                    np.put(input_repr, [i - 1], [inx])

                elif pos[word_ind] == 'NNP':
                    inx = self.word_vocab['<NNP>']
                    np.put(input_repr, [i - 1], [inx])

                else:

                    if word.lower() in self.word_vocab:

                        inx = self.word_vocab[word.lower()]

                        np.put(input_repr, [i - 1], [inx])

                    else:
                        inx = self.word_vocab['<UNK>']
                        np.put(input_repr, [i - 1], [inx])

                c += 1

            if c == 1:
                inx = self.word_vocab['<NULL>']
                np.put(input_repr, [1, 2], [inx, inx])

            elif c == 2:
                inx = self.word_vocab['<NULL>']
                np.put(input_repr, [2], [inx])

            else:
                inx = self.word_vocab['<NULL>']
                np.put(input_repr, [0, 1, 2], [inx, inx, inx])

        if len(state.buffer) >= 3:

            for i in range(1, 4):

                word_ind = state.buffer[-i]

                word = words[word_ind]

                if word is None:
                    inx = self.word_vocab['<ROOT>']
                    np.put(input_repr, [i + 2], [inx])

                elif pos[word_ind] == 'CD':
                    inx = self.word_vocab['<CD>']
                    np.put(input_repr, [i + 2], [inx])

                elif pos[word_ind] == 'NNP':
                    inx = self.word_vocab['<NNP>']
                    np.put(input_repr, [i + 2], [inx])

                else:

                    if word.lower() in self.word_vocab:

                        inx = self.word_vocab[word.lower()]

                        np.put(input_repr, [i + 2], [inx])

                    else:
                        inx = self.word_vocab['<UNK>']
                        np.put(input_repr, [i + 2], [inx])

        else:

            c = 0

            for i in range(1, len(state.buffer) + 1):

                word_ind = state.buffer[-i]

                word = words[word_ind]

                if word is None:
                    inx = self.word_vocab['<ROOT>']
                    np.put(input_repr, [i + 2], [inx])

                elif pos[word_ind] == 'CD':
                    inx = self.word_vocab['<CD>']
                    np.put(input_repr, [i + 2], [inx])

                elif pos[word_ind] == 'NNP':
                    inx = self.word_vocab['<NNP>']
                    np.put(input_repr, [i + 2], [inx])

                else:

                    if word.lower() in self.word_vocab:

                        inx = self.word_vocab[word.lower()]

                        np.put(input_repr, [i + 2], [inx])

                    else:
                        inx = self.word_vocab['<UNK>']
                        np.put(input_repr, [i + 2], [inx])

                c += 1

            if c == 1:
                inx = self.word_vocab['<NULL>']
                np.put(input_repr, [4, 5], [inx, inx])

            elif c == 2:
                inx = self.word_vocab['<NULL>']
                np.put(input_repr, [5], [inx])

            else:
                inx = self.word_vocab['<NULL>']
                np.put(input_repr, [3, 4, 5], [inx, inx, inx])

        return input_repr
Beispiel #42
0
def extract_faces(data_dir):
    preprocessed_files = [
        '/faces_train.npy', '/faces_train_labels.npy', '/faces_test.npy',
        '/faces_test_labels.npy'
    ]

    all_preprocessed = True
    for file in preprocessed_files:
        if not tf.gfile.Exists(data_dir + file):
            all_preprocessed = False
            break

    # if False:
    if all_preprocessed:
        # Reload pre-processed training data from numpy dumps
        train_data = np.load(data_dir + preprocessed_files[0])
        train_labels = np.load(data_dir + preprocessed_files[1])

        # Reload pre-processed testing data from numpy dumps
        test_data = np.load(data_dir + preprocessed_files[2])
        test_labels = np.load(data_dir + preprocessed_files[3])

    else:
        # Do everything from scratch
        # Define lists of all files we should extract
        train_files = glob.iglob("/home/Cooper/faces95/*")

        # Load training images and labels
        images = []
        labels = []
        test_images = []
        test_labels = []
        for label, person in enumerate(train_files):
            pictures = list(glob.iglob(person + "/*"))
            for picture in pictures[0:15]:
                images.append(resize(misc.imread(picture), (30, 30)))
                labels.append(label)
            for picture in pictures[15:20]:
                test_images.append(resize(misc.imread(picture), (30, 30)))
                test_labels.append(label)
        # Shuffle images and labels together; np.put returns None, so apply
        # the permutation with indexing instead of reassigning its result.
        p = np.random.permutation(len(images))
        images = [images[i] for i in p]
        labels = [labels[i] for i in p]
        # Convert to numpy arrays and reshape in the expected format
        train_data = np.asarray(images, dtype=np.float32).reshape(
            (72 * 15, 3, 30, 30))
        train_data = np.swapaxes(train_data, 1, 3)
        train_labels = np.asarray(labels, dtype=np.int32).reshape(72 * 15)

        # Save so we don't have to do this again
        np.save(data_dir + preprocessed_files[0], train_data)
        np.save(data_dir + preprocessed_files[1], train_labels)

        # Convert to numpy arrays and reshape in the expected format
        test_data = np.asarray(test_images, dtype=np.float32).reshape(
            (72 * 5, 3, 30, 30))
        test_data = np.swapaxes(test_data, 1, 3)
        test_labels = np.asarray(test_labels, dtype=np.int32).reshape(72 * 5)

        # Save so we don't have to do this again
        np.save(data_dir + preprocessed_files[2], test_data)
        np.save(data_dir + preprocessed_files[3], test_labels)

    return train_data, train_labels, test_data, test_labels
Beispiel #43
0
 def idx2vec(idx):
     a = np.zeros([1, self.num_states_])
     np.put(a, idx, 1)
     return a
Beispiel #44
0
def build_image(number, average_color, image_being_built):
    img = numpy.zeros((OUTPUT_HEIGHT, 1, 3), numpy.uint8)

    first = 0
    second = 1
    third = 2
    # normally just put the average color into the array
    while third < (OUTPUT_BUCKET_CAP - 12):
        numpy.put(img, [first, second, third], average_color)
        first += 3
        second += 3
        third += 3
    # stopped four short, add 2 pixels every major, 4 pixels every minor
    if (number % MAJOR_FRAMEMODULO) == 0:
        while third < (OUTPUT_BUCKET_CAP - 3):
            numpy.put(img, [first, second, third], MARK_COLOR)
            first += 3
            second += 3
            third += 3
        numpy.put(img, [first, second, third], MARK_COLOR)
    elif (number % MINOR_FRAMEMODULO) == 0:
        while third < (OUTPUT_BUCKET_CAP - 12):
            numpy.put(img, [first, second, third], average_color)
            first += 3
            second += 3
            third += 3
        numpy.put(img, [first, second, third], MARK_COLOR)
        first += 3
        second += 3
        third += 3
        numpy.put(img, [first, second, third], MARK_COLOR)
    else:
        while third < OUTPUT_BUCKET_CAP:
            numpy.put(img, [first, second, third], average_color)
            first += 3
            second += 3
            third += 3

    if image_being_built is None:
        image_being_built = img
    else:
        image_being_built = cv2.hconcat([image_being_built, img])

    return image_being_built
Beispiel #45
0
 def get_x_pos(self, z):
     x0 = self.x1 - self.tx * self.z1
     suggestions = x0 + z * np.sin(self.tx)
     # replace position before the track starts with a large number
     np.put(suggestions, np.where(z < self.z1), 999000)
     return suggestions
Beispiel #46
0
 def toNumeric(self, data):
     numericLabel = np.unique(data)
     # work on a copy so the original labels stay intact while values are replaced
     data_new = np.copy(data)
     for i in range(0, len(numericLabel)):
         np.put(data_new, np.where(data == numericLabel[i]), [i])
     return data_new, numericLabel
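Treated as a standalone function (dropping self), the same pattern maps string labels to integer codes; a small hedged sketch:

import numpy as np

# hypothetical standalone version of the method above
def to_numeric(data):
    numericLabel = np.unique(data)
    data_new = np.zeros(len(data), dtype=int)
    for i in range(len(numericLabel)):
        np.put(data_new, np.where(data == numericLabel[i]), [i])
    return data_new, numericLabel

codes, classes = to_numeric(np.array(["cat", "dog", "cat", "bird"]))
print(codes)    # [1 2 1 0]  (alphabetical: bird=0, cat=1, dog=2)
print(classes)  # ['bird' 'cat' 'dog']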
Beispiel #47
0
def unfold_population(freq, bin_edges, binsize, mid_points, normalize=True):
    """ Applies a Scheil-Schwartz-Saltykov-type method to unfold the population
    of apparent (2D) diameters into the actual (3D) population of grain sizes.
    Following the reasoning of Higgins (2000), R (or D) is placed at the center
    of the classes (i.e. the midpoints).

    Reference
    ----------
    Higgins (2000) doi:10.2138/am-2000-8-901

    Parameters
    ----------
    freq : array_like
        frequency values of the different classes

    bin_edges : array_like
        the edges of the classes

    binsize : positive scalar
        the width of the classes

    mid_points : array_like
        the midpoints of the classes

    normalize : boolean, optional
        when True, negative frequency values are set to zero and the
        distribution is normalized. True by default.

    Call function
    -------------
    - wicksell_solution

    Returns
    -------
    The normalized frequencies of the unfolded population such that the integral
    over the range is one. If normalize is False the raw frequencies of the
    unfolded population.
    """

    d_values = np.copy(bin_edges)
    midpoints = np.copy(mid_points)
    i = len(midpoints) - 1

    while i > 0:
        j = i
        D = d_values[-1]
        Pi = wicksell_solution(D, d_values[i], d_values[i + 1])

        if freq[i] > 0:
            while j > 0:
                D = midpoints[-1]
                Pj = wicksell_solution(D, d_values[j - 1], d_values[j])
                P_norm = (Pj * freq[i]) / Pi
                np.put(freq, j - 1, freq[j - 1] -
                       P_norm)  # replace specified elements of an array
                j -= 1

            i -= 1
            d_values = np.delete(d_values, -1)
            midpoints = np.delete(midpoints, -1)

        # if the value of the current class is zero or negative move to the next class
        else:
            i -= 1
            d_values = np.delete(d_values, -1)
            midpoints = np.delete(midpoints, -1)

    if normalize is True:
        freq = np.clip(freq, 0., 2**20)  # replacing negative values with zero
        freq_norm = freq / sum(freq)  # normalize to one
        freq_norm = freq_norm / binsize  # normalize such that the integral over the range is one
        return freq_norm

    else:
        return freq
Beispiel #48
0
 def get_y_pos(self, z):
     y0 = self.y1 - self.ty * self.z1
     suggestions = y0 + z * np.sin(self.ty)
     # replace position before the track starts with a large number
     np.put(suggestions, np.where(z < self.z1), 999000)
     return suggestions
Beispiel #49
0
def get_entropy(predicted_probs):
    entropy = np.log(predicted_probs) * predicted_probs
    nan_indexes = np.where(np.isnan(entropy))[0]
    np.put(entropy, nan_indexes, 0)
    return entropy
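The function returns the elementwise p*log(p) terms, with the NaNs arising from p = 0 replaced by zero (note the np.where(...)[0] indexing assumes a 1-D input). A small usage sketch assuming the function above:

import numpy as np

probs = np.array([0.5, 0.25, 0.25, 0.0])
terms = get_entropy(probs)
print(terms)         # the entry for p=0 would be nan but has been replaced with 0
print(-terms.sum())  # summing and negating gives the Shannon entropy in nats (~1.04)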
Beispiel #50
0
def show_webcam(cam):

    #cv2.namedWindow("roast", cv2.WINDOW_NORMAL)
    #cv2.resizeWindow("roast", ROLLING_WINDOW_WIDTH, OUTPUT_HEIGHT)

    time.sleep(2)
    #framerate = cam.set(cv2.CAP_PROP_FPS, FRAMES_PER_SECOND)
    #cam.set(3, 1600)
    #cam.set(4, 1200)

    start_time = time.time()

    frame_time = (1.0 / FRAMES_PER_SECOND)

    frame_time_millis = int((frame_time * 1000))
    next_frame_time = start_time + frame_time

    full_image = None

    number = 0

    while True:
        try:
            #waiting until at least next frame time
            now = time.time()
            if now < next_frame_time:
                time.sleep(next_frame_time - now)

            next_frame_time = start_time + ((number + 1) * frame_time)

            ret_val, orig_img = cam.read()
            if not ret_val:
                print("trouble connecting to %s" % (cam))
                break
            #cropped_img = orig_img[360:640, 720:1280]

#for 1600x1200
#cropped_img = orig_img[300:900, 400:1200]

#for 1024x768
            cropped_img = orig_img[192:576, 256:768]
            cv2.imshow("CROP", cropped_img)

            avg_color_per_row = numpy.average(cropped_img, axis=0)
            avg_color = numpy.average(avg_color_per_row, axis=0)
            img = numpy.zeros((OUTPUT_HEIGHT, 1, 3), numpy.uint8)

            first = 0
            second = 1
            third = 2

            while third < (OUTPUT_BUCKET_CAP - 12):
                numpy.put(img, [first, second, third], avg_color)
                first += 3
                second += 3
                third += 3

#stopped four short, add 2 black pixels every major, 4 black pixels every minor

            if (number % MAJOR_FRAMEMODULO) == 0:
                while third < OUTPUT_BUCKET_CAP:
                    numpy.put(img, [first, second, third], MARK_COLOR)
                    first += 3
                    second += 3
                    third += 3
            elif (number % MINOR_FRAMEMODULO) == 0:
                while third < (OUTPUT_BUCKET_CAP - 6):
                    numpy.put(img, [first, second, third], avg_color)
                    first += 3
                    second += 3
                    third += 3
                numpy.put(img, [first, second, third], MARK_COLOR)
                first += 3
                second += 3
                third += 3
                numpy.put(img, [first, second, third], MARK_COLOR)
            else:
                while third < OUTPUT_BUCKET_CAP:
                    numpy.put(img, [first, second, third], avg_color)
                    first += 3
                    second += 3
                    third += 3

            if number == 0:
                full_image = img
            else:
                full_image = cv2.hconcat([full_image, img])

            number += 1

            #see if we can show this to the user
            #THIS CAN BE IMPROVED BY DEFINING THE SPACE AHEAD OF TIME I THINK

            if number > ROLLING_WINDOW_WIDTH:
                startx = number - ROLLING_WINDOW_WIDTH
                endx = number
                partial_image = full_image[0:OUTPUT_HEIGHT, startx:endx]
            else:
                partial_image = full_image

            cv2.imshow("roast", partial_image)
            cv2.waitKey(1)
        except KeyboardInterrupt:
            end_datetime = datetime.now().strftime("%Y.%m.%d.%H%M%S")
            end_time = time.time()
            DURATION = int(round(end_time - start_time))
            if DURATION > 60:
                mins, remain = divmod(DURATION, 60)
                DURATION = "%dMin%d" % (mins, remain)
            filename = "./%s-%sSec-%sFPS.png" % (end_datetime, DURATION,
                                                 FRAMES_PER_SECOND)
            cv2.imwrite(filename, full_image)
            break

    #SHOW IS OVER
    #DO SOMETHING TO INDICATE WE ARE DONE RECORDING BUT ALSO JUST SHOW THE THING
    #time.sleep(5)
    cv2.destroyAllWindows()
Beispiel #51
0
def overlayfiles(ncfilein1, ncfilein2, ncfileout):
    with Dataset(ncfilein1) as src1, Dataset(ncfilein2) as src2, Dataset(ncfileout, "w") as dst:
   
        # dimensions in common to be merged/overlaid
        idx1={}
        idx2={}
        udims = {}
        for name, dimension in src1.dimensions.items():
            if (name in src2.dimensions.keys()):
                dimension2 = src2.dimensions[name]
                if name in src1.variables.keys() and name in src2.variables.keys(): # overlay or merge
                    d1 = np.asarray(src1[name])
                    d2 = np.asarray(src2[name])
                    d12 = np.union1d(d1, d2)
                    idx1[name] = np.asarray(np.where(np.isin(d12, d1))[0]) # indexing of d1 in d12, but converted to integer array
                    idx2[name] = np.asarray(np.where(np.isin(d12, d2))[0])
                    len_dimension = len(d12)
                else:
                    len_dimension = len(dimension) + len(dimension2)
            else:
                len_dimension = len(dimension)

            if dimension.isunlimited(): # save current unlimited dim length for using below
                udims[name] = len_dimension
            
            dst.createDimension(name, len_dimension if not dimension.isunlimited() else None)
        
        #   
        # copy all file data; matched-up variables are merged/overlaid
        for name, variable in src1.variables.items():

            # create variables, but will update its values later 
            # NOTE: here the variable value size updated due to newly-created dimensions above
            dst.createVariable(name, variable.datatype, variable.dimensions)
            
            # copy variable attributes all at once via dictionary after created
            dst[name].setncatts(src1[name].__dict__)

            #
            varvals1 = np.copy(src1[name][...])
            tmp = varvals1
            
            
            if (name in src2.variables.keys()) and (not dst[name].dtype == str):  # np.str is removed in newer NumPy; netCDF string vars report dtype == str
                varvals2=np.copy(src2[name][...])

                # nan-filled 'dst[name]' array for putting data from src1/src2
                dst_shape = np.asarray(dst[name].shape)
                for udim in udims.keys():
                    if udim in variable.dimensions:
                        udim_idx = variable.dimensions.index(udim)
                        # unlimited length in 'dst_shape' from dst is default as 0
                        # must assign current length, otherwise 'tmp1'/'tmp2' below is incorrect in shape
                        dst_shape[udim_idx] = udims[udim]
                temp1 = np.full(dst_shape, np.nan, dtype=dst[name].dtype) # nan-filled array for src1
                temp2 = np.full(dst_shape, np.nan, dtype=dst[name].dtype) # nan-filled array for src2
                
                vdim = np.asarray(variable.dimensions)
                temp_indx1 = np.asarray(np.where(np.isnan(temp1)))
                temp_indx2 = np.asarray(np.where(np.isnan(temp2))) # whole-set multi-tuples indices
                for i in range(len(vdim)):
                    if vdim[i] in idx1.keys():  # dimension to be merge/overlay
                        idx = np.asarray(np.where(np.isin(temp_indx1[i],idx1[vdim[i]])))
                        temp_indx1 = temp_indx1[:,np.squeeze(idx)]
                        idx = np.asarray(np.where(np.isin(temp_indx2[i],idx2[vdim[i]])))
                        temp_indx2 = temp_indx2[:,np.squeeze(idx)]
                #
                vdim_indx1=np.ravel_multi_index(temp_indx1,temp1.shape)
                vdim_indx2=np.ravel_multi_index(temp_indx2,temp2.shape)
                np.put(temp1, vdim_indx1, varvals1)
                np.put(temp2, vdim_indx2, varvals2)
                temp1 = np.expand_dims(temp1, axis=0)
                temp2 = np.expand_dims(temp2, axis=0)
                tmp  = np.nanmean(np.concatenate((temp1, temp2), axis=0), axis=0)
                #
            #
            dst[name][...] = np.copy(tmp)
        #
        
    #            
    print('done!')
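The core overlay trick above: build the union of the shared coordinate, scatter each source onto a NaN-filled array on the union grid with np.put, then combine with nanmean. A reduced 1-D sketch of that idea with made-up values:

import numpy as np

d1, v1 = np.array([0, 1, 2]), np.array([10., 11., 12.])
d2, v2 = np.array([2, 3, 4]), np.array([99., 13., 14.])

d12 = np.union1d(d1, d2)                       # merged coordinate: [0 1 2 3 4]
t1 = np.full(d12.shape, np.nan)
t2 = np.full(d12.shape, np.nan)
np.put(t1, np.where(np.isin(d12, d1))[0], v1)  # scatter src1 onto the union grid
np.put(t2, np.where(np.isin(d12, d2))[0], v2)  # scatter src2 onto the union grid
merged = np.nanmean(np.vstack([t1, t2]), axis=0)
print(merged)  # [10.  11.  55.5 13.  14. ]  (the overlapping point 2 is averaged)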
Beispiel #52
0
    def update_desvars_oc(self):
        """
        Update the design variables by means of OC-like or equivalently SAO
        method, using the filtered sensitivities; return the updated design
        variables.

        EXAMPLES:
            >>> t.update_desvars_oc()

        See also: sens_analysis, filter_sens_sigmund

        """
        if not self.topydict:
            raise Exception('You must first load a TPD file!')
        # 'p' stays constant for a specified number of iterations from start.
        # 'p' is incremented, but not more than the maximum allowable value.
        # If continuation parameters are not specified in the input file, 'p'
        # will stay constant:
        if self.pcount >= self._phold:
            if (self.p + self._pincr) < self._pmax:
                if (self.pcount - self._phold) % self._pcon == 0:
                    self.p += self._pincr

        if self.qcount >= self._qhold:
            if (self.q + self._qincr) < self._qmax:
                if (self.qcount - self._qhold) % self._qcon == 0:
                    self.q += self._qincr

        self.pcount += 1
        self.qcount += 1

        # Exponential approximation of eta (damping factor):
        if self.itercount > 1:
            if self.topydict['ETA'] == 'exp':  #  Check TPD specified value
                mask = np.equal(self.desvarsold / self.desvars, 1)
                self.a = 1 + np.log2(np.abs(self.dfold / self.df)) / \
                np.log2(self.desvarsold / self.desvars + mask) + \
                mask * (self.a - 1)
                self.a = np.clip(self.a, A_LOW, A_UPP)
                self.eta = 1 / (1 - self.a)

        self.dfold = self.df.copy()
        self.desvarsold = self.desvars.copy()

        # Change move limit for compliant mechanism synthesis:
        if self.probtype == 'mech':
            move = 0.1
        else:
            move = 0.2
        lam1, lam2 = 0, 100e3
        dims = self.desvars.shape
        while (lam2 - lam1) / (lam2 + lam1) > 1e-8 and lam2 > 1e-40:
            lammid = 0.5 * (lam1 + lam2)
            if self.probtype == 'mech':
                if self.approx == 'dquad':
                    curv = -1 / (self.eta * self.desvars) * self.df
                    beta = np.maximum(self.desvars - (self.df + lammid) / curv,
                                      VOID)
                    move_upper = np.minimum(move, self.desvars / 3)
                    desvars = np.maximum(VOID, np.maximum((self.desvars - move),\
                    np.minimum(SOLID,  np.minimum((self.desvars + move), \
                    (self.desvars * np.maximum(1e-10, \
                    (-self.df / lammid))**self.eta)**self.q))))
                else:  # reciprocal or exponential
                    desvars = np.maximum(VOID, np.maximum((self.desvars - move),\
                    np.minimum(SOLID,  np.minimum((self.desvars + move), \
                    (self.desvars * np.maximum(1e-10, \
                    (-self.df / lammid))**self.eta)**self.q))))
            else:  # compliance or heat
                if self.approx == 'dquad':
                    curv = -1 / (self.eta * self.desvars) * self.df
                    beta = np.maximum(self.desvars - (self.df + lammid) / curv,
                                      VOID)
                    move_upper = np.minimum(move, self.desvars / 3)
                    desvars = np.maximum(VOID, np.maximum((self.desvars - move),\
                    np.minimum(SOLID,  np.minimum((self.desvars + move_upper), \
                    beta**self.q))))
                else:  # reciprocal or exponential
                    desvars = np.maximum(VOID, np.maximum((self.desvars - move),\
                    np.minimum(SOLID,  np.minimum((self.desvars + move), \
                    (self.desvars * (-self.df / lammid)**self.eta)**self.q))))

            # Check for passive and active elements, modify updated x:
            if self.pasv.any() or self.actv.any():
                flatx = desvars.flatten()
                idx = []
                if self.nelz == 0:
                    y, x = dims
                    for j in range(x):
                        for k in range(y):
                            idx.append(k * x + j)
                else:
                    z, y, x = dims
                    for i in range(z):
                        for j in range(x):
                            for k in range(y):
                                idx.append(k * x + j + i * x * y)
                if self.pasv.any():
                    pasv = np.take(idx, self.pasv)  #  new indices
                    np.put(flatx, pasv, VOID)  #  = zero density
                if self.actv.any():
                    actv = np.take(idx, self.actv)  #  new indices
                    np.put(flatx, actv, SOLID)  #  = solid
                desvars = flatx.reshape(dims)

            if self.nelz == 0:
                if desvars.sum() - self.nelx * self.nely * self.volfrac > 0:
                    lam1 = lammid
                else:
                    lam2 = lammid
            else:
                if desvars.sum() - self.nelx * self.nely * self.nelz *\
                self.volfrac > 0:
                    lam1 = lammid
                else:
                    lam2 = lammid
        self.lam = lammid

        self.desvars = desvars

        # Change in design variables:
        self.change = (np.abs(self.desvars - self.desvarsold)).max()

        # Solid-void fraction:
        nr_s = self.desvars.flatten().tolist().count(SOLID)
        nr_v = self.desvars.flatten().tolist().count(VOID)
        self.svtfrac = (nr_s + nr_v) / self.desvars.size
Beispiel #53
0
def indicator(n, i):
    """Create a binary array of length n that is True for every index that is in
    i and False for every other index. Named after the indicator function."""
    m = np.zeros(n, dtype=bool)
    np.put(m, i, True)
    return m
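A quick check of the helper; np.put writes True at the listed indices of the zero-initialized boolean mask:

import numpy as np

mask = indicator(5, [0, 3])
print(mask)            # [ True False False  True False]
print(mask.nonzero())  # (array([0, 3]),)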
Beispiel #54
0
	def writeto(self, save_to_path, method='ascii',
		tell_sp=None):
		"""
		Save the data as an ascii or a fits file.

		Parameters
		----------
		save_to_path 	:	str
							the path to save the output file

		method 			: 	'ascii' or 'fits'
							the output file format, either in
							a single ascii file or several fits
							files labeled in the order of 
							wavelength


		Optional Parameters
		-------------------
		tell_sp 		: 	Spectrum object
							the telluric data for the corresponding
							wavelength calibration

		Returns
		-------
		ascii or fits 	: 	see the method keyword
							The wavelength is in microns


		"""
		#pixel = np.delete(np.arange(1024),list(self.mask))
		pixel = np.arange(len(self.oriWave))
		## create the output mask array 0=good; 1=bad
		if self.applymask:
			mask = np.zeros((len(self.oriWave),),dtype=int)
			np.put(mask,self.mask.tolist(),int(1))
		else:
			mask = np.zeros((len(self.oriWave),),dtype=int)

		if method == 'fits':
			#fullpath = self.path + '/' + self.name + '_' + str(self.order) + '_all.fits'
			#hdulist = fits.open(fullpath, ignore_missing_end=True)
			#hdulist.writeto(save_to_path)
			#hdulist.close()
			save_to_path2 = save_to_path + self.header['FILENAME'].split('.')[0]\
			+ '_O' + str(self.order)
			## wavelength
			hdu1 = fits.PrimaryHDU(self.wave/10000, header=self.header)
			save_to_path2_1 = save_to_path2 + '_wave.fits'
			hdu1.writeto(save_to_path2_1)
			## flux
			hdu2 = fits.PrimaryHDU(self.flux, header=self.header)
			save_to_path2_2 = save_to_path2 + '_flux.fits'
			hdu2.writeto(save_to_path2_2)
			## uncertainty
			hdu3 = fits.PrimaryHDU(self.noise, header=self.header)
			save_to_path2_3 = save_to_path2 + '_uncertainty.fits'
			hdu3.writeto(save_to_path2_3)
			## pixel
			hdu4 = fits.PrimaryHDU(pixel, header=self.header)
			save_to_path2_4 = save_to_path2 + '_pixel.fits'
			hdu4.writeto(save_to_path2_4)
			## mask
			hdu5 = fits.PrimaryHDU(mask, header=self.header)
			save_to_path2_5 = save_to_path2 + '_mask.fits'
			hdu5.writeto(save_to_path2_5)

			if tell_sp is not None:
				tell_sp2 = copy.deepcopy(tell_sp)
				# the telluric standard model
				wavelow = tell_sp2.wave[0] - 20
				wavehigh = tell_sp2.wave[-1] + 20
				tell_mdl = nsp.getTelluric(wavelow=wavelow,wavehigh=wavehigh)
				# continuum correction for the data
				tell_sp2 = nsp.continuumTelluric(data=tell_sp2, 
					model=tell_mdl,order=tell_sp2.order)
				# telluric flux
				hdu6 = fits.PrimaryHDU(tell_sp.flux, header=tell_sp.header)
				save_to_path2_6 = save_to_path2 + '_telluric_flux.fits'
				hdu6.writeto(save_to_path2_6)
				# telluric uncertainty
				hdu7 = fits.PrimaryHDU(tell_sp.noise, header=tell_sp.header)
				save_to_path2_7 = save_to_path2 + '_telluric_uncertainty.fits'
				hdu7.writeto(save_to_path2_7)
				# telluric model
				hdu8 = fits.PrimaryHDU(tell_mdl.flux, header=tell_sp.header)
				save_to_path2_8 = save_to_path2 + '_telluric_model.fits'
				hdu8.writeto(save_to_path2_8)
				

		elif method == 'ascii':
			save_to_path2 = save_to_path + self.header['FILENAME'].split('.')[0]\
			+ '_O' + str(self.order) + '.txt'

			if tell_sp is None:
				df = pd.DataFrame(data={'wavelength':list(self.oriWave/10000),
					'flux':list(self.oriFlux),
					'uncertainty':list(self.oriNoise),
					'pixel':list(pixel),
					'mask':list(mask)})
			
			elif tell_sp is not None:
				tell_sp2 = copy.deepcopy(tell_sp)
				tell_sp2 = nsp.continuumTelluric(data=tell_sp2
					,order=self.order)
				lsf0 = nsp.getLSF(tell_sp2)
				tell_sp2.flux = tell_sp2.oriFlux
				tell_sp2.wave = tell_sp2.oriWave
				tell_mdl = nsp.convolveTelluric(lsf0, tell_sp2)

				df = pd.DataFrame(data={'wavelength':list(self.oriWave/10000),
					'flux':list(self.oriFlux),
					'uncertainty':list(self.oriNoise),
					'telluric_flux':list(tell_sp.oriFlux),
					'telluric_uncertainty':list(tell_sp.oriNoise),
					'telluric_model':list(tell_mdl.flux),
					'pixel':list(pixel),
					'mask':list(mask)})


			df.to_csv(save_to_path2, index=None, sep='\t', mode='a',
				header=True, columns=list(df.columns))
Beispiel #55
0
def main(random_state=None):

    # settings
    data_fname = 'data/wikipedia/features/independent/data.csv'
    folds = 10
    invert = False
    spammer_percentage = 0.5

    data_df = pd.read_csv(data_fname)
    data_df = data_df.fillna(0)

    # create unbalanced data with mostly benign users
    if spammer_percentage < 0.5:
        benign_df = data_df[data_df['label'] == 0]
        spammer_df = data_df[data_df['label'] == 1].sample(
            frac=spammer_percentage, random_state=random_state)
        data_df = pd.concat([benign_df, spammer_df])

    # shuffle dataset
    data_df = data_df.sample(frac=1, random_state=random_state)

    # get list of features
    X_cols = list(data_df.columns)
    X_cols.remove('user_id')
    X_cols.remove('label')

    # convert data to numpy
    X = data_df[X_cols].to_numpy()
    y = data_df['label'].to_numpy()
    target_col = data_df['user_id'].to_numpy()

    # setup models
    xgb = XGBClassifier()
    eggs_param_grid = {
        'sgl_method': [None],
        'stacks': [1, 2],
        'joint_model': [None, 'mrf'],
        'relations': [['burst_id'], ['burst_id', 'link_id']]
    }
    metrics = [('roc_auc', roc_auc_score), ('aupr', average_precision_score),
               ('accuracy', accuracy_score)]
    models = [('xgb', EGGS(estimator=xgb), None),
              ('eggs',
               EGGS(estimator=xgb,
                    sgl_method=None,
                    sgl_func=pr.pseudo_relational_features,
                    joint_model='mrf',
                    pgm_func=pgm.create_files,
                    relations=['burst_id'],
                    validation_size=0.2,
                    verbose=1), eggs_param_grid)]

    # setup score containers
    scores = defaultdict(list)
    all_scores = defaultdict(list)
    predictions = {name: y.copy().astype(float) for name, _, _ in models}
    predictions_binary = {
        name: y.copy().astype(float)
        for name, _, _ in models
    }

    # test models using cross-validation
    kf = KFold(n_splits=folds, random_state=random_state, shuffle=True)
    for fold, (train_index, test_index) in enumerate(kf.split(X)):
        print('\nfold %d...' % fold)

        # flip the training and test sets
        if invert:
            temp = train_index
            train_index = test_index
            test_index = temp

        X_train, X_test = X[train_index], X[test_index]
        y_train, y_test = y[train_index], y[test_index]
        target_col_train, target_col_test = target_col[
            train_index], target_col[test_index]

        for name, model, param_grid in models:
            print('%s...' % name)

            if param_grid is not None:
                model = GridSearch(model,
                                   param_grid,
                                   scoring='average_precision',
                                   random_state=random_state,
                                   n_jobs=4)

            model = model.fit(X_train, y_train, target_col_train)
            y_hat = model.predict_proba(X_test, target_col_test)[:, 1]
            y_hat_binary = model.predict(X_test, target_col_test)

            if hasattr(model, 'best_params_'):
                print('best params: %s' % model.best_params_)

            np.put(predictions[name], test_index, y_hat)
            np.put(predictions_binary[name], test_index, y_hat_binary)

            for metric, scorer in metrics:
                score = scorer(
                    y_test, y_hat_binary) if metric == 'accuracy' else scorer(
                        y_test, y_hat)
                scores[name + '|' + metric].append(score)

    # compute single score using predictions from all folds
    for name, model, _ in models:
        for metric, scorer in metrics:
            score = scorer(
                y,
                predictions_binary[name]) if metric == 'accuracy' else scorer(
                    y, predictions[name])
            all_scores[name + '|' + metric].append(score)

    print_utils.print_scores(scores, models, metrics)
    print_utils.print_scores(all_scores, models, metrics)
Beispiel #56
0
def compute_affine_xform(corners1, corners2, matches):
    """Compute affine transformation given matched feature locations.

  Args:
  - corners1 (list of 2-tuples)
  - corners2 (list of 2-tuples)
  - matches (list of 2-tuples)

  Returns:
  - xform (2D float64 array): A 3x3 matrix representing the affine
      transformation that maps coordinates in image1 to the corresponding
      coordinates in image2.
  - outlier_labels (list of bool): A list of Boolean values indicating whether
      the corresponding match in `matches` is an outlier or not. For example,
      if `matches[42]` is determined as an outlier match after RANSAC, then
      `outlier_labels[42]` should have value `True`.
  """
    # SELF-DEFINED
    inlier_threshold = 3  # shift tolerance of corners measured in pixels
    round_time = 50
    # SELF-DEFINED

    round_affine_matrix_sets = []
    round_outliers_sets = []
    round_inliers_number_sets = []

    for round_i in range(round_time):
        round_inliers = []
        round_outliers = []

        # 1. Randomly choose s samples (6 for affine transformation, which is 3 pairs)
        trans_match = [
            matches[i]
            for i in np.random.choice(len(matches), 3, replace=False)
        ]
        # ? redo if repeated

        # 2. Fit a model to those samples
        A = np.array([[
            corners1[trans_match[0][0]][0], corners1[trans_match[0][0]][1], 1
        ], [corners1[trans_match[1][0]][0], corners1[trans_match[1][0]][1], 1],
                      [
                          corners1[trans_match[2][0]][0],
                          corners1[trans_match[2][0]][1], 1
                      ]])
        B = np.array([[corners2[trans_match[0][1]][0]],
                      [corners2[trans_match[1][1]][0]],
                      [corners2[trans_match[2][1]][0]]])
        A_inv = np.linalg.inv(A)
        abc_vector = A_inv.dot(B).T

        F = np.array([[corners2[trans_match[0][1]][1]],
                      [corners2[trans_match[1][1]][1]],
                      [corners2[trans_match[2][1]][1]]])
        def_vector = A_inv.dot(F).T

        affine_matrix = np.vstack(
            [abc_vector, def_vector,
             [0, 0, 1]])  # affine matrix mapping image1 coordinates to image2

        # 3. Count the number of inliers that approximately fit the model
        for i in range(len(matches)):
            weighted_c1 = np.append(np.array(list(corners1[matches[i][0]])),
                                    1).T
            weighted_est_c2 = affine_matrix.dot(weighted_c1)
            est_c2 = np.delete(weighted_est_c2, 2)

            err = np.sqrt(np.sum((est_c2 - corners2[matches[i][1]])**2))
            if err <= inlier_threshold:
                round_inliers.append(i)
            else:
                round_outliers.append(i)

        round_affine_matrix_sets.append(affine_matrix)
        round_outliers_sets.append(round_outliers)
        round_inliers_number_sets.append(len(round_inliers))

    # Choose the best model
    best_idx = np.argmax(round_inliers_number_sets)
    xform = round_affine_matrix_sets[best_idx]
    outlier_labels = np.zeros(len(matches), dtype=bool)
    np.put(outlier_labels, round_outliers_sets[best_idx], True)
    outlier_labels = outlier_labels.tolist()

    return xform, outlier_labels
Beispiel #57
0
            betaF = np.random.beta(1,num_data-y) # This is a float from 0 to 1
            x = (1-initVal)*betaF+initVal
            constraint_pos = solver.Constraint(y,solver.infinity())
            constraint_pos.SetCoefficient(beta,x)
            constraint_pos.SetCoefficient(t,1)
            constraint_neg = solver.Constraint(-1*y,solver.infinity())
            constraint_neg.SetCoefficient(beta,-1*x)
            constraint_neg.SetCoefficient(t,1)
            y+=1
            initVal = x
        objective = solver.Objective()
        objective.SetCoefficient(t,1)
        objective.SetMinimization()

        solver.Solve()
        np.put(batch,[j%batch_size],[t.solution_value()])    
        if (j%batch_size==batch_size-1):
            pandas.DataFrame(batch).to_csv("bucket\\outputLinfinity{}.csv".format(i), mode='a', header=False, index=False)
        
            

"""
#https://rosettacode.org/wiki/External_sort
import io

def sort_large_file(n: int, source: open, sink: open, file_opener = open)->None:

    '''
        approach:
            break the source into files of size n
            sort each of these files
Beispiel #58
0
a = np.array([[1], [4]], np.int32)
print(a.squeeze())
print(a.squeeze(axis=1))
a = np.array([[[0], [1], [2]]])
print(a.squeeze(axis=0))
print("--")
x = np.array([[1, 2, 3]])
print(np.swapaxes(x, 0, 1))
print("--")
x = np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]])
print(x, "------", x[0, 1, 1])
# e.g. x[0, 1, 1]: index 0 on the first axis, 1 on the second, 1 on the third
x_swap = x.swapaxes(0, 2)  # swap the first and third axes
# after the swap, x_swap[1, 1, 0] refers to the same element as x[0, 1, 1]
print(x_swap, "------", x_swap[1, 1, 0])

print(" -------------------------item 选择和操作 methods")

a = [4, 3, 5, 7, 6, 8]
indices = [0, 1, 4]
print(np.take(a, indices))

a = np.array(a)
print(a[indices])

print(np.take(a, [[0, 1], [2, 3]]))

a = np.arange(5)
np.put(a, [0, 2], [-44, -55])
print(a)
Beispiel #59
0
for index in np.ndindex(z.shape):
    print(index, z[index])

print(' ')
# 42
x, y = np.meshgrid(np.linspace(-1, 1, 10), np.linspace(-1, 1, 10))
d = np.hypot(x, y)
sigma, mu = 1.0, 0.0
g = np.exp(-((d - mu)**2 / (2.0 * sigma**2)))
print(g)

print(' ')
# 43
n = 10
p = 3
z = np.zeros((n, n))
np.put(z, np.random.choice(range(n * n), p, replace=False), 1)

print(' ')
# 44
x = np.random.rand(5, 10)
y = x - x.mean(axis=1, keepdims=True)

print(' ')
# 45
z = np.random.randint(0, 10, (3, 3))
n = 1  # zero-based column index
print(z)
print(z[z[:, n].argsort()])

print(' ')
# 46
Beispiel #60
0
def get_sparse_model(model_file, param_file, ratio, save_path):
    """
    Use the unstructured-sparsity algorithm to compress the network.
    This interface is only meant for evaluating the latency of the compressed
    network; it does not consider the loss of accuracy.
    Args:
        model_file(str), param_file(str): The inference model to be pruned.
        ratio(float): The ratio by which to prune the model.
        save_path(str): The save path of the pruned model.
    """
    assert os.path.exists(model_file), f'{model_file} does not exist.'
    assert os.path.exists(
        param_file) or param_file is None, f'{param_file} does not exist.'
    paddle.enable_static()

    SKIP = ['image', 'feed', 'pool2d_0.tmp_0']

    folder = os.path.dirname(model_file)
    model_name = model_file.split('/')[-1]
    if param_file is None:
        param_name = None
    else:
        param_name = param_file.split('/')[-1]

    main_prog = static.Program()
    startup_prog = static.Program()
    exe = paddle.static.Executor(paddle.CPUPlace())
    exe.run(startup_prog)

    [inference_program, feed_target_names, fetch_targets
     ] = (fluid.io.load_inference_model(folder,
                                        exe,
                                        model_filename=model_name,
                                        params_filename=param_name))
    thresholds = {}

    graph = GraphWrapper(inference_program)
    for op in graph.ops():
        for inp in op.all_inputs():
            name = inp.name()
            if inp.name() in SKIP: continue
            if 'tmp' in inp.name(): continue
            # 1x1_conv
            cond_conv = len(
                inp._var.shape
            ) == 4 and inp._var.shape[2] == 1 and inp._var.shape[3] == 1
            cond_fc = False

            if cond_fc or cond_conv:
                array = np.array(
                    paddle.static.global_scope().find_var(name).get_tensor())
                flatten = np.abs(array.flatten())
                index = min(len(flatten) - 1, int(ratio * len(flatten)))
                ind = np.unravel_index(np.argsort(flatten, axis=None),
                                       flatten.shape)
                thresholds[name] = ind[0][:index]

    for op in graph.ops():
        for inp in op.all_inputs():
            name = inp.name()
            if name in SKIP: continue
            if 'tmp' in inp.name(): continue

            cond_conv = (len(inp._var.shape) == 4 and inp._var.shape[2] == 1
                         and inp._var.shape[3] == 1)
            cond_fc = False

            # only support 1x1_conv now
            if not (cond_conv or cond_fc): continue
            array = np.array(
                paddle.static.global_scope().find_var(name).get_tensor())
            if thresholds.get(name) is not None:
                np.put(array, thresholds.get(name), 0)
            assert (abs(1 - np.count_nonzero(array) / array.size - ratio) <
                    1e-2), 'The model sparsity is abnormal.'
            paddle.static.global_scope().find_var(name).get_tensor().set(
                array, paddle.CPUPlace())

    fluid.io.save_inference_model(save_path,
                                  feeded_var_names=feed_target_names,
                                  target_vars=fetch_targets,
                                  executor=exe,
                                  main_program=inference_program,
                                  model_filename=model_name,
                                  params_filename=param_name)
    print("The pruned model is saved in: ", save_path)