Example #1
def test_frame_2ufunc_out():
    input_matrix = np.random.randint(1, 100, size=(20, 2))

    df = pd.DataFrame(input_matrix, columns=['A', 'B'])
    ddf = dd.from_pandas(df, 3)

    # column number mismatch
    df_out = pd.DataFrame(np.random.randint(1, 100, size=(20, 3)),
                          columns=['X', 'Y', 'Z'])
    ddf_out = dd.from_pandas(df_out, 3)

    with pytest.raises(ValueError):
        np.sin(ddf, out=ddf_out)

    # types mismatch
    ddf_out = dd.from_pandas(pd.Series([0]), 1)
    with pytest.raises(TypeError):
        np.sin(ddf, out=ddf_out)

    df_out = pd.DataFrame(np.random.randint(1, 100, size=(20, 2)),
                          columns=['X', 'Y'])
    ddf_out = dd.from_pandas(df_out, 3)

    np.sin(ddf, out=ddf_out)
    np.add(ddf_out, 10, out=ddf_out)

    expected = pd.DataFrame(np.sin(input_matrix) + 10, columns=['A', 'B'])

    assert_eq(ddf_out, expected)
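As a quick illustration of the semantics the test relies on (a sketch, assuming `numpy as np`, `pandas as pd`, and `dask.dataframe as dd` as in the test): when the `out=` target's structure matches, the ufunc result is written into it.

import numpy as np
import pandas as pd
import dask.dataframe as dd

df = pd.DataFrame({'A': [1.0, 2.0, 3.0], 'B': [4.0, 5.0, 6.0]})
ddf = dd.from_pandas(df, 3)
ddf_out = dd.from_pandas(df.copy(), 3)

np.sin(ddf, out=ddf_out)      # writes the lazy result into ddf_out
print(ddf_out.compute())      # matches np.sin(df)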
Example #2
    def test_ufunc_coercions(self):
        idx = date_range('2011-01-01', periods=3, freq='2D', name='x')

        delta = np.timedelta64(1, 'D')
        for result in [idx + delta, np.add(idx, delta)]:
            assert isinstance(result, DatetimeIndex)
            exp = date_range('2011-01-02', periods=3, freq='2D', name='x')
            tm.assert_index_equal(result, exp)
            assert result.freq == '2D'

        for result in [idx - delta, np.subtract(idx, delta)]:
            assert isinstance(result, DatetimeIndex)
            exp = date_range('2010-12-31', periods=3, freq='2D', name='x')
            tm.assert_index_equal(result, exp)
            assert result.freq == '2D'

        delta = np.array([np.timedelta64(1, 'D'), np.timedelta64(2, 'D'),
                          np.timedelta64(3, 'D')])
        for result in [idx + delta, np.add(idx, delta)]:
            assert isinstance(result, DatetimeIndex)
            exp = DatetimeIndex(['2011-01-02', '2011-01-05', '2011-01-08'],
                                freq='3D', name='x')
            tm.assert_index_equal(result, exp)
            assert result.freq == '3D'

        for result in [idx - delta, np.subtract(idx, delta)]:
            assert isinstance(result, DatetimeIndex)
            exp = DatetimeIndex(['2010-12-31', '2011-01-01', '2011-01-02'],
                                freq='D', name='x')
            tm.assert_index_equal(result, exp)
            assert result.freq == 'D'
Example #3
 def perceptron_update(self, prevFrameID, flag):
     weights = self.classifier.coef_
     if not flag:
         wrongData = self.prevStates[prevFrameID]
         #normData = np.linalg.norm(wrongData, ord=2) * np.ones(self.numWords)
         #wrongData = np.divide(wrongData, normData)
         wrongLabel = self.prevLabels[prevFrameID]
         wrongScores = self.prevScores[prevFrameID]
         wrongScore = max(wrongScores)
         if wrongLabel > 0:
             wrongWeights = weights[wrongLabel-1]
             newWeights = np.subtract(wrongWeights, (self.learningRate/self.numReinforce)*wrongData)
             weights[wrongLabel-1] = newWeights
         else:
             k = cv2.waitKey(-1)
             rightLabel = k - 48  # map the pressed digit key to its numeric label (ASCII '0' is 48)
             if rightLabel > 0 and rightLabel <= weights.shape[0]:
                 wrongWeights = weights[rightLabel-1]
                 newWeights = np.add(wrongWeights, (self.learningRate/self.numReinforce)*wrongData)
                 weights[rightLabel-1] = newWeights
     else:
         rightData = self.prevStates[prevFrameID]
         #normData = np.linalg.norm(rightData, ord=2) * np.ones(self.numWords)
         #rightData = np.divide(rightData, normData)
         rightLabel = self.prevLabels[prevFrameID]
         rightScores = self.prevScores[prevFrameID]
         rightScore = max(rightScores)
         if rightLabel > 0:
             rightWeights = weights[rightLabel-1]
             newWeights = np.add(rightWeights, (self.learningRate/self.numReinforce)*rightData)
             weights[rightLabel-1] = newWeights
     #self.numReinforce += 1
     self.classifier.coef_ = weights
Example #4
 def __call__(self, values, clip=True, out=None):
     values = _prepare(values, clip=clip, out=out)
     np.multiply(values, self.exp, out=values)
     np.add(values, 1., out=values)
     np.log(values, out=values)
     np.true_divide(values, np.log(self.exp + 1.), out=values)
     return values
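The four in-place calls above implement a log stretch; a minimal sketch of the same mapping without the buffer reuse (assuming `self.exp` is a scalar stretch factor):

import numpy as np

def log_stretch(values, exp=1000.0):
    # log(exp * x + 1) / log(exp + 1), the same mapping as the in-place chain
    return np.log(exp * values + 1.0) / np.log(exp + 1.0)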
Example #5
def morphological_laplace(input, size = None, footprint = None,
                          structure = None, output = None,
                          mode = "reflect", cval = 0.0, origin = 0):
    """Multi-dimensional morphological laplace.

    Either a size or a footprint, or the structure must be provided. An
    output array can optionally be provided. The origin parameter
    controls the placement of the filter. The mode parameter
    determines how the array borders are handled, where cval is the
    value when mode is equal to 'constant'.
    """
    tmp1 = grey_dilation(input, size, footprint, structure, None, mode,
                         cval, origin)
    if isinstance(output, numpy.ndarray):
        grey_erosion(input, size, footprint, structure, output, mode,
                     cval, origin)
        numpy.add(tmp1, output, output)
        del tmp1
        numpy.subtract(output, input, output)
        return numpy.subtract(output, input, output)
    else:
        tmp2 = grey_erosion(input, size, footprint, structure, None, mode,
                            cval, origin)
        numpy.add(tmp1, tmp2, tmp2)
        del tmp1
        numpy.subtract(tmp2, input, tmp2)
        numpy.subtract(tmp2, input, tmp2)
        return tmp2
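For reference, the buffer juggling above computes dilation + erosion - 2*input; a minimal non-in-place sketch of the same result (assuming SciPy's public scipy.ndimage API):

import numpy as np
from scipy.ndimage import grey_dilation, grey_erosion

def morphological_laplace_simple(x, size):
    # dilation + erosion - 2 * input, trading the reused buffers for clarity
    return grey_dilation(x, size=size) + grey_erosion(x, size=size) - 2 * np.asarray(x)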
Example #6
def verySimpleModelMikolov(sentence1, sentence2):
	bagS1 = returnPossibleKeys(sentence1.strip())
	bagS2 = returnPossibleKeys(sentence2.strip())
	if len(bagS1) == 0 or len(bagS2) == 0:
		print("111")  # debug marker: one of the sentences produced no keys
		return -2
	else:
		try:
			sVector1 = vspace[bagS1[0]]
			for ss in bagS1[1:]:
				try:
					sVector1 = numpy.add(sVector1, vspace[ss])
				except KeyError:
					pass  # skip words missing from the vector space
			sVector2 = vspace[bagS2[0]]
			for ss in bagS2[1:]:
				try:
					sVector2 = numpy.add(sVector2, vspace[ss])
				except KeyError:
					pass
			try:
				cos = scipy.spatial.distance.cosine(sVector1, sVector2)
				# print(cos)
				return cos
			except Exception:
				return -4
		except KeyError:
			return -3
Example #7
  def movementCompute(self, displacement, noiseFactor = 0):
    """
    Shift the current active cells by a vector.

    @param displacement (pair of floats)
    A translation vector [di, dj].
    """

    if noiseFactor != 0:
      displacement = copy.deepcopy(displacement)
      xnoise = np.random.normal(0, noiseFactor)
      ynoise = np.random.normal(0, noiseFactor)
      displacement[0] += xnoise
      displacement[1] += ynoise


    # Calculate delta in the module's coordinates.
    phaseDisplacement = (np.matmul(self.rotationMatrix, displacement) *
                         self.phasesPerUnitDistance)

    # Shift the active coordinates.
    np.add(self.activePhases, phaseDisplacement, out=self.activePhases)

    # In Python, (x % 1.0) can return 1.0 because of floating point goofiness.
    # Generally this doesn't cause problems, it's just confusing when you're
    # debugging.
    np.round(self.activePhases, decimals=9, out=self.activePhases)
    np.mod(self.activePhases, 1.0, out=self.activePhases)

    self._computeActiveCells()
    self.phaseDisplacement = phaseDisplacement
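A small demonstration of the rounding trick the comment describes (a sketch; the second value is one floating-point rounding step away from 1.0):

import numpy as np

phases = np.array([0.3, 0.99999999999])
np.round(phases, decimals=9, out=phases)   # second entry becomes exactly 1.0
np.mod(phases, 1.0, out=phases)            # ...and wraps back to 0.0
print(phases)                              # -> [0.3 0. ]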
Example #8
    def test_parr_ops_errors(self, ng, box_with_array):
        idx = PeriodIndex(['2011-01', '2011-02', '2011-03', '2011-04'],
                          freq='M', name='idx')
        obj = tm.box_expected(idx, box_with_array)

        msg = r"unsupported operand type\(s\)"
        with pytest.raises(TypeError, match=msg):
            obj + ng

        with pytest.raises(TypeError):
            # error message differs between PY2 and 3
            ng + obj

        with pytest.raises(TypeError, match=msg):
            obj - ng

        with pytest.raises(TypeError):
            np.add(obj, ng)

        with pytest.raises(TypeError):
            np.add(ng, obj)

        with pytest.raises(TypeError):
            np.subtract(obj, ng)

        with pytest.raises(TypeError):
            np.subtract(ng, obj)
Example #9
    def test_pi_ops_nat(self):
        idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-04'],
                          freq='M', name='idx')
        expected = PeriodIndex(['2011-03', '2011-04', 'NaT', '2011-06'],
                               freq='M', name='idx')

        self._check(idx, lambda x: x + 2, expected)
        self._check(idx, lambda x: 2 + x, expected)
        self._check(idx, lambda x: np.add(x, 2), expected)

        self._check(idx + 2, lambda x: x - 2, idx)
        self._check(idx + 2, lambda x: np.subtract(x, 2), idx)

        # freq with mult
        idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-04'],
                          freq='2M', name='idx')
        expected = PeriodIndex(['2011-07', '2011-08', 'NaT', '2011-10'],
                               freq='2M', name='idx')

        self._check(idx, lambda x: x + 3, expected)
        self._check(idx, lambda x: 3 + x, expected)
        self._check(idx, lambda x: np.add(x, 3), expected)

        self._check(idx + 3, lambda x: x - 3, idx)
        self._check(idx + 3, lambda x: np.subtract(x, 3), idx)
Example #10
	def assign_weights(self,network,matTargetNeurons):
		numInput = self.dicProperties["IODim"]
		numNodesReservoir = self.dicProperties["ReservoirDim"]
		numInhib = numInput*numNodesReservoir*self.dicProperties["InhibFrac"]
		nRowLength = len(matTargetNeurons[0])
		numInhibPerRow = int(np.floor(nRowLength*self.dicProperties["InhibFrac"]))
		if self.dicProperties["Distribution"] == "Betweenness":
			if self.lstBetweenness == []:
				self.lstBetweenness = betwCentrality(network)[0].a
			rMaxBetw = self.lstBetweenness.max()
			rMinBetw = self.lstBetweenness.min()
			rMaxWeight = self.dicProperties["Max"]
			rMinWeight = self.dicProperties["Min"]
			for i in range(self.dicProperties["IODim"]):
				self.lstBetweenness = np.multiply(np.add(self.lstBetweenness,-rMinBetw+rMinWeight*rMaxBetw/(rMaxWeight-rMinWeight)),(rMaxWeight-rMinWeight)/rMaxBetw)
				self.__matConnect[i,matTargetNeurons[i]] = self.lstBetweenness[matTargetNeurons[i]] # does not take duplicate indices into account... never mind
			# generate the necessary inhibitory connections
			lstNonZero = np.nonzero(self.__matConnect)
			lstInhib = np.random.randint(0,len(lstNonZero),int(numInhib))  # numInhib is a float; randint needs an integer count
			self.__matConnect[lstInhib] = -self.__matConnect[lstInhib]
			rFactor = (self.dicProperties["Max"]-self.dicProperties["Min"])/(rMaxBetw-rMinBetw) # entre 0 et Max-Min
			self.__matConnect = np.add(np.multiply(self.__matConnect,rFactor),self.dicProperties["Min"]) # entre Min et Max
		elif self.dicProperties["Distribution"] == "Gaussian":
			for i in range(self.dicProperties["IODim"]):
				self.__matConnect[i,matTargetNeurons[i,:numInhibPerRow]] = -np.random.normal(self.dicProperties["MeanInhib"],self.dicProperties["VarInhib"],numInhibPerRow)
				self.__matConnect[i,matTargetNeurons[i,numInhibPerRow:]] = np.random.normal(self.dicProperties["MeanExc"],self.dicProperties["VarExc"],nRowLength-numInhibPerRow)
		elif self.dicProperties["Distribution"] == "Lognormal":
			for i in range(self.dicProperties["IODim"]):
				self.__matConnect[i,matTargetNeurons[i][:numInhibPerRow]] = -np.random.lognormal(self.dicProperties["LocationInhib"],self.dicProperties["ScaleInhib"],numInhibPerRow)
				self.__matConnect[i,matTargetNeurons[i][numInhibPerRow:]] = np.random.lognormal(self.dicProperties["LocationExc"],self.dicProperties["ScaleExc"],nRowLength-numInhibPerRow)
		else:
			pass # I don't know what to do for the degree correlations yet
Example #11
def makeFeatureVec(words, model, num_features):

	# Pre-initialize an empty numpy array (for speed)
	featureVec = np.zeros((num_features,),dtype="float32")

	# Count number of words
	nwords = 0.

	# Loop over word by word
	# If in vocabulary, add its feature vector to the total
	for word in words.split():

		if word in model: 
			nwords += 1.
			featureVec = np.add(featureVec,model[word])
		else:
			missingWord = handleMissingWord(word, model, num_features)
			featureVec = np.add(featureVec, missingWord)
			nwords += 1.

	# If no words were counted, fall back to a character-level vector
	# (checking before the division avoids a divide-by-zero warning)
	if nwords == 0:
		return characterVec(words, model, num_features)

	# Divide the result by the number of words to get the average
	featureVec = np.divide(featureVec,nwords)

	return featureVec
Example #12
def feature_extraction_Doc2Vec(data_pos, data_neg, model): # uses word2vec under the hood
    vec_size = 300
    data_pos_vec, data_neg_vec = [], []
    for term in data_pos:
        raw_vecs = np.zeros(vec_size)
        vec_num = 0
        for item in term:
            try:
                raw_vecs = np.add(raw_vecs, model[item])
                vec_num += 1
            except KeyError:
                pass  # word not in the model's vocabulary
        doc_vec = raw_vecs / float(vec_num+0.00001)
        data_pos_vec.append(doc_vec.tolist())
    for term in data_neg:
        raw_vecs = np.zeros(vec_size)
        vec_num = 0
        for item in term:
            try:
                raw_vecs = np.add(raw_vecs, model[item])
                vec_num += 1
            except KeyError:
                pass  # word not in the model's vocabulary
        doc_vec = raw_vecs / float(vec_num + 0.00001)
        data_neg_vec.append(doc_vec.tolist())
    return data_pos_vec, data_neg_vec
Example #13
def wavefunction(coords, mocoeffs, gbasis, volume):
    """Calculate the magnitude of the wavefunction at every point in a volume.
    
    Attributes:
        coords -- the coordinates of the atoms
        mocoeffs -- mocoeffs for one eigenvalue
        gbasis -- gbasis from a parser object
        volume -- a template Volume object (will not be altered)
    """
    bfs = getbfs(coords, gbasis)
    
    wavefn = copy.copy(volume)
    wavefn.data = numpy.zeros( wavefn.data.shape, "d")

    conversion = convertor(1,"bohr","Angstrom")
    x = numpy.arange(wavefn.origin[0], wavefn.topcorner[0]+wavefn.spacing[0], wavefn.spacing[0]) / conversion
    y = numpy.arange(wavefn.origin[1], wavefn.topcorner[1]+wavefn.spacing[1], wavefn.spacing[1]) / conversion
    z = numpy.arange(wavefn.origin[2], wavefn.topcorner[2]+wavefn.spacing[2], wavefn.spacing[2]) / conversion

    for bs in range(len(bfs)):
        data = numpy.zeros( wavefn.data.shape, "d")
        for i,xval in enumerate(x):
            for j,yval in enumerate(y):
                for k,zval in enumerate(z):
                    data[i, j, k] = bfs[bs].amp(xval,yval,zval)
        numpy.multiply(data, mocoeffs[bs], data)
        numpy.add(wavefn.data, data, wavefn.data)
    
    return wavefn
Example #14
def trainClassifier(postingVec, classVec):
    nWGivenC0 = np.zeros(len(postingVec[0]))
    nWGivenC1 = np.zeros(len(postingVec[0]))
    nWs = np.zeros(len(postingVec[0]))
    numAllWsC0 = 0
    numAllWsC1 = 0
    numC1 = 0
    index = 0
    for post in postingVec:
        nWs = np.add(nWs, np.array(post))
        if (classVec[index] == 0):
            nWGivenC0 = np.add(nWGivenC0, np.array(post))
            numAllWsC0 += sum(post)
        else:
            nWGivenC1 = np.add(nWGivenC1, np.array(post))
            numAllWsC1 += sum(post)
            numC1 += 1
        index += 1

    pWGivenC0 = nWGivenC0/numAllWsC0 # probability of each word, given class C0
    pWGivenC1 = nWGivenC1/numAllWsC1 # probability of each word, given class C1

    pC1 = float(numC1) / len(classVec) # probability of class 1
    pC0 = 1 - pC1 # probability of class 0

    return (pC0,pWGivenC0), (pC1,pWGivenC1)
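The trainer returns per-class priors and word probabilities but no scorer; a hypothetical `classify` helper (not part of the original) showing how they would typically be combined in log space:

import numpy as np

def classify(post, c0, c1):
    pC0, pWGivenC0 = c0
    pC1, pWGivenC1 = c1
    post = np.array(post)
    # a tiny epsilon guards against log(0) for words never seen in a class
    logp0 = np.log(pC0) + np.sum(post * np.log(pWGivenC0 + 1e-12))
    logp1 = np.log(pC1) + np.sum(post * np.log(pWGivenC1 + 1e-12))
    return int(logp1 > logp0)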
Example #15
def numeric_gemm_var1_flat(A, B, C, mc, kc, nc, mr=1, nr=1):
  M, N = C.shape
  K = A.shape[0]

  mc = min(mc, M)
  kc = min(kc, K)
  nc = min(nc, N)  

  tA = numpy.zeros((mc, kc), dtype=float)  # numpy.float was removed; the builtin float is equivalent
  tB = numpy.zeros((kc, N), dtype=float)

  for k in range(0, K, kc):
    # Pack B into tB
    tB[:,:] = B[k:k+kc:,:]
    
    for i in range(0, M, mc):
      imc = i+mc
      # Pack A into tA
      tA[:,:] = A[i:imc,k:k+kc]
      
      for j in range(0, N): # , nc):
        # Cj += ABj + Cj
        # jnc = j+nc
        ABj = numpy.dot(tA, tB[:,j])  # matrixmultiply was the Numeric-era name for dot
        numpy.add(C[i:imc:,j], ABj, C[i:imc:,j])
        
        # Store Caux into memory
  return
Example #16
 def __iadd__(self, other):
     self.fc = NP.add(self.fc, other.fc)
     self.y = NP.add(self.y, other.y)
     self.x = NP.add(self.x, other.x)
     self.turn = NP.add(self.turn, other.turn)
     self.weight = NP.add(self.weight, other.weight)
     return self
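Note that `NP.add(a, b)` allocates a new array, so the `__iadd__` above rebinds attributes rather than updating buffers; a sketch of a truly in-place variant (assuming the module's `NP = numpy` alias and same-shape array attributes):

def __iadd__(self, other):
    for name in ('fc', 'y', 'x', 'turn', 'weight'):
        # out= writes into the existing left-hand buffer instead of allocating
        NP.add(getattr(self, name), getattr(other, name), out=getattr(self, name))
    return self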
Example #17
def scale_samples(params, bounds):
    '''
    Rescales samples in 0-to-1 range to arbitrary bounds.

    Arguments:
        bounds - list of lists of dimensions num_params-by-2
        params - numpy array of dimensions num_params-by-N,
        where N is the number of samples
    '''
    # Check bounds are legal (upper bound is greater than lower bound)
    b = np.array(bounds)
    lower_bounds = b[:, 0]
    upper_bounds = b[:, 1]

    if np.any(lower_bounds >= upper_bounds):
        raise ValueError("Bounds are not legal")

    # This scales the samples in-place, by using the optional output
    # argument for the numpy ufunctions
    # The calculation is equivalent to:
    #   sample * (upper_bound - lower_bound) + lower_bound
    np.add(np.multiply(params,
                       (upper_bounds - lower_bounds),
                       out=params),
           lower_bounds,
           out=params)
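A short usage sketch (the in-place `out=params` requires a float array):

import numpy as np

bounds = [[0.0, 10.0], [-1.0, 1.0]]
samples = np.array([[0.0, 0.0],
                    [0.5, 0.5],
                    [1.0, 1.0]])
scale_samples(samples, bounds)
print(samples)   # [[ 0. -1.] [ 5.  0.] [10.  1.]]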
Example #18
def triangle_areas_from_array(arr):
    """
    take an (N,2) array of points and return an (N,1)
    array of the areas of those triangles, where the first
    and last areas are np.inf

    see triangle_area for algorithm
    """

    result = np.empty((len(arr),), arr.dtype)
    result[0] = np.inf
    result[-1] = np.inf

    p1 = arr[:-2]
    p2 = arr[1:-1]
    p3 = arr[2:]

    # accumulators to avoid unnecessary intermediate arrays
    accr = result[1:-1]  # Accumulate directly into result
    acc1 = np.empty_like(accr)

    np.subtract(p2[:, 1], p3[:, 1], out=accr)
    np.multiply(p1[:, 0], accr, out=accr)
    np.subtract(p3[:, 1], p1[:, 1], out=acc1)
    np.multiply(p2[:, 0], acc1, out=acc1)
    np.add(acc1, accr, out=accr)
    np.subtract(p1[:, 1], p2[:, 1], out=acc1)
    np.multiply(p3[:, 0], acc1, out=acc1)
    np.add(acc1, accr, out=accr)
    np.abs(accr, out=accr)
    accr /= 2.
    # Notice: accr was writing into result, so the answer is in there
    return result
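A quick check of the shoelace-style computation above:

import numpy as np

pts = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [2.0, 3.0]])
print(triangle_areas_from_array(pts))   # [inf 0.5 0.5 inf]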
Example #19
File: wcsutil.py Project: erykoff/esutil
def arrscl(arr, minval, maxval, arrmin=None, arrmax=None):
    # makes a copy either way (asarray would not if it was an array already)
    output = numpy.array(arr)
    
    if arrmin is None: arrmin = output.min()
    if arrmax is None: arrmax = output.max()
    
    if output.size == 1:
        return output
    
    if (arrmin == arrmax):
        sys.stdout.write('arrmin must not equal arrmax\n')
        return None

    try:
        a = (maxval - minval)/(arrmax - arrmin)
        b = (arrmax*minval - arrmin*maxval)/(arrmax - arrmin)
    except:
        sys.stdout.write("Error calculating a,b: %s %s\n" % 
                         (sys.exc_info()[0], sys.exc_info()[1]) )
        return None

    # in place
    numpy.multiply(output, a, output)
    numpy.add(output, b, output)
    
    return output
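A usage sketch of the in-place rescaling:

import numpy

a = numpy.array([0.0, 5.0, 10.0])
print(arrscl(a, -1.0, 1.0))   # [-1.  0.  1.]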
Example #20
File: core.py Project: RzeBar/Dipoles3D
	def __step(self,vis=False):
		## compute the field intensity at every point
		for to_mod_dipole in self.dlist:
			to_mod_dipole.E1=np.array([0.,0.,0.])
			to_mod_dipole.E2=np.array([0.,0.,0.])
			for get_dipole in self.dlist:
				if not to_mod_dipole == get_dipole:
					to_mod_dipole.E1=np.add(to_mod_dipole.E1,GetElectricField(to_mod_dipole.r1,get_dipole))
					to_mod_dipole.E2=np.add(to_mod_dipole.E2,GetElectricField(to_mod_dipole.r2,get_dipole))

		## let the electric forces act on the dipoles
		for i in range(len(self.dlist)):
			if IsInWorld(self.dlist[i],self.world):		# check that the dipole is still inside the world
				LetElectricForceWork(self.dlist[i],self.dt)
			else:
				raise RuntimeError("dipole left the world")	# was `raise 0`, which is invalid; end the simulation

		## visualize the step
		if vis:
			print("Time: "+str(self.time)+"  Refreshing...")
			self.visdata.NextFrame(self.dlist)

		## write the data to the file
		self.fobj.write(str(self.time)+"\n")
Example #21
    def prepareTransitionalMat(self):
        # create sigma_x matrix
        sigmax = np.matrix(self.sigma_x)

        # non-changing channel
        self.H = self.p0*np.identity(2**self.size)  # not changing states

        # nearest-neighbour changing channel
        for i in range(self.size-1):
            Tmatrix = np.identity(1)
            for j in range(self.size):
                if j == i or j == i+1:
                    Tmatrix = np.kron(Tmatrix, sigmax)
                else:
                    Tmatrix = np.kron(Tmatrix, np.identity(2))
            self.H = np.add(self.H, Tmatrix * self.p1)

        # second-neighbour changing channel
        for i in range(self.size-2):
            Tmatrix = np.identity(1)
            for j in range(self.size):
                if j == i or j == i+2:
                    Tmatrix = np.kron(Tmatrix, sigmax)
                else:
                    Tmatrix = np.kron(Tmatrix, np.identity(2))
            self.H = np.add(self.H, Tmatrix * self.p2)
Example #22
def plot3d(data):
	windowIndex = 0
	msgrateIndex = 1
	intervalLengthIndex = 3
	dirCPUTOTALIndex=11+6 -2
	DecCPUTotalIndex=23+6 -2
	PunCPUToTalIndex=35+6 -2 
	MaxSize=36+6


	intervals = data[:,intervalLengthIndex]
	dirTimes = data[:,dirCPUTOTALIndex]
	decTime = data[:,DecCPUTotalIndex]
	PunTime = data[:,PunCPUToTalIndex]
	totalTime = np.add(dirTimes,decTime)
	totalTime = np.add(totalTime,PunTime)
	rate = data[:,msgrateIndex]  # was intervalLengthIndex, which duplicated `intervals` and left msgrateIndex unused

	sizes = data[:,MaxSize]

	fig = plt.figure()
	ax = fig.add_subplot(111, projection='3d')

	ax.scatter(rate,intervals,totalTime)
	ax.set_xlabel(' Message rate (msg/s)')
	ax.set_ylabel('interval')
	ax.set_zlabel('time (s)')
	ax.xaxis.set_scale('log')
	ax.yaxis.set_scale('log')

	plt.show()	
Example #23
def average_perceptron(feature_matrix, labels, T):
    theta = np.empty_like(feature_matrix[0])
    theta.fill(0.)
    theta_sum = theta  
    theta_0 = 0.0
    theta_0_sum = theta_0
    ticker = 0
    update_track = 0
    
    while ticker < T:
        
        for i in range(len(feature_matrix)):        

            check_before_label = np.add(np.dot(theta, feature_matrix[i]),theta_0)

            check_mult_label = np.multiply(labels[i], check_before_label)
            if check_mult_label <= 0:
                update_track += 1                
                (theta, theta_0) = perceptron_single_step_update(feature_matrix[i], labels[i], theta, theta_0)
                theta_sum = np.add(theta, theta_sum)
                theta_0_sum += theta_0

        ticker += 1
        
    theta_average = np.divide(theta_sum, update_track)
    theta_0_average = theta_0_sum/update_track
        
    return (theta_average, theta_0_average)
Example #24
    def discretize(self, time_slice_length):
        self.time_slice_length = time_slice_length

        # compute the total number of time-slices
        time_delta = (self.end_date - self.start_date)
        time_delta = time_delta.total_seconds()/60
        self.time_slice_count = int(time_delta // self.time_slice_length) + 1

        # parallelize tweet partitioning using a pool of processes (number of processes = number of cores).
        nb_processes = cpu_count()
        nb_tweets_per_process = self.size // nb_processes
        portions = []
        for i in range(0, self.size, nb_tweets_per_process):
            j = i + nb_tweets_per_process if i + nb_tweets_per_process < self.size else self.size
            portions.append((i, j))
        p = Pool()
        results = p.map(self.discretize_job, portions)
        results.sort(key=lambda x: x[0])

        # insert the time-slices number in the data frame and compute the final frequency matrices
        time_slices = []
        self.tweet_count = np.zeros(self.time_slice_count, dtype=int)  # np.int was removed from NumPy
        self.global_freq = csr_matrix((len(self.vocabulary), self.time_slice_count), dtype=np.short)
        self.mention_freq = csr_matrix((len(self.vocabulary), self.time_slice_count), dtype=np.short)
        for a_tuple in results:
            time_slices.extend(a_tuple[1])
            self.tweet_count = np.add(self.tweet_count, a_tuple[2])
            self.global_freq = np.add(self.global_freq, a_tuple[3])
            self.mention_freq = np.add(self.mention_freq, a_tuple[4])
        self.df['time_slice'] = np.array(time_slices)
Example #25
 def __add__(self, other):
     if isinstance(other, Raster):
         result = np.add(self.data, other.data)
     else:
         result = np.add(self.data, other)
     
     return Raster(None, result, self.nodata, self.driver, self.georef, self.proj)
Example #26
 def createChord(self,*args):
   if len(args) == 1:
     self.notesCombined = args[0]
   if len(args) == 2:
     self.notesCombined = np.add(args[0],args[1])
   if len(args) == 3:
     self.notesCombined = np.add(args[0],np.add(args[1],args[2]))
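The cascade above only covers one to three arrays; a sketch of a variant (an illustration, not the project's API) that element-wise adds any number of equal-length note arrays:

import numpy as np

def createChord(self, *args):
    # np.sum over axis 0 adds any number of equal-length arrays element-wise
    self.notesCombined = np.sum(np.asarray(args), axis=0)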
Example #27
def plot_selfish_cooperative(num_runs):
	import seaborn as sns
	for exp in range(num_runs):
		fig  = plt.figure()
		cooperators = [3,4,5]
		selfish = [0,1,2]

		
		fname = Parameters.dirname + '/%i_assembly_%i.dat' % (exp, 0)
		t, pop = np.loadtxt(fname, unpack = True)

		cooperative_pop = [0.0]*len(pop[:-5])
		selfish_pop = [0.0]*len(pop[:-5])

		for c in cooperators:
			fname = Parameters.dirname + '/%i_assembly_%i.dat' % (exp, c)
			t, pop = np.loadtxt(fname, unpack = True)
			cooperative_pop = np.add(cooperative_pop,pop[:-5] )

		for s in selfish:
			fname = Parameters.dirname + '/%i_assembly_%i.dat' % (exp, s)
			t, pop = np.loadtxt(fname, unpack = True)
			selfish_pop = np.add(selfish_pop,pop[:-5] )

		ax = fig.add_subplot(1,1,1)
		ax.plot(t[:-5], selfish_pop, label = 'selfish', color = 'r')
		ax.plot(t[:-5], cooperative_pop, label = 'cooperative', color = 'g')
		ax.legend(loc = 'upper left')
		plt.xlabel('System Time')
		plt.ylabel('Total Abundance')

		#plt.show()
		plt.savefig(Parameters.dirname + '/%i_cooperative_vs_selfish.png' % exp)
		plt.close()
Example #28
File: column.py Project: MQQ/astropy
def _wrapx(input, output, nx):
    """
    Wrap the X format column Boolean array into an ``UInt8`` array.

    Parameters
    ----------
    input
        input Boolean array of shape (`s`, `nx`)

    output
        output ``UInt8`` array of shape (`s`, `nbytes`)

    nx
        number of bits
    """

    output[...] = 0  # reset the output
    nbytes = ((nx - 1) // 8) + 1
    unused = nbytes * 8 - nx
    for i in range(nbytes):
        _min = i * 8
        _max = min((i + 1) * 8, nx)
        for j in range(_min, _max):
            if j != _min:
                np.left_shift(output[..., i], 1, output[..., i])
            np.add(output[..., i], input[..., j], output[..., i])

    # shift the unused bits
    np.left_shift(output[..., i], unused, output[..., i])
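The shift-and-add loop is big-endian bit packing; for comparison, `np.packbits` pads the trailing byte with zero bits just as the final left shift does here (a sketch, reusing `_wrapx` from above):

import numpy as np

bits = np.random.randint(0, 2, size=(4, 11)).astype(np.uint8)
packed = np.zeros((4, 2), dtype=np.uint8)
_wrapx(bits, packed, 11)
assert np.array_equal(packed, np.packbits(bits, axis=-1))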
Example #29
File: CLUBS.py Project: elgicse/clubs
	def __init__(self, limitsLow, limitsHigh, status = None):
		# Either merge two clusters (bottom-up)
		# or create a new micro-cluster (top-down)
		if status is "merging":
			# The first two input parameters are two newCluster() objects
			first = limitsLow
			second = limitsHigh
			self.limitsLow = [None]*ndim
			self.limitsHigh = [None]*ndim
			for i in range(ndim):
				self.limitsLow[i] = min(first.limitsLow[i], second.limitsLow[i])
				self.limitsHigh[i] = max(first.limitsHigh[i], second.limitsHigh[i])
			self.findKeys()
			self.weight = first.weight + second.weight
			self.Sum = np.add(first.Sum, second.Sum)
			self.sqSum = np.add(first.sqSum, second.sqSum)#self.computeSqSum()
			self.computeSSQ()
			self.computeCoG()
		else:
			# The first two parameters are the edges of the cluster
			self.limitsLow = limitsLow
			self.limitsHigh = limitsHigh
			self.findKeys()
			self.computeWeight()
			self.computeSum()
			self.computeSqSum()
			self.computeSSQ()
			self.computeCoG()
Example #30
def open_cfd(cfd, which_window):
    if which_window == "watchlists":
        win = watchlists
    elif which_window == "open_positions":
        win = open_positions
    else:
        raise Exception("Window not defined")

    pos = wait_for_window(win, 1)
    if pos:
        screen = load_prev_screenshot()
        moveclick(*add(pos, (5, 5)))  # To activate Window (if not)
    else:
        print "ERROR: " + which_window + " not found"
        return False

    win_pos = pos
    maxval, pos = get_match(screen[win_pos[1] : win_pos[1] + 200, win_pos[0] : win_pos[0] + 400], cfd_list[cfd], 1)
    pos = add(win_pos, pos)
    if maxval > MATCH_OK:
        moveclick(*add(pos, (5, 5)))
    else:
        print "CFD " + cfd + " not found in Screen"
        return False

    return True
Example #31
    iteration_start = time.time()
    ret, frame_bg = cap.read()
    cv2.flip(frame_bg, 1, frame_bg)

    # The most essential part, ranging different modes
    frame_bg, frame_fg, MODE = video_mode(frame_bg, frame_fg, MODE, eval('args_'+MODE))

    mask_fg = frame_fg > 0

    if MODE == 'display':
        # Attachment of menu on the top of frame_bg
        frame_bg_with_menu = menu_top.attach_menu(
            frame_bg.copy(), args_menu["menu_dict"], args_menu["icon_len_side"]
        )
        frame = np.add(
            np.multiply(frame_fg, mask_fg),
            np.multiply(frame_bg_with_menu, ~mask_fg)
        )
    else:
        frame = np.add(
            np.multiply(frame_fg, mask_fg),
            np.multiply(
                cv2.resize(
                    frame_bg,
                    mask_fg.shape[1::-1],
                    interpolation=cv2.INTER_CUBIC
                ),
                ~mask_fg
            )
        )
    fps = str(round(1 / (time.time() - iteration_start), 2))
    frame = cv2.resize(frame, (800, 600))
Example #32
print(x.dtype)
print(x.shape)
print(x.strides)  # strides: byte jumps to traverse along an axis (x,y,z..)
# strides are required as they reflect how numpy stores arrays in a data buffer

y = np.ones(shape=(2, 3, 4), dtype=np.int32)
print(y)
print(y.strides)

np.arange(20)
np.arange(20).strides

# Array operations (multi-dimensional)
a = np.array([1, 2, 3, 5])
b = np.array([2, 3, 5, 6])
np.add(a, b)

# newaxis
b[:, np.newaxis].shape
np.expand_dims(b, 1).shape
# identical results

# outside of numpy array primitives (which runs on c compiled code)
# the python operations on arrays become very slow

# numpy roll
# shift array
np.roll(b, 2)
np.roll(b, -2)

#! NUMBA
Example #33
    for pos in snpmatrix:
        num_mutations = pos.count('1')
        if num_mutations == 0:
            continue
        sfs[num_mutations - 1] = sfs[num_mutations - 1] + 1
    final_sfs.append(sfs)

sfs_lengths = [len(x) for x in final_sfs]

data = np.zeros(max(sfs_lengths))

for sfs in final_sfs:
    if len(sfs) < len(data):
        size_diff = len(data) - len(sfs)
        sfs = np.pad(sfs, (0, size_diff), "constant", constant_values=0)
    data = np.add(data, sfs)

if data[-1] == 0:
    data = np.delete(data, -1)
data = np.insert(data, 0, 0)
plt.xkcd()

fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
plt.xticks(range(1, len(data)))
plt.yticks([])
ax.set_xlim([1, len(data)])
ax.set_ylim([0, np.amax(data)])
Example #34
    ax = fig.add_subplot(111)
    x, y = fhist1d(cos12, cosrange[0], cosrange[1], cosrange[2])
    ax.plot(x, y)
    ax.set_title(r'Cos$\theta$(%s,%s)' % (label_species[0], label_species[1]))
    ax.set_xlabel(r'Cos$\theta$')
    ax.set_title('%s + %s' % (label_species[0], label_species[1]))

# KErange=[0,40,0.2] # last ppt
KErange = [0, 40, 0.4]
if Plot_KE:
    # x,y=fhist1d(KE1,KErange[0],KErange[1],KErange[2])
    fig, ax = plt.subplots()
    # ax.plot(x,y,label=label_species[0])
    x, y = fhist1d(KE2, KErange[0], KErange[1], KErange[2])
    # ax.plot(x,y,label=label_species[1])
    x, y = fhist1d(np.add(KE1, KE2), KErange[0], KErange[1], KErange[2])
    KERx = x
    KERy = y
    ax.step(x, y, label='KER')
    if Charge_positions_known:
        ax.axvline(x=KER_th,
                   color='g',
                   linestyle='--',
                   label='KER (CEI model) ')
    ax.legend(frameon=False)
    ax.set_xlabel('KER (eV)')
    ax.set_ylabel('Counts (arb units)')
    ax.set_title('%s + %s' % (label_species[0], label_species[1]))
    plt.savefig(basedir1 + 'no_mom_gate_KER.png', bbox_inches='tight')

    x, y = fhist1d(KE1, KErange[0], KErange[1], KErange[2])
Example #35
f = grades.var()

g = grades.mean(axis=0)

h = grades.mean(axis = 1)
print("Average of each student", h)

numbers = np.array([1,4,9,16,25,36])

sqrt = np.sqrt(numbers)

print(sqrt)

numbers2 = np.arange(1,7) * 10

np_add = np.add(numbers, numbers2)

print(np_add)

np_multiply = np.multiply(numbers2, 5)

numbers3 = numbers2.reshape(2,3)

numbers4 = np.array([2,4,6])

np_multiply2 = np.multiply(numbers3, numbers4)  # the np.multiply call was missing, leaving a tuple

print(np_multiply2)
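The corrected call broadcasts shape (2, 3) against shape (3,); a worked sketch of the result:

# numbers3           numbers4       np.multiply(numbers3, numbers4)
# [[10, 20, 30],   * [2, 4, 6]  ->  [[ 20,  80, 180],
#  [40, 50, 60]]                     [ 80, 200, 360]]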

#indexing and slicing
Example #36
# DESCRIPTION: create an image of a checker piece
# square_width: width of the image for the checker
# color1: color used for the checker
# color2: color used for the checker
def draw_checker(square_width, color1, color2):
    # create a checker image
    checker = np.ones((square_width,square_width,3), dtype=np.uint8)*255
    #cv2.circle(img, center, radius, color[, thickness[, lineType[, shift]]]) → None
    cv2.circle(checker, (square_width//2, square_width//2), square_width//2, color1, -1)
    cv2.circle(checker, (square_width//2, square_width//2), square_width//3, color2, -1)
    cv2.circle(checker, (square_width//2, square_width//2), square_width//4, color1, -1)
    return checker

brown1 = np.subtract((47,62,139),(47,50,50))
brown1 = brown1.tolist()
brown2 = np.add(brown1,(40,40,40))
brown2 = brown2.tolist()
checker_brn = draw_checker(square_width, brown1, brown2)

# DESCRIPTION: place checker on the board graphically
# img: image to draw checker on
# checker: image of the checker
# i: the horizontal index of the square to draw checker on
# j: the vertical index of the square to draw checker on
# board_x: x-coordinate of the board
# board_y: y-coordinate of the board
# square_width: size of the board square
# method: if 1 copy checker over, if 2 use weighted addition, if 3 use thresholding, if 4 use color filtering
def place_checker(img, checker, i, j, board_x, board_y, square_width, method=1):

    square = img[(board_x+(i-1)*square_width):(board_x+(i)*square_width),
Example #37
                batchB = imagesB[index * batch_size:(index + 1) * batch_size]

                # Translate images to opposite domain
                generatedB = generatorAToB.predict(batchA)
                generatedA = generatorBToA.predict(batchB)

                # Train the discriminator A on real and fake images
                dALoss1 = discriminatorA.train_on_batch(batchA, real_labels)
                dALoss2 = discriminatorA.train_on_batch(generatedA, fake_labels)

                # Train the discriminator B on real and fake images
                dBLoss1 = discriminatorB.train_on_batch(batchB, real_labels)
                dbLoss2 = discriminatorB.train_on_batch(generatedB, fake_labels)

                # Calculate the total discriminator loss
                d_loss = 0.5 * np.add(0.5 * np.add(dALoss1, dALoss2), 0.5 * np.add(dBLoss1, dbLoss2))

                print("d_loss:{}".format(d_loss))

                """
                Train the generator networks
                """
                g_loss = adversarial_model.train_on_batch([batchA, batchB],
                                                          [real_labels, real_labels, batchA, batchB, batchA, batchB])

                print("g_loss:{}".format(g_loss))

                dis_losses.append(d_loss)
                gen_losses.append(g_loss)

            """
Example #38
File: plotFFT.py Project: stiefen1/epuck
def do_fft(array):
    FFT = np.fft.fft(array)
    FFT_norme = np.sqrt(
        np.add(np.multiply(np.real(FFT), np.real(FFT)),
               (np.multiply(np.imag(FFT), np.imag(FFT)))))
    return FFT_norme
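Since sqrt(re^2 + im^2) is just the complex magnitude, an equivalent sketch:

import numpy as np

def do_fft_simple(array):
    # np.abs of a complex array returns sqrt(real**2 + imag**2)
    return np.abs(np.fft.fft(array))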
Example #39
#form factors silicon
# V3S = -3.04768 
# V8S =  0.74831
# V11S = 0.97961
V3S = -0.224 * convert
V8S =  0.055 * convert
V11S = 0.072 * convert
ffs = numpy.array([V3S, V8S, V11S])




ffg = ffg*(prop)
ffs = ffs*(1-prop)

ffc = numpy.add(ffg,ffs)
#compound form factor


# a function to calculate the structure factor
def strucfact(g):
    nxprime, nyprime, nzprime = g
    #this g will be the vector g - gprime; corresponds to the position in the matrix
    sf = numpy.cos((numpy.pi/4)*(nxprime + nyprime + nzprime))

    return sf


############################################################

Example #40
        g=G[ants]
        dt=np.zeros((k,2))
        alpha=np.zeros(k)
        p=prob(e,t)
        for i in range(0,k):
            curr_weight=weight(g,g_ini)
            if(p[i]>random.uniform(0,1)):
                swap=random.randint(0,k-1)
                while(swap == i and swap>=50):
                    swap=random.randint(0,k-1)
                g[:,i]=np.bitwise_xor(g[:,i],g[:,swap])
                dt[i][1]=q/curr_weight
                alpha[i]=1
            else:
                dt[i][0]=q/curr_weight
                #maybe alpha needs to be changed
            if(curr_weight<best):
                best=curr_weight
                bestg=g
        print(best)
                
        s=np.add(s,alpha)
        delta_t=np.add(delta_t, dt)
    t=np.add(np.multiply(roh,t),delta_t)
    e[:,0]=np.subtract(np.add(np.full(k,m),np.ones(k)),s)
    e[:,1]=np.add(s,np.ones(k))    
print(best)
print(bestg)
Example #41
    def _sparse_fit(self, X, strategy, missing_values, fill_value):
        """Fit the transformer on sparse data."""
        # Count the zeros
        if missing_values == 0:
            n_zeros_axis = np.zeros(X.shape[1], dtype=int)
        else:
            n_zeros_axis = X.shape[0] - np.diff(X.indptr)

        # Mean
        if strategy == "mean":
            if missing_values != 0:
                n_non_missing = n_zeros_axis

                # Mask the missing elements
                mask_missing_values = _get_mask(X.data, missing_values)
                mask_valids = np.logical_not(mask_missing_values)

                # Sum only the valid elements
                new_data = X.data.copy()
                new_data[mask_missing_values] = 0
                X = sparse.csc_matrix((new_data, X.indices, X.indptr),
                                      copy=False)
                sums = X.sum(axis=0)

                # Count the elements != 0
                mask_non_zeros = sparse.csc_matrix(
                    (mask_valids.astype(np.float64),
                     X.indices,
                     X.indptr), copy=False)
                s = mask_non_zeros.sum(axis=0)
                n_non_missing = np.add(n_non_missing, s)

            else:
                sums = X.sum(axis=0)
                n_non_missing = np.diff(X.indptr)

            # Ignore the error, columns with a np.nan statistics_
            # are not an error at this point. These columns will
            # be removed in transform
            with np.errstate(all="ignore"):
                return np.ravel(sums) / np.ravel(n_non_missing)

        # Median + Most frequent + Constant
        else:
            # Remove the missing values, for each column
            columns_all = np.hsplit(X.data, X.indptr[1:-1])
            mask_missing_values = _get_mask(X.data, missing_values)
            mask_valids = np.hsplit(np.logical_not(mask_missing_values),
                                    X.indptr[1:-1])

            # astype necessary for bug in numpy.hsplit before v1.9
            columns = [col[mask.astype(bool, copy=False)]
                       for col, mask in zip(columns_all, mask_valids)]

            # Median
            if strategy == "median":
                median = np.empty(len(columns))
                for i, column in enumerate(columns):
                    median[i] = _get_median(column, n_zeros_axis[i])

                return median

            # Most frequent
            elif strategy == "most_frequent":
                most_frequent = np.empty(len(columns))

                for i, column in enumerate(columns):
                    most_frequent[i] = _most_frequent(column,
                                                      0,
                                                      n_zeros_axis[i])

                return most_frequent

            # Constant
            elif strategy == "constant":
                return np.full(X.shape[1], fill_value)
Example #42
def train_multiple_outputs(n_images,
                           batch_size,
                           log_dir,
                           epoch_num,
                           critic_updates=5):
    data = load_images('../images/train', n_images)
    y_train, x_train = data['B'], data['A']

    g = generator_model()
    d = discriminator_model()
    d_on_g = generator_containing_discriminator_multiple_outputs(g, d)

    d_opt = Adam(lr=1E-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    d_on_g_opt = Adam(lr=1E-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08)

    d.trainable = True
    d.compile(optimizer=d_opt, loss=wasserstein_loss)
    d.trainable = False
    loss = [perceptual_loss, wasserstein_loss]
    loss_weights = [100, 1]
    d_on_g.compile(optimizer=d_on_g_opt, loss=loss, loss_weights=loss_weights)
    d.trainable = True

    log_path = './logs'
    tensorboard_callback = TensorBoard(log_path)

    hn_num = int(batch_size * 0.5)
    hp_num = int(batch_size * 0.5)

    output_true_batch, output_false_batch = np.ones((batch_size, 1)), -np.ones(
        (batch_size, 1))
    hard_true_batch, hard_false_batch = np.ones(
        (batch_size + hp_num, 1)), -np.ones((batch_size + hn_num, 1))

    for epoch in tqdm.tqdm(range(epoch_num)):
        permutated_indexes = np.random.permutation(x_train.shape[0])

        d_losses = []
        d_on_g_losses = []

        ##############
        init_blur = x_train[-batch_size:]
        init_lab = y_train[-batch_size:]

        init_gen = g.predict(x=init_blur, batch_size=hn_num)
        d.trainable = False
        temp_init = []
        for i in range(init_gen.shape[0]):
            t_s = d.predict(init_gen[i][np.newaxis, ...])[0][0]
            temp_init.append(t_s)
        init_ind = np.argsort(temp_init)[::-1][:hn_num]
        init_ind2 = np.argsort(temp_init)[:hn_num]

        hard_neg = init_gen[init_ind]
        hard_neg_y = init_lab[init_ind]
        hard_g_x = init_gen[init_ind2]
        hard_g_y = init_lab[init_ind2]

        ##########

        for index in range(int(x_train.shape[0] / batch_size)):
            batch_indexes = permutated_indexes[index * batch_size:(index + 1) *
                                               batch_size]
            image_blur_batch = x_train[batch_indexes]
            image_full_batch = y_train[batch_indexes]

            generated_images = g.predict(x=image_blur_batch,
                                         batch_size=batch_size)

            for _ in range(critic_updates):

                d.trainable = True
                neg_train = np.concatenate((generated_images, hard_neg),
                                           axis=0)
                pos_train = np.concatenate((image_full_batch, hard_neg_y),
                                           axis=0)
                d_loss_real = d.train_on_batch(pos_train, hard_true_batch)
                d_loss_fake = d.train_on_batch(neg_train, hard_false_batch)
                d_loss = 0.5 * np.add(d_loss_fake, d_loss_real)
                d_losses.append(d_loss)

            d.trainable = False
            temp_hn = []
            for i in range(neg_train.shape[0]):
                t_s = d.predict(neg_train[i][np.newaxis, ...])[0][0]
                temp_hn.append(t_s)
            hn_ind = np.argsort(temp_hn)[::-1][:hn_num]

            hard_neg = neg_train[hn_ind]
            hard_neg_y = pos_train[hn_ind]

            # #################################
            d.trainable = False

            g_blur = np.concatenate((image_blur_batch, hard_g_x), axis=0)
            g_full = np.concatenate((image_full_batch, hard_g_y), axis=0)
            d_on_g_loss = d_on_g.train_on_batch(g_blur,
                                                [g_full, hard_true_batch])
            d_on_g_losses.append(d_on_g_loss)

            temp_g = []
            for i in range(g_blur.shape[0]):
                t_s = d.predict(g_blur[i][np.newaxis, ...])[0][0]
                temp_g.append(t_s)
            g_ind = np.argsort(temp_g)[:hn_num]

            hard_g_x = g_blur[g_ind]
            hard_g_y = g_full[g_ind]
            #

            d.trainable = True

        # write_log(tensorboard_callback, ['g_loss', 'd_on_g_loss'], [np.mean(d_losses), np.mean(d_on_g_losses)], epoch_num)
        print(np.mean(d_losses), np.mean(d_on_g_losses))
        with open('log.txt', 'a+') as f:
            f.write('{} - {} - {}\n'.format(epoch, np.mean(d_losses),
                                            np.mean(d_on_g_losses)))

        save_all_weights(d, g, epoch, int(np.mean(d_on_g_losses)))
Example #43
    def Compute_Performance_Reward_fast_fading_with_power_asyn(
            self, actions_power):  # revised based on the fast-fading part
        # ===================================================
        #  --------- Used for Testing -------
        # ===================================================
        actions = actions_power[:, :, 0]  # the channel_selection_part
        power_selection = actions_power[:, :, 1]
        Interference = np.zeros(
            self.n_RB)  # Calculate the interference from V2V to V2I
        for i in range(len(self.vehicles)):
            for j in range(len(actions[i, :])):
                if not self.activate_links[i, j]:
                    continue
                Interference[actions[i][j]] += 10**((self.V2V_power_dB_List[power_selection[i,j]] - \
                                                     self.V2I_channels_with_fastfading[i, actions[i,j]] + \
                                                     self.vehAntGain + self.bsAntGain - self.bsNoiseFigure)/10)
        self.V2I_Interference = Interference + self.sig2
        V2V_Interference = np.zeros((len(self.vehicles), 3))
        V2V_Signal = np.zeros((len(self.vehicles), 3))
        Interfence_times = np.zeros((len(self.vehicles), 3))
        actions[(np.logical_not(self.activate_links))] = -1
        for i in range(self.n_RB):
            indexes = np.argwhere(actions == i)
            for j in range(len(indexes)):
                #receiver_j = self.vehicles[indexes[j,0]].neighbors[indexes[j,1]]
                receiver_j = self.vehicles[indexes[j,
                                                   0]].destinations[indexes[j,
                                                                            1]]
                V2V_Signal[indexes[j, 0],indexes[j, 1]] = 10**((self.V2V_power_dB_List[power_selection[indexes[j, 0],indexes[j, 1]]] -\
                self.V2V_channels_with_fastfading[indexes[j][0]][receiver_j][i] + 2*self.vehAntGain - self.vehNoiseFigure)/10)
                #V2V_Signal[indexes[j, 0],indexes[j, 1]] = 10**((self.V2V_power_dB_List[0] - self.V2V_channels_with_fastfading[indexes[j][0]][receiver_j][i])/10)
                if i < self.n_Veh:
                    V2V_Interference[indexes[j,0],indexes[j,1]] += 10**((self.V2I_power_dB - \
                    self.V2V_channels_with_fastfading[i][receiver_j][i] + 2*self.vehAntGain - self.vehNoiseFigure )/10)  # V2I links interference to V2V links
                for k in range(j + 1, len(indexes)):
                    receiver_k = self.vehicles[indexes[k][0]].destinations[
                        indexes[k][1]]
                    V2V_Interference[indexes[j,0],indexes[j,1]] += 10**((self.V2V_power_dB_List[power_selection[indexes[k,0],indexes[k,1]]] -\
                    self.V2V_channels_with_fastfading[indexes[k][0]][receiver_j][i]+ 2*self.vehAntGain - self.vehNoiseFigure)/10)
                    V2V_Interference[indexes[k,0],indexes[k,1]] += 10**((self.V2V_power_dB_List[power_selection[indexes[j,0],indexes[j,1]]] - \
                    self.V2V_channels_with_fastfading[indexes[j][0]][receiver_k][i]+ 2*self.vehAntGain - self.vehNoiseFigure)/10)
                    Interfence_times[indexes[j, 0], indexes[j, 1]] += 1
                    Interfence_times[indexes[k, 0], indexes[k, 1]] += 1

        self.V2V_Interference = V2V_Interference + self.sig2
        V2V_Rate = np.log2(1 + np.divide(V2V_Signal, self.V2V_Interference))
        V2I_Signals = self.V2I_power_dB - self.V2I_channels_abs[
            0:min(self.n_RB, self.n_Veh
                  )] + self.vehAntGain + self.bsAntGain - self.bsNoiseFigure
        V2I_Rate = np.log2(1 + np.divide(
            10**(V2I_Signals /
                 10), self.V2I_Interference[0:min(self.n_RB, self.n_Veh)]))
        #print("V2I information", V2I_Signals, self.V2I_Interference, V2I_Rate)

        # -- compute the latency constraints --
        self.demand -= V2V_Rate * self.update_time_asyn * 1500  # decrease the demand
        self.test_time_count -= self.update_time_asyn  # compute the time left for estimation
        self.individual_time_limit -= self.update_time_asyn  # compute the time left for individual V2V transmission
        self.individual_time_interval -= self.update_time_asyn  # compute the time interval left for next transmission

        # --- update the demand ---
        new_active = self.individual_time_interval <= 0
        self.activate_links[new_active] = True
        self.individual_time_interval[new_active] = np.random.exponential(
            0.02,
            self.individual_time_interval[new_active].shape) + self.V2V_limit
        self.individual_time_limit[new_active] = self.V2V_limit
        self.demand[new_active] = self.demand_amount

        # -- update the statistics---
        early_finish = np.multiply(self.demand <= 0, self.activate_links)
        unqualified = np.multiply(self.individual_time_limit <= 0,
                                  self.activate_links)
        self.activate_links[np.add(early_finish, unqualified)] = False  # boolean add acts as logical OR
        self.success_transmission += np.sum(early_finish)
        self.failed_transmission += np.sum(unqualified)
        fail_percent = self.failed_transmission / (
            self.failed_transmission + self.success_transmission + 0.0001)
        return V2I_Rate, fail_percent
Example #44
# Functions on arrays

import numpy as np
array = np.arange(5)

# Returns the square root of each element
print(np.sqrt(array))

# Create arrays with random values
array2 = np.random.rand(5)
print(array2)

lista = [5,6,7,8,9]
array3 = np.array(lista)
print(array)
print(array3)

suma = np.add(array, array3)
print(suma)

# element-wise maximum of the two arrays
maximo = np.maximum(array,array3)
print(maximo)
Example #45
def generate_data_clusters(n_train=1000,
                           n_test=500,
                           n_clusters=2,
                           n_features=2,
                           contamination=0.1,
                           size='same',
                           density='same',
                           dist=0.25,
                           random_state=None,
                           return_in_clusters=False):
    """Utility function to generate synthesized data in clusters.
       Generated data can involve the low density pattern problem and global
       outliers which are considered as difficult tasks for outliers detection
       algorithms.

    Parameters
    ----------
    n_train : int, (default=1000)
        The number of training points to generate.

    n_test : int, (default=500)
        The number of test points to generate.

    n_clusters : int, optional (default=2)
       The number of centers (i.e. clusters) to generate.

    n_features : int, optional (default=2)
       The number of features for each sample.

    contamination : float in (0., 0.5), optional (default=0.1)
       The amount of contamination of the data set, i.e.
       the proportion of outliers in the data set.

    size : str, optional (default='same')
       Size of each cluster: 'same' generates clusters with same size,
       'different' generate clusters with different sizes.

    density : str, optional (default='same')
       Density of each cluster: 'same' generates clusters with same density,
       'different' generate clusters with different densities.

    dist: float, optional (default=0.25)
       Distance between clusters. Should be between 0. and 1.0
       It is used to avoid clusters overlapping as much as possible.
       However, if number of samples and number of clusters are too high,
       it is unlikely to separate them fully even if ``dist`` set to 1.0

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    return_in_clusters : bool, optional (default=False)
        If True, the function returns x_train, y_train, x_test, y_test each as
        a list of numpy arrays where each index represents a cluster.
        If False, it returns x_train, y_train, x_test, y_test each as numpy
        array after joining the sequence of clusters arrays,

    Returns
    -------
    X_train : numpy array of shape (n_train, n_features)
        Training data.

    y_train : numpy array of shape (n_train,)
        Training ground truth.

    X_test : numpy array of shape (n_test, n_features)
        Test data.

    y_test : numpy array of shape (n_test,)
        Test ground truth.
    """
    # initialize a random state and seeds for the instance
    random_state = check_random_state(random_state)

    if isinstance(n_clusters, int):
        check_parameter(n_clusters, low=1, param_name='n_clusters')
    else:
        raise ValueError("n_clusters should be int, got %s" % n_clusters)

    if isinstance(n_features, int):
        check_parameter(n_features, low=1, param_name='n_features')
    else:
        raise ValueError("n_features should be int, got %s" % n_features)

    if isinstance(contamination, float):
        check_parameter(contamination,
                        low=0,
                        high=0.5,
                        param_name='contamination')
    else:
        raise ValueError("contamination should be float, got %s" %
                         contamination)

    if isinstance(dist, float):
        check_parameter(dist, low=0, high=1.0, param_name='dist')
    else:
        raise ValueError("dist should be float, got %s" % dist)

    if not isinstance(return_in_clusters, bool):
        raise ValueError("return_in_clusters should be of type bool, "
                         "got %s" % return_in_clusters)

    # find the required number of outliers and inliers
    n_samples = n_train + n_test
    n_outliers = int(n_samples * contamination)
    n_inliers = n_samples - n_outliers

    if size == 'same':
        a_ = [int(n_inliers / n_clusters)] * (n_clusters - 1)
        clusters_size = a_ + [int(n_inliers - sum(a_))]
    elif size == 'different':
        if (n_clusters * 10) > n_samples:
            raise ValueError('number of samples should be at least 10 times '
                             'the number of clusters')
        if (n_clusters * 10) > n_inliers:
            raise ValueError(
                'contamination ratio is too high, try to increase'
                ' number of samples or decrease the contamination')
        _r = 1. / n_clusters
        _offset = random_state.uniform(_r * 0.2,
                                       _r * 0.4,
                                       size=(int(n_clusters / 2), )).tolist()
        _offset += [i * -1. for i in _offset]
        clusters_size = np.round(np.multiply(n_inliers,
                                             np.add(_r, _offset))).astype(int)
        if n_clusters % 2 == 0:  # if it is even number
            clusters_size[n_clusters - 1] += n_inliers - sum(clusters_size)
        else:
            clusters_size = np.append(clusters_size,
                                      n_inliers - sum(clusters_size))
    else:
        raise ValueError(
            'size should be a string of value \'same\' or \'different\'')

    # check for clusters densities and apply split accordingly
    if density == 'same':
        clusters_density = random_state.uniform(
            low=0.1, high=0.5, size=(1, )).tolist() * n_clusters
    elif density == 'different':
        clusters_density = random_state.uniform(low=0.1,
                                                high=0.5,
                                                size=(n_clusters, ))
    else:
        raise ValueError(
            'density should be a string of value \'same\' or \'different\'')

    # calculate number of outliers for every cluster
    n_outliers_ = []
    for i in range(n_clusters):
        n_outliers_.append(int(round(clusters_size[i] * contamination)))
    _diff = int((n_outliers - sum(n_outliers_)) / n_clusters)
    for i in range(n_clusters - 1):
        n_outliers_[i] += _diff
    n_outliers_[n_clusters - 1] += n_outliers - sum(n_outliers_)
    random_state.shuffle(n_outliers_)

    # generate data
    X_clusters, y_clusters = [], []
    X = np.zeros([n_samples, n_features])
    y = np.zeros([n_samples])

    center_box = list(
        filter(
            lambda a: a != 0,
            np.linspace(-np.power(n_samples * n_clusters, dist),
                        np.power(n_samples * n_clusters, dist),
                        n_clusters + 2)))

    # index tracker for value assignment
    tracker_idx = 0

    for i in range(n_clusters):
        inliers, outliers = [], []
        _blob, _y = make_blobs(n_samples=clusters_size[i],
                               centers=1,
                               cluster_std=clusters_density[i],
                               center_box=(center_box[i], center_box[i + 1]),
                               n_features=n_features,
                               random_state=random_state)

        inliers.append(_blob)

        center_box_l = center_box[i] * (1.2 + dist + clusters_density[i])
        center_box_r = center_box[i + 1] * (1.2 + dist + clusters_density[i])

        outliers.append(
            make_blobs(n_samples=n_outliers_[i],
                       centers=1,
                       cluster_std=random_state.uniform(
                           clusters_density[i] * 3.5,
                           clusters_density[i] * 4.,
                           size=1),
                       center_box=(center_box_l, center_box_r),
                       n_features=n_features,
                       random_state=random_state)[0])
        _y = np.append(_y, [1] * int(n_outliers_[i]))

        # generate X
        if np.array(outliers).ravel().shape[0] > 0:
            stacked_X_temp = np.vstack(
                (np.concatenate(inliers), np.concatenate(outliers)))
            X_clusters.append(stacked_X_temp)
            tracker_idx_new = tracker_idx + stacked_X_temp.shape[0]
            X[tracker_idx:tracker_idx_new, :] = stacked_X_temp
        else:
            stacked_X_temp = np.concatenate(inliers)
            X_clusters.append(stacked_X_temp)
            tracker_idx_new = tracker_idx + stacked_X_temp.shape[0]
            X[tracker_idx:tracker_idx_new, :] = stacked_X_temp

        # generate Y
        y_clusters.append(_y)
        y[tracker_idx:tracker_idx_new] = _y

        tracker_idx = tracker_idx_new

    if return_in_clusters:
        return X_clusters, y_clusters
    else:
        # returns X_train, X_test, y_train, y_test
        return train_test_split(X,
                                y,
                                test_size=n_test,
                                random_state=random_state)
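For context, a hedged usage sketch of the generator above; the import path and function name are assumptions inferred from the signature (it matches pyod's generate_data_clusters):

# a minimal sketch, assuming this is pyod.utils.data.generate_data_clusters
from pyod.utils.data import generate_data_clusters

X_train, X_test, y_train, y_test = generate_data_clusters(
    n_train=1000, n_test=500, n_clusters=4, n_features=2,
    contamination=0.1, size='different', density='different',
    dist=0.25, random_state=42)
print(X_train.shape)       # (1000, 2)
print(int(y_train.sum()))  # roughly 100, i.e. ~10% outlier labels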
    def Compute_Performance_Reward_Batch(self, actions_power, idx):
        # actions_power adds the power dimension to the action selection
        # ==================================================
        # ------------- Used for Training ----------------
        # ==================================================
        actions = actions_power.copy()[:, :, 0]  # channel selection part
        power_selection = actions_power.copy()[:, :, 1]  # power level part
        V2V_Interference = np.zeros((len(self.vehicles), 3))
        V2V_Signal = np.zeros((len(self.vehicles), 3))
        Interference_times = np.zeros((len(self.vehicles), 3))  # 3 neighbors
        #print(actions)
        origin_channel_selection = actions[idx[0], idx[1]]
        actions[idx[0], idx[1]] = 100  # a sentinel value that matches no RB
        for i in range(self.n_RB):
            indexes = np.argwhere(actions == i)
            #print('index',indexes)
            for j in range(len(indexes)):
                #receiver_j = self.vehicles[indexes[j,0]].neighbors[indexes[j,1]]
                receiver_j = self.vehicles[indexes[j, 0]].destinations[indexes[j, 1]]
                V2V_Signal[indexes[j, 0], indexes[j, 1]] = 10**(
                    (self.V2V_power_dB_List[power_selection[indexes[j, 0], indexes[j, 1]]] -
                     self.V2V_channels_with_fastfading[indexes[j, 0], receiver_j, i] +
                     2 * self.vehAntGain - self.vehNoiseFigure) / 10)
                # interference from the V2I links
                V2V_Interference[indexes[j, 0], indexes[j, 1]] += 10**(
                    (self.V2I_power_dB -
                     self.V2V_channels_with_fastfading[i, receiver_j, i] +
                     2 * self.vehAntGain - self.vehNoiseFigure) / 10)

                for k in range(j + 1, len(indexes)):
                    receiver_k = self.vehicles[indexes[k, 0]].destinations[indexes[k, 1]]
                    V2V_Interference[indexes[j, 0], indexes[j, 1]] += 10**(
                        (self.V2V_power_dB_List[power_selection[indexes[k, 0], indexes[k, 1]]] -
                         self.V2V_channels_with_fastfading[indexes[k, 0], receiver_j, i] +
                         2 * self.vehAntGain - self.vehNoiseFigure) / 10)
                    V2V_Interference[indexes[k, 0], indexes[k, 1]] += 10**(
                        (self.V2V_power_dB_List[power_selection[indexes[j, 0], indexes[j, 1]]] -
                         self.V2V_channels_with_fastfading[indexes[j, 0], receiver_k, i] +
                         2 * self.vehAntGain - self.vehNoiseFigure) / 10)
                    Interference_times[indexes[j, 0], indexes[j, 1]] += 1
                    Interference_times[indexes[k, 0], indexes[k, 1]] += 1

        self.V2V_Interference = V2V_Interference + self.sig2
        V2V_Rate_list = np.zeros((self.n_RB, len(
            self.V2V_power_dB_List)))  # n_RB x number of power levels
        Deficit_list = np.zeros((self.n_RB, len(self.V2V_power_dB_List)))
        for i in range(self.n_RB):
            indexes = np.argwhere(actions == i)
            V2V_Signal_temp = V2V_Signal.copy()
            #receiver_k = self.vehicles[idx[0]].neighbors[idx[1]]
            receiver_k = self.vehicles[idx[0]].destinations[idx[1]]
            for power_idx in range(len(self.V2V_power_dB_List)):
                V2V_Interference_temp = V2V_Interference.copy()
                V2V_Signal_temp[idx[0], idx[1]] = 10**(
                    (self.V2V_power_dB_List[power_idx] -
                     self.V2V_channels_with_fastfading[idx[0], self.vehicles[idx[0]].destinations[idx[1]], i] +
                     2 * self.vehAntGain - self.vehNoiseFigure) / 10)
                V2V_Interference_temp[idx[0], idx[1]] += 10**(
                    (self.V2I_power_dB -
                     self.V2V_channels_with_fastfading[i, self.vehicles[idx[0]].destinations[idx[1]], i] +
                     2 * self.vehAntGain - self.vehNoiseFigure) / 10)
                for j in range(len(indexes)):
                    receiver_j = self.vehicles[indexes[j, 0]].destinations[indexes[j, 1]]
                    V2V_Interference_temp[idx[0], idx[1]] += 10**(
                        (self.V2V_power_dB_List[power_selection[indexes[j, 0], indexes[j, 1]]] -
                         self.V2V_channels_with_fastfading[indexes[j, 0], receiver_k, i] +
                         2 * self.vehAntGain - self.vehNoiseFigure) / 10)
                    V2V_Interference_temp[indexes[j, 0], indexes[j, 1]] += 10**(
                        (self.V2V_power_dB_List[power_idx] -
                         self.V2V_channels_with_fastfading[idx[0], receiver_j, i] +
                         2 * self.vehAntGain - self.vehNoiseFigure) / 10)
                V2V_Rate_cur = np.log2(
                    1 + np.divide(V2V_Signal_temp, V2V_Interference_temp))
                if (origin_channel_selection == i) and \
                        (power_selection[idx[0], idx[1]] == power_idx):
                    V2V_Rate = V2V_Rate_cur.copy()
                V2V_Rate_list[i, power_idx] = np.sum(V2V_Rate_cur)
                Deficit_list[i, power_idx] = -np.sum(
                    np.maximum(np.zeros(V2V_Signal_temp.shape),
                               (self.demand - self.individual_time_limit *
                                V2V_Rate_cur * 1500)))
        Interference = np.zeros(self.n_RB)
        V2I_Rate_list = np.zeros(
            (self.n_RB, len(self.V2V_power_dB_List)))  # n_RB x number of power levels
        for i in range(len(self.vehicles)):
            for j in range(len(actions[i, :])):
                if (i == idx[0] and j == idx[1]):
                    continue
                Interference[actions[i][j]] += 10**(
                    (self.V2V_power_dB_List[power_selection[i, j]] -
                     self.V2I_channels_with_fastfading[i, actions[i][j]] +
                     self.vehAntGain + self.bsAntGain - self.bsNoiseFigure) / 10)
        V2I_Interference = Interference + self.sig2
        for i in range(self.n_RB):
            for j in range(len(self.V2V_power_dB_List)):
                V2I_Interference_temp = V2I_Interference.copy()
                V2I_Interference_temp[i] += 10**(
                    (self.V2V_power_dB_List[j] -
                     self.V2I_channels_with_fastfading[idx[0], i] +
                     self.vehAntGain + self.bsAntGain - self.bsNoiseFigure) /
                    10)
                V2I_Rate_list[i, j] = np.sum(np.log2(1 + np.divide(
                    10**((self.V2I_power_dB + self.vehAntGain + self.bsAntGain -
                          self.bsNoiseFigure -
                          self.V2I_channels_abs[0:min(self.n_RB, self.n_Veh)]) / 10),
                    V2I_Interference_temp[0:min(self.n_RB, self.n_Veh)])))

        self.demand -= V2V_Rate * self.update_time_train * 1500
        self.test_time_count -= self.update_time_train
        self.individual_time_limit -= self.update_time_train
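        # np.add on the two boolean masks below acts as an elementwise logical OR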
        self.individual_time_limit[np.add(self.individual_time_limit <= 0,
                                          self.demand < 0)] = self.V2V_limit
        self.demand[self.demand < 0] = self.demand_amount
        if self.test_time_count == 0:
            self.test_time_count = 10
        return V2I_Rate_list, Deficit_list, self.individual_time_limit[idx[0], idx[1]]
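The link-budget pattern 10**((P_tx - PL + gains - NF) / 10) recurs throughout this method; a small hedged helper (the name and the example numbers are hypothetical) showing the dB-to-linear conversion it implements:

import numpy as np

def db_to_linear(p_tx_dB, path_loss_dB, ant_gain_dB, noise_figure_dB):
    # received power on a linear scale from a link budget expressed in dB
    return 10 ** ((p_tx_dB - path_loss_dB + ant_gain_dB - noise_figure_dB) / 10)

# e.g. 23 dB transmit power, 95 dB path loss, 2 * 3 dB antenna gain, 9 dB noise figure
print(db_to_linear(23, 95, 2 * 3, 9))  # ~3.16e-08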
maximal_value_fbp 	= 40000
minimal_value_sart 	= 0
maximal_value_sart 	= 40000
inter_min			= 0
inter_max			= 100000
# then find all the 8 points where electrodes are. 
x_center 			= image_pixels/2
y_center 			= image_pixels/2
radius 	 			= image_pixels/2 - image_pixels/10
# this is our series of coordinates to create lines from. 
r,c,val 			= circle_perimeter_aa(x_center, y_center, radius)
# img[r, c] = val * 255

# electrode points: 
theta_points=[np.pi,5*np.pi/4,3*np.pi/2,7*np.pi/4,0,np.pi/4,np.pi/2,3*np.pi/4]
n1 = np.add(x_center*np.ones(len(theta_points)) , radius*np.cos(theta_points)) #  shift center
n2 = np.add(y_center*np.ones(len(theta_points)) , radius*np.sin(theta_points)) #
x = []
y = []
for i in xrange(len(n1)):
	x.append(int(n1[i]))
	y.append(int(n2[i]))

firstreconstruction = []
for j in xrange(len(lines)-1): # len(lines)-1

	data = lines[j] # t1 #test_data #lines[2]
	d = dict()

	# 8 choose 2
	for i in xrange(28):
    def Compute_Performance_Reward_fast_fading_with_power(self, actions_power):
        # revised to account for the fast fading part
        actions = actions_power.copy()[:, :, 0]  # the channel_selection_part
        power_selection = actions_power.copy()[:, :, 1]
        Rate = np.zeros(len(self.vehicles))
        Interference = np.zeros(
            self.n_RB)  # V2V signal interference to V2I links
        for i in range(len(self.vehicles)):
            for j in range(len(actions[i, :])):
                if not self.activate_links[i, j]:
                    continue
                #print('power selection,', power_selection[i,j])
                Interference[actions[i][j]] += 10**(
                    (self.V2V_power_dB_List[power_selection[i, j]] -
                     self.V2I_channels_with_fastfading[i, actions[i, j]] +
                     self.vehAntGain + self.bsAntGain - self.bsNoiseFigure) /
                    10)  # fast fading

        self.V2I_Interference = Interference + self.sig2
        V2V_Interference = np.zeros((len(self.vehicles), 3))
        V2V_Signal = np.zeros((len(self.vehicles), 3))

        # remove the effects of inactive links
        #print('shapes', actions.shape, self.activate_links.shape)
        #print(not self.activate_links)
        actions[(np.logical_not(self.activate_links))] = -1
        #print('action are', actions)
        for i in range(self.n_RB):
            indexes = np.argwhere(actions == i)
            for j in range(len(indexes)):
                #receiver_j = self.vehicles[indexes[j,0]].neighbors[indexes[j,1]]
                receiver_j = self.vehicles[indexes[j, 0]].destinations[indexes[j, 1]]
                # compute the V2V signal links
                V2V_Signal[indexes[j, 0], indexes[j, 1]] = 10**(
                    (self.V2V_power_dB_List[power_selection[indexes[j, 0],
                                                            indexes[j, 1]]] -
                     self.V2V_channels_with_fastfading[
                         indexes[j][0]][receiver_j][i] + 2 * self.vehAntGain -
                     self.vehNoiseFigure) / 10)
                #V2V_Signal[indexes[j, 0],indexes[j, 1]] = 10**((self.V2V_power_dB_List[0] - self.V2V_channels_with_fastfading[indexes[j][0]][receiver_j][i])/10)
                if i < self.n_Veh:
                    V2V_Interference[indexes[j, 0], indexes[j, 1]] += 10**(
                        (self.V2I_power_dB -
                         self.V2V_channels_with_fastfading[i][receiver_j][i] +
                         2 * self.vehAntGain - self.vehNoiseFigure) / 10
                    )  # V2I links interference to V2V links
                for k in range(j + 1, len(indexes)):  # compute interference between peer V2V links
                    #receiver_k = self.vehicles[indexes[k][0]].neighbors[indexes[k][1]]
                    receiver_k = self.vehicles[indexes[k][0]].destinations[indexes[k][1]]
                    V2V_Interference[indexes[j, 0], indexes[j, 1]] += 10**(
                        (self.V2V_power_dB_List[power_selection[indexes[k, 0],
                                                                indexes[k, 1]]]
                         - self.V2V_channels_with_fastfading[
                             indexes[k][0]][receiver_j][i] +
                         2 * self.vehAntGain - self.vehNoiseFigure) / 10)
                    V2V_Interference[indexes[k, 0], indexes[k, 1]] += 10**(
                        (self.V2V_power_dB_List[power_selection[indexes[j, 0],
                                                                indexes[j, 1]]]
                         - self.V2V_channels_with_fastfading[
                             indexes[j][0]][receiver_k][i] +
                         2 * self.vehAntGain - self.vehNoiseFigure) / 10)

        self.V2V_Interference = V2V_Interference + self.sig2
        V2V_Rate = np.zeros(self.activate_links.shape)
        V2V_Rate[self.activate_links] = np.log2(
            1 + np.divide(V2V_Signal[self.activate_links],
                          self.V2V_Interference[self.activate_links]))

        #print("V2V Rate", V2V_Rate * self.update_time_test * 1500)
        #print ('V2V_Signal is ', np.log(np.mean(V2V_Signal[self.activate_links])))
        V2I_Signals = self.V2I_power_dB - self.V2I_channels_abs[
            0:min(self.n_RB, self.n_Veh
                  )] + self.vehAntGain + self.bsAntGain - self.bsNoiseFigure
        V2I_Rate = np.log2(1 + np.divide(
            10**(V2I_Signals /
                 10), self.V2I_Interference[0:min(self.n_RB, self.n_Veh)]))

        # -- compute the latency constraints --
        self.demand -= V2V_Rate * self.update_time_test * 1500  # decrease the demand
        self.test_time_count -= self.update_time_test  # compute the time left for estimation
        self.individual_time_limit -= self.update_time_test  # compute the time left for individual V2V transmission
        self.individual_time_interval -= self.update_time_test  # compute the time interval left for next transmission

        # --- update the demand ---

        new_active = self.individual_time_interval <= 0
        self.activate_links[new_active] = True
        self.individual_time_interval[new_active] = np.random.exponential(
            0.02,
            self.individual_time_interval[new_active].shape) + self.V2V_limit
        self.individual_time_limit[new_active] = self.V2V_limit
        self.demand[new_active] = self.demand_amount
        #print("demand is", self.demand)
        #print('mean rate of average V2V link is', np.mean(V2V_Rate[self.activate_links]))

        # -- update the statistics---
        early_finish = np.multiply(self.demand <= 0, self.activate_links)
        unqualified = np.multiply(self.individual_time_limit <= 0,
                                  self.activate_links)
        self.activate_links[np.add(early_finish, unqualified)] = False
        #print('number of activate links is', np.sum(self.activate_links))
        self.success_transmission += np.sum(early_finish)
        self.failed_transmission += np.sum(unqualified)
        #if self.n_step % 1000 == 0 :
        #    self.success_transmission = 0
        #    self.failed_transmission = 0
        failed_percentage = self.failed_transmission / (
            self.failed_transmission + self.success_transmission + 0.0001)
        # print('Percentage of failed', np.sum(new_active), self.failed_transmission, self.failed_transmission + self.success_transmission , failed_percentage)
        return V2I_Rate, failed_percentage
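Both methods above lean on np.add with boolean masks (e.g. np.add(early_finish, unqualified) and the time-limit reset), where addition of booleans behaves as an elementwise logical OR; a minimal demonstration:

import numpy as np

a = np.array([True, False, False])
b = np.array([False, False, True])
print(np.add(a, b))         # [ True False  True]
print(np.logical_or(a, b))  # same result, with clearer intent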
Example #49
0
# --- Begin Public Functions --------------------------------------------------

abs = utils.copy_docstring(  # pylint: disable=redefined-builtin
    tf.math.abs,
    lambda x, name=None: np.abs(x))

accumulate_n = utils.copy_docstring(
    tf.math.accumulate_n,
    lambda inputs, shape=None, tensor_dtype=None, name=None: (  # pylint: disable=g-long-lambda
        sum(map(np.array, inputs)).astype(utils.numpy_dtype(tensor_dtype))))

acos = utils.copy_docstring(tf.math.acos, lambda x, name=None: np.arccos(x))

acosh = utils.copy_docstring(tf.math.acosh, lambda x, name=None: np.arccosh(x))

add = utils.copy_docstring(tf.math.add, lambda x, y, name=None: np.add(x, y))

add_n = utils.copy_docstring(
    tf.math.add_n, lambda inputs, name=None: sum(map(np.array, inputs)))

angle = utils.copy_docstring(tf.math.angle,
                             lambda input, name=None: np.angle(input))

argmax = utils.copy_docstring(
    tf.math.argmax,
    lambda input, axis=None, output_type=tf.int64, name=None: (  # pylint: disable=g-long-lambda
        np.argmax(input, axis=0 if axis is None else _astuple(axis)).astype(
            utils.numpy_dtype(output_type))))

argmin = utils.copy_docstring(
    tf.math.argmin,
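(The argmin entry above is cut off in the source.) copy_docstring pairs each tf.math symbol with a NumPy implementation; a minimal sketch of such a helper, assuming it does no more than transplant the docstring (the real TFP utility may copy additional metadata):

def copy_docstring(original_fn, new_fn):
    # assumption: only the docstring is carried over to the replacement
    new_fn.__doc__ = original_fn.__doc__
    return new_fn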
Example #50
0
def exp(net_config, target_class):
    x_train, x_valid, y_train, y_valid = util.get_data(target_class)
    #print x_train
    datagen = getDataGenTrain()

    model = get_model()
    model.load_weights("donemodels/" + "0.552645073071.h5")
    model.save('my_model.h5')

    import pickle
    favorite_color = pickle.load(open("label_mapping.p", "rb"))
    print favorite_color
    inv_map = {v: k for k, v in favorite_color.items()}

    model_predict = model.predict(pre_process(x_valid))
    wrong_matrix = {}
    total_matrix = {}
    for i in range(len(model_predict)):
        predict_thresholded = model_predict[i] > 0.5
        y_valid_threshold = y_valid[i] > 0.5
        for j in range(len(predict_thresholded)):
            if predict_thresholded[j] and not y_valid_threshold[j]:
                indices = [k for k, x in enumerate(y_valid_threshold) if x]  # 'k' avoids clobbering the outer i (Py2 comprehensions leak)
                for e in indices:
                    j_name = inv_map[j]
                    e_name = inv_map[e]
                    if not predict_thresholded[e]:

                        if not (j_name, e_name) in wrong_matrix:
                            wrong_matrix[(j_name, e_name)] = 0
                        wrong_matrix[(j_name, e_name)] += 1

                    if not (j_name, e_name) in total_matrix:
                        total_matrix[(j_name, e_name)] = 0
                    total_matrix[(j_name, e_name)] += 1

    print "wrong matrix :"
    print wrong_matrix
    print "correct matrix:"
    print total_matrix

    for key in wrong_matrix:
        print key, wrong_matrix[key] / float(total_matrix[key])

    return
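    # NOTE: everything below this early return is unreachable dead code left from experimentation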
    for e in model_predict:
        print e

    for x, y in zip(x_valid, y_valid):
        flow = datagen.flow(np.array([x] * 10),
                            np.array([y] * 10),
                            batch_size=10)
        x_augmented, y_augmented = flow.next()
        for (x1, y1) in zip(x_augmented, y_augmented):
            showImage((x1).astype('uint8'), str(y1))

    for x_batch, y_batch in datagen.flow(x_train, y_train, batch_size=32):
        print x_batch[0]
        showImage((x_batch[0]).astype('uint8'), str(y_batch[0]))

    inputs = []
    targets = []
    for i in range(1000):
        inputs.append(np.fliplr(np.copy(x_valid[i])))
        targets.append(y_valid[i])
    x_valid1 = np.array(inputs)
    y_valid = np.array(targets)

    inputs = []
    for i in range(1000):
        inputs.append((np.copy(x_valid[i])))
    x_valid2 = np.array(inputs)

    predict = np.add(model.predict(x_valid1), model.predict(x_valid2))

    print predict

    p_valid = predict
    max_index_p_valid = np.argmax(p_valid, axis=1)
    max_index_y_valid = np.argmax(y_valid, axis=1)

    print "acc score: ", np.sum(
        max_index_p_valid == max_index_y_valid) / float(len(max_index_p_valid))
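Example #51
0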
def sum_m(w1, w2):
    return np.add(w1, w2)
Example #52
0
    def learn(self, Xtrain, ytrain):
        """
        Learn the weights using the training data
        """
        self.weights = np.zeros(Xtrain.shape[1])

        ### YOUR CODE HERE
        
        lmbd = self.params['lamb']
        
        numsamples = Xtrain.shape[0]
        # Xless = Xtrain[:,self.params['features']]
        Xless = Xtrain
        self.weights = np.random.rand(Xless.shape[1])
        err = 10000
        #cw =0;
        tolerance = 1e-4  # note: 10*np.exp(-4) evaluates to ~0.183; 1e-4 was likely intended
        i = 0
        
        
        w1 = self.weights
        #     cw_v =(np.dot(Xless, self.weights)-ytrain)
        #cw = (np.linalg.norm(cw_v)**2)/(2*numsamples)
        cw_v = np.dot(Xless, self.weights.T)
        cw = self.logit_cost(cw_v, Xless, ytrain) + lmbd * self.regularizer[0](self.weights)
        #  print(cw)
        errors = []
        runtm = []
        epch = []
        
        err = 1
        iteration = 1000
        #tm= time.time()
        while (abs(cw - err) > tolerance) and (i < iteration):
            err = cw
            g =  self.logit_cost_grad(cw_v, Xless, ytrain)
            obj = cw
            j = 0
            ita = -1 * self.params['stepsize']
            w = self.weights
            #  w1 = np.add(w,np.dot(ita,g))
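            # crude backtracking line search: shrink the step by 0.7 until the cost decreases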
            while(j<iteration):
                w1 = np.add(w,np.dot(ita,g))
                #  cw_v =(np.dot(Xless, w1)-ytrain)
                # cw = (np.linalg.norm(cw_v)**2)/(2*numsamples)
                cw_v = np.dot(Xless, w1.T)
                cw = self.logit_cost(cw_v, Xless, ytrain) + lmbd * self.regularizer[0](w1)
                ##    print (cw)

                if cw < np.absolute(obj - tolerance):
                    break
                ita = 0.7 * ita
                j = j + 1
            
            if j == iteration:
                self.weights = w
                ita = 0
            else:
                self.weights = w1
            
            # cw_v =(np.dot(Xless, self.weights)-ytrain)
            #cw = (np.linalg.norm(cw_v)**2)/(2*numsamples)
            cw_v = np.dot(Xless, self.weights.T)
            cw = self.logit_cost(cw_v, Xless, ytrain)
            #tm1 = time.time()-tm
            #runtm.append(tm1)
            #err = cw
            errors.append(err)
            i = i + 1
            epch.append(i)
Example #53
0
 def get_col_row_tot_2_array_from_data_record_array(value):
     col = np.right_shift(np.bitwise_and(value, 0x00FE0000), 17)
     row = np.add(np.right_shift(np.bitwise_and(value, 0x0001FF00), 8), 1)
     tot = np.bitwise_and(value, 0x0000000F)
     return col, row, tot
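A hedged round-trip check of the bit layout implied by the masks above (column in bits 17-23, row in bits 8-16 stored off by one, ToT in bits 0-3); the packed value is fabricated for illustration:

import numpy as np

# pack col=5, row=10 (stored as 9), tot=7 into one 32-bit word, then decode
value = np.array([(5 << 17) | (9 << 8) | 7], dtype=np.uint32)
col, row, tot = get_col_row_tot_2_array_from_data_record_array(value)
print(col, row, tot)  # [5] [10] [7]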
Example #54
0
        list1.append(int(input()))
    mat2.append(list1)
    
    #print(mat1)
    #print(mat2)
    
'''converting both matrices to numpy arrays for easy matrix manipulation'''
mat_arr1=np.asarray(mat1)
mat_arr2=np.asarray(mat2)

print(mat_arr1)
print(mat_arr2)
'''Performing matrix operations using numpy's built-in methods'''

print ("Addition of two matrices: ")
print (np.add(mat_arr1,mat_arr2))

print ("Subtraction of two matrices : ")
print (np.subtract(mat_arr1,mat_arr2))

print ("Multiplication of two matrices: ")
print (np.multiply(mat_arr1,mat_arr2))

print ("Matrix transposition : ")
print (mat_arr1.T)

print ("Determinant of a matrix: ")
det1 = np.linalg.det(mat_arr1)
print(det1)

print ("Inverse of a matrix: ")
Example #55
0
def overlay_arrays(clean_frames, noise_frames):
    return np.add(clean_frames, noise_frames)
Example #56
0
def main():
    a = [(0, 0), (0, 1), (0, -1), (1, 0), (-1, 0), (1, 1), (1, -1), (-1, 1),
         (-1, -1)]
    lst = np.add(a, [(1, 1)])

    print(lst)
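np.add broadcasts the single pair (1, 1) against all nine offsets, so each printed row is the original coordinate shifted by one in both dimensions, e.g. (0, 0) becomes [1 1] and (-1, -1) becomes [0 0].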
Example #57
0
# import numpy
import numpy as np

a = np.arange(9).reshape(3, 3)
b = np.array([10, 10, 10])
print('addition')
print(np.add(a, b))
print(a + b)
print('subtraction')
print(np.subtract(b, a))
print(b - a)

# using the out parameter
y = np.empty((3, 3), dtype=int)  # np.int is removed in modern NumPy; use the builtin int
np.multiply(a, 10, out=y)
print(y)

# trigonometric functions
a = np.array([0, 30, 60, 90])
print(np.sin(a * np.pi / 180))  # np.sin expects radians, so convert from degrees

# around, ceil, floor
a = np.array([1.0, 4.55, 123, 0.567, 25.332])
print('around:', np.around(a))
print('ceil:', np.ceil(a))
print('floor:', np.floor(a))

# statistical functions
# power()
a = np.arange(1, 13).reshape(3, 4)
print('original array a')
Example #58
0
    noise_sample = np.random.random_sample((r * c, input_dim))

    # start train
    for i in range(epochs):
        beg = time.time()
        for j in range(step):
            for k in range(n_discr):
                train_mask = np.random.choice(len(train_images), batch_size)
                batch_images = train_images[train_mask]
                noise = np.random.random_sample((batch_size, input_dim))
                gen_imgs = generator.predict(noise)

                d_loss_real = discriminator.train_on_batch(batch_images, valid)
                d_loss_fake = discriminator.train_on_batch(gen_imgs, fake)

                d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
                # WGAN: clip discriminator weights
                for l in discriminator.layers:
                    weights = l.get_weights()
                    weights = [
                        np.clip(w, -clip_value, clip_value) for w in weights
                    ]
                    l.set_weights(weights)

            noise = np.random.random_sample((batch_size, input_dim))
            g_loss = combine.train_on_batch(noise, valid)

        print("%d [D loss: %f] [G loss: %f]" % (i, d_loss, g_loss))

        r = 5
        c = 5
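Here np.add simply averages the discriminator's real and fake batch losses: 0.5 * np.add(d_loss_real, d_loss_fake) equals (d_loss_real + d_loss_fake) / 2 for scalars, and still works elementwise if train_on_batch returns a list of metrics, where plain + would concatenate the lists.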
Example #59
0
 def filter(self, mode, *args):
     """Applies a filter to the image.
     The existant filters are: GRAY, INVERT, OPAQUE, THRESHOLD, POSTERIZE,
     ERODE, DILATE and BLUR. This method requires numpy."""
     if not npy: raise ImportError("Numpy is required")
     if mode == GRAY:
         # Gray value = (77*(n>>16&0xff) + 151*(n>>8&0xff) + 28*(n&0xff)) >> 8
         # Where n is the ARGB color of the pixel
         lum1 = numpy.multiply(
             numpy.bitwise_and(numpy.right_shift(self.pixels, 16), 0xff),
             77)
         lum2 = numpy.multiply(
             numpy.bitwise_and(numpy.right_shift(self.pixels, 8), 0xff),
             151)
         lum3 = numpy.multiply(numpy.bitwise_and(self.pixels, 0xff), 28)
         lum = numpy.right_shift(numpy.add(numpy.add(lum1, lum2), lum3), 8)
         self.pixels = numpy.bitwise_and(self.pixels, 0xff000000)
         self.pixels = numpy.bitwise_or(self.pixels,
                                        numpy.left_shift(lum, 16))
         self.pixels = numpy.bitwise_or(self.pixels,
                                        numpy.left_shift(lum, 8))
         self.pixels = numpy.bitwise_or(self.pixels, lum)
     elif mode == INVERT:
         # This is the same as applying an exclusive or with the maximum value
         self.pixels = numpy.bitwise_xor(self.pixels, 0xffffff)
     elif mode == BLUR:
         if not args: args = [3]
         # Makes the image square by adding zeros.
         # This avoids the convolution (via fourier transform multiplication)
         # from jumping to another extreme of the image when a border is reached
         if self.width > self.height:
             dif = self.width - self.height
             updif = numpy.zeros(self.width * dif / 2, dtype=numpy.uint32)
             downdif = numpy.zeros(self.width * (dif - dif / 2),
                                   dtype=numpy.uint32)
             self.pixels = numpy.concatenate((updif, self.pixels, downdif))
             size = self.width
         elif self.width < self.height:
             dif = self.height - self.width
             leftdif = numpy.zeros(self.height * dif / 2,
                                   dtype=numpy.uint32)
             rightdif = numpy.zeros(self.height * (dif - dif / 2),
                                    dtype=numpy.uint32)
             self.pixels = self.pixels.reshape(self.height, self.width)
             self.pixels = numpy.transpose(self.pixels)
             self.pixels = self.pixels.reshape(self.width * self.height)
             self.pixels = numpy.concatenate(
                 (leftdif, self.pixels, rightdif))
             self.pixels = self.pixels.reshape(self.height, self.height)
             self.pixels = numpy.transpose(self.pixels)
             self.pixels = self.pixels.reshape(self.height * self.height)
             size = self.height
         else:
             size = self.height
         # Creates a gaussian kernel of the image's size
         _createKernel2d(args[0], size)
         # Divides the image's R, G and B channels, reshapes them
         # to square matrixes and applies two dimensional fourier transforms
         red = numpy.bitwise_and(numpy.right_shift(self.pixels, 16), 0xff)
         red = numpy.reshape(red, (size, size))
         red = numpy.fft.fft2(red)
         green = numpy.bitwise_and(numpy.right_shift(self.pixels, 8), 0xff)
         green = numpy.reshape(green, (size, size))
         green = numpy.fft.fft2(green)
         blue = numpy.bitwise_and(self.pixels, 0xff)
         blue = numpy.reshape(blue, (size, size))
         blue = numpy.fft.fft2(blue)
          # Does an element-wise multiplication of each channel matrix
          # with the fourier transform of the kernel matrix
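          # 'weights' is presumably set as a module-level global by _createKernel2d above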
         kernel = numpy.fft.fft2(weights)
         red = numpy.multiply(red, kernel)
         green = numpy.multiply(green, kernel)
         blue = numpy.multiply(blue, kernel)
         # Reshapes them back to arrays and converts to unsigned integers
         red = numpy.reshape(numpy.fft.ifft2(red).real, size * size)
         green = numpy.reshape(numpy.fft.ifft2(green).real, size * size)
         blue = numpy.reshape(numpy.fft.ifft2(blue).real, size * size)
         red = red.astype(numpy.uint32)
         green = green.astype(numpy.uint32)
         blue = blue.astype(numpy.uint32)
         self.pixels = numpy.bitwise_or(numpy.left_shift(green, 8), blue)
         self.pixels = numpy.bitwise_or(numpy.left_shift(red, 16),
                                        self.pixels)
         # Crops out the zeros added
         if self.width > self.height:
             self.pixels = self.pixels[self.width * dif / 2:size * size -
                                       self.width * (dif - dif / 2)]
         elif self.width < self.height:
             self.pixels = numpy.reshape(self.pixels, (size, size))
             self.pixels = numpy.transpose(self.pixels)
             self.pixels = numpy.reshape(self.pixels, size * size)
             self.pixels = self.pixels[self.height * dif / 2:size * size -
                                       self.height * (dif - dif / 2)]
             self.pixels = numpy.reshape(self.pixels,
                                         (self.width, self.height))
             self.pixels = numpy.transpose(self.pixels)
             self.pixels = numpy.reshape(self.pixels,
                                         self.height * self.width)
     elif mode == OPAQUE:
          # This is the same as applying a bitwise or with the maximum value
         self.pixels = numpy.bitwise_or(self.pixels, 0xff000000)
     elif mode == THRESHOLD:
         # Maximum = max((n & 0xff0000) >> 16, max((n & 0xff00)>>8, (n & 0xff)))
         # Broken down to Maximum = max(aux,aux2)
         # The pixel will be white if its maximum is greater than the threshold
         # value, and black if not. This was implemented via a boolean matrix
         # multiplication.
         if not args:
             args = [0.5]
         thresh = args[0] * 255
         aux = numpy.right_shift(numpy.bitwise_and(self.pixels, 0xff00), 8)
         aux = numpy.maximum(aux, numpy.bitwise_and(self.pixels, 0xff))
         aux2 = numpy.right_shift(numpy.bitwise_and(self.pixels, 0xff0000),
                                  16)
         boolmatrix = numpy.greater_equal(numpy.maximum(aux, aux2), thresh)
         self.pixels.fill(0xffffff)
         self.pixels = numpy.multiply(self.pixels, boolmatrix)
     elif mode == POSTERIZE:
         # New channel = ((channel*level)>>8)*255/(level-1)
         if not args: args = [8]
         levels1 = args[0] - 1
         rlevel = numpy.bitwise_and(numpy.right_shift(self.pixels, 16),
                                    0xff)
         glevel = numpy.bitwise_and(numpy.right_shift(self.pixels, 8), 0xff)
         blevel = numpy.bitwise_and(self.pixels, 0xff)
         rlevel = numpy.right_shift(numpy.multiply(rlevel, args[0]), 8)
         rlevel = numpy.divide(numpy.multiply(rlevel, 255), levels1)
         glevel = numpy.right_shift(numpy.multiply(glevel, args[0]), 8)
         glevel = numpy.divide(numpy.multiply(glevel, 255), levels1)
         blevel = numpy.right_shift(numpy.multiply(blevel, args[0]), 8)
         blevel = numpy.divide(numpy.multiply(blevel, 255), levels1)
         self.pixels = numpy.bitwise_and(self.pixels, 0xff000000)
         self.pixels = numpy.bitwise_or(self.pixels,
                                        numpy.left_shift(rlevel, 16))
         self.pixels = numpy.bitwise_or(self.pixels,
                                        numpy.left_shift(glevel, 8))
         self.pixels = numpy.bitwise_or(self.pixels, blevel)
     elif mode == ERODE:
         # Checks the pixels directly above, under and to the left and right
         # of each pixel of the image. If it has a greater luminosity, then
         # the center pixel receives its color
         colorOrig = numpy.array(self.pixels)
         colOut = numpy.array(self.pixels)
         colLeft = numpy.roll(colorOrig, 1)
         colRight = numpy.roll(colorOrig, -1)
         colUp = numpy.roll(colorOrig, self.width)
         colDown = numpy.roll(colorOrig, -self.width)
         currLum1 = numpy.bitwise_and(numpy.right_shift(colorOrig, 16),
                                      0xff)
         currLum1 = numpy.multiply(currLum1, 77)
         currLum2 = numpy.bitwise_and(numpy.right_shift(colorOrig, 8), 0xff)
         currLum2 = numpy.multiply(currLum2, 151)
         currLum3 = numpy.multiply(numpy.bitwise_and(colorOrig, 0xff), 28)
         currLum = numpy.add(numpy.add(currLum1, currLum2), currLum3)
         lumLeft1 = numpy.bitwise_and(numpy.right_shift(colLeft, 16), 0xff)
         lumLeft1 = numpy.multiply(lumLeft1, 77)
         lumLeft2 = numpy.bitwise_and(numpy.right_shift(colLeft, 8), 0xff)
         lumLeft2 = numpy.multiply(lumLeft2, 151)
         lumLeft3 = numpy.multiply(numpy.bitwise_and(colLeft, 0xff), 28)
         lumLeft = numpy.add(numpy.add(lumLeft1, lumLeft2), lumLeft3)
         lumRight1 = numpy.bitwise_and(numpy.right_shift(colRight, 16),
                                       0xff)
         lumRight1 = numpy.multiply(lumRight1, 77)
         lumRight2 = numpy.bitwise_and(numpy.right_shift(colRight, 8), 0xff)
         lumRight2 = numpy.multiply(lumRight2, 151)
         lumRight3 = numpy.multiply(numpy.bitwise_and(colRight, 0xff), 28)
         lumRight = numpy.add(numpy.add(lumRight1, lumRight2), lumRight3)
         lumDown1 = numpy.bitwise_and(numpy.right_shift(colDown, 16), 0xff)
         lumDown1 = numpy.multiply(lumDown1, 77)
         lumDown2 = numpy.bitwise_and(numpy.right_shift(colDown, 8), 0xff)
         lumDown2 = numpy.multiply(lumDown2, 151)
         lumDown3 = numpy.multiply(numpy.bitwise_and(colDown, 0xff), 28)
         lumDown = numpy.add(numpy.add(lumDown1, lumDown2), lumDown3)
         lumUp1 = numpy.bitwise_and(numpy.right_shift(colUp, 16), 0xff)
         lumUp1 = numpy.multiply(lumUp1, 77)
         lumUp2 = numpy.bitwise_and(numpy.right_shift(colUp, 8), 0xff)
         lumUp2 = numpy.multiply(lumUp2, 151)
         lumUp3 = numpy.multiply(numpy.bitwise_and(colUp, 0xff), 28)
         lumUp = numpy.add(numpy.add(lumUp1, lumUp2), lumUp3)
         numpy.putmask(colOut, lumLeft > currLum, colLeft)
         numpy.putmask(currLum, lumLeft > currLum, lumLeft)
         numpy.putmask(colOut, lumRight > currLum, colRight)
         numpy.putmask(currLum, lumRight > currLum, lumRight)
         numpy.putmask(colOut, lumUp > currLum, colUp)
         numpy.putmask(currLum, lumUp > currLum, lumUp)
         numpy.putmask(colOut, lumDown > currLum, colDown)
         numpy.putmask(currLum, lumDown > currLum, lumDown)
         self.pixels = colOut
     elif mode == DILATE:
         # Checks the pixels directly above, under and to the left and right
         # of each pixel of the image. If it has a lesser luminosity, then
         # the center pixel receives its color
         colorOrig = numpy.array(self.pixels)
         colOut = numpy.array(self.pixels)
         colLeft = numpy.roll(colorOrig, 1)
         colRight = numpy.roll(colorOrig, -1)
         colUp = numpy.roll(colorOrig, self.width)
         colDown = numpy.roll(colorOrig, -self.width)
         currLum1 = numpy.bitwise_and(numpy.right_shift(colorOrig, 16),
                                      0xff)
         currLum1 = numpy.multiply(currLum1, 77)
         currLum2 = numpy.bitwise_and(numpy.right_shift(colorOrig, 8), 0xff)
         currLum2 = numpy.multiply(currLum2, 151)
         currLum3 = numpy.multiply(numpy.bitwise_and(colorOrig, 0xff), 28)
         currLum = numpy.add(numpy.add(currLum1, currLum2), currLum3)
         lumLeft1 = numpy.bitwise_and(numpy.right_shift(colLeft, 16), 0xff)
         lumLeft1 = numpy.multiply(lumLeft1, 77)
         lumLeft2 = numpy.bitwise_and(numpy.right_shift(colLeft, 8), 0xff)
         lumLeft2 = numpy.multiply(lumLeft2, 151)
         lumLeft3 = numpy.multiply(numpy.bitwise_and(colLeft, 0xff), 28)
         lumLeft = numpy.add(numpy.add(lumLeft1, lumLeft2), lumLeft3)
         lumRight1 = numpy.bitwise_and(numpy.right_shift(colRight, 16),
                                       0xff)
         lumRight1 = numpy.multiply(lumRight1, 77)
         lumRight2 = numpy.bitwise_and(numpy.right_shift(colRight, 8), 0xff)
         lumRight2 = numpy.multiply(lumRight2, 151)
         lumRight3 = numpy.multiply(numpy.bitwise_and(colRight, 0xff), 28)
         lumRight = numpy.add(numpy.add(lumRight1, lumRight2), lumRight3)
         lumDown1 = numpy.bitwise_and(numpy.right_shift(colDown, 16), 0xff)
         lumDown1 = numpy.multiply(lumDown1, 77)
         lumDown2 = numpy.bitwise_and(numpy.right_shift(colDown, 8), 0xff)
         lumDown2 = numpy.multiply(lumDown2, 151)
         lumDown3 = numpy.multiply(numpy.bitwise_and(colDown, 0xff), 28)
         lumDown = numpy.add(numpy.add(lumDown1, lumDown2), lumDown3)
         lumUp1 = numpy.bitwise_and(numpy.right_shift(colUp, 16), 0xff)
         lumUp1 = numpy.multiply(lumUp1, 77)
         lumUp2 = numpy.bitwise_and(numpy.right_shift(colUp, 8), 0xff)
         lumUp2 = numpy.multiply(lumUp2, 151)
         lumUp3 = numpy.multiply(numpy.bitwise_and(colUp, 0xff), 28)
         lumUp = numpy.add(numpy.add(lumUp1, lumUp2), lumUp3)
         numpy.putmask(colOut, lumLeft < currLum, colLeft)
         numpy.putmask(currLum, lumLeft < currLum, lumLeft)
         numpy.putmask(colOut, lumRight < currLum, colRight)
         numpy.putmask(currLum, lumRight < currLum, lumRight)
         numpy.putmask(colOut, lumUp < currLum, colUp)
         numpy.putmask(currLum, lumUp < currLum, lumUp)
         numpy.putmask(colOut, lumDown < currLum, colDown)
         numpy.putmask(currLum, lumDown < currLum, lumDown)
         self.pixels = colOut
     self.updatePixels()
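The ERODE and DILATE branches repeat the same 77/151/28 weighted-luminosity computation five times each; a hedged refactor sketch (the helper name is hypothetical) that would collapse the repetition without changing behavior:

def _luminosity(pixels):
    # weighted luminosity matching the branches above; unlike GRAY there is no
    # final >> 8, because ERODE/DILATE only compare luminosities against each other
    r = numpy.bitwise_and(numpy.right_shift(pixels, 16), 0xff)
    g = numpy.bitwise_and(numpy.right_shift(pixels, 8), 0xff)
    b = numpy.bitwise_and(pixels, 0xff)
    return 77 * r + 151 * g + 28 * b

currLum, lumLeft, lumRight, lumUp and lumDown would then each reduce to a single _luminosity(...) call.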
Example #60
0
    # Convolve the HRF with the stimulus sequence
    signal_function = sim.double_gamma_hrf(stimfunction=stimfunction_cond,
                                           tr_duration=tr_duration,
                                           )

    # Multiply the HRF timecourse with the signal
    signal_cond = sim.apply_signal(signal_function=signal_function,
                                   volume_signal=volume_signal,
                                   )

    # Concatenate all the signal and function files
    if cond == 0:
        stimfunction = stimfunction_cond
        signal = signal_cond
    else:
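        # np.add coerces both Python lists to arrays and adds them elementwise;
        # list() then converts the summed stimulus function back to a list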
        stimfunction = list(np.add(stimfunction, stimfunction_cond))
        signal += signal_cond

# Generate the mask of the signal
mask, template = sim.mask_brain(signal)

# Mask the signal to the shape of a brain (does not attenuate signal according
# to grey matter likelihood)
signal *= mask.reshape(dimensions[0], dimensions[1], dimensions[2], 1)

# Downsample the stimulus function to generate it in TR time
stimfunction_tr = stimfunction[::int(tr_duration * 1000)]

# Iterate through the participants and store participants
epochs = []
for participantcounter in range(1, participants + 1):