def __init__(self, input, n_in, n_out):
        """ Initialize the parameters of the logistic regression

        :type input: theano.tensor.TensorType
        :param input: symbolic variable that describes the input of the
                      architecture (one minibatch)

        :type n_in: int
        :param n_in: number of input units, the dimension of the space in
                     which the datapoints lie

        :type n_out: int
        :param n_out: number of output units, the dimension of the space in
                      which the labels lie

        """

        # initialize the weights W with zeros, as a matrix of shape (n_in, n_out)
        self.W = theano.shared(value=numpy.zeros((n_in, n_out),
                                                 dtype=theano.config.floatX),
                               name='W', borrow=True)
        # initialize the biases b as a vector of n_out zeros
        self.b = theano.shared(value=numpy.zeros((n_out,),
                                                 dtype=theano.config.floatX),
                               name='b', borrow=True)

        # compute vector of class-membership probabilities in symbolic form
        self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b)

        # compute prediction as class whose probability is maximal in
        # symbolic form
        self.y_pred = T.argmax(self.p_y_given_x, axis=1)

        # parameters of the model
        self.params = [self.W, self.b]
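
A minimal usage sketch for the class above (assuming this __init__ belongs to a class named LogisticRegression, as in the Theano tutorials; with zero-initialized weights the softmax is uniform and every prediction is class 0):

import numpy
import theano
import theano.tensor as T

x = T.matrix('x')  # symbolic minibatch, one row per datapoint
clf = LogisticRegression(input=x, n_in=784, n_out=10)
predict = theano.function(inputs=[x], outputs=clf.y_pred)
print(predict(numpy.zeros((2, 784), dtype=theano.config.floatX)))  # -> [0 0]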
def runLabeling(file_path, gps_filename, output_name, frames_to_skip, final_frame, lp, rp, pickle_loc):
    video_reader = WarpedVideoReader(file_path)
    #video_reader.setSubsample(True)
    video_reader.setPerspectives(pickle_loc)
    gps_reader = GPSReader(gps_filename)
    gps_dat = gps_reader.getNumericData()

    cam = getCameraParams()
    cam_to_use = cam[int(output_name[-1]) - 1]

    lp = pixelTo3d(lp, cam_to_use)
    rp = pixelTo3d(rp, cam_to_use)
    tr = GPSTransforms(gps_dat, cam_to_use)
    pitch = -cam_to_use['rot_x']
    height = 1.106
    R_camera_pitch = euler_matrix(cam_to_use['rot_x'], cam_to_use['rot_y'], cam_to_use['rot_z'], 'sxyz')[0:3, 0:3]
    Tc = np.eye(4)
    Tc[0:3, 0:3] = R_camera_pitch.transpose()
    Tc[0:3, 3] = [-0.2, -height, -0.5]
    lpts = np.zeros((lp.shape[0], 4))
    rpts = np.zeros((rp.shape[0], 4))
    for t in range(min(tr.shape[0], lp.shape[0])):
        lpts[t, :] = np.dot(tr[t, :, :], np.linalg.solve(Tc, np.array([lp[t, 0], lp[t, 1], lp[t, 2], 1])))
        rpts[t, :] = np.dot(tr[t, :, :], np.linalg.solve(Tc, np.array([rp[t, 0], rp[t, 1], rp[t, 2], 1])))

    ldist = np.apply_along_axis(np.linalg.norm, 1, np.concatenate((np.array([[0, 0, 0, 0]]), lpts[1:] - lpts[0:-1])))
    rdist = np.apply_along_axis(np.linalg.norm, 1, np.concatenate((np.array([[0, 0, 0, 0]]), rpts[1:] - rpts[0:-1])))
    start_frame = frames_to_skip
    runBatch(video_reader, gps_dat, cam_to_use, output_name, start_frame, final_frame, lpts, rpts, ldist, rdist, tr)

    print "Done with %s" % output_name
Example #3
def make_video(events, t0=0.0, t1=None, dt_frame=0.01, tau=0.01):
    if t1 is None:
        t1 = events["t"].max()

    ts = events["t"]
    dt = 1e-3
    nt = int((t1 - t0) / dt) + 1
    # nt = min(nt, 1000)  # cap at 1000 for now

    image = np.zeros((128, 128))
    images = np.zeros((nt, 128, 128))

    for i in range(nt):
        # --- decay image
        image *= np.exp(-dt / tau) if tau > 0 else 0
        # image *= 0

        # --- add events
        ti = t0 + i * dt
        add_to_image(image, events[close(ts, ti)])

        images[i] = image

    # --- sum images into frames (nt_frame raw steps per output frame)
    nt_frame = int(dt_frame / dt)
    nt_video = int(nt / nt_frame)

    video = np.zeros((nt_video, 128, 128))
    for i in range(nt_video):
        slicei = slice(i * nt_frame, (i + 1) * nt_frame)
        video[i] = np.sum(images[slicei], axis=0)

    return video
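
make_video relies on close() and add_to_image() helpers that are not shown. Plausible definitions consistent with how they are called (assumptions, not the original implementations; events is taken to be a structured array with integer "x"/"y" pixel fields):

import numpy as np

def close(ts, ti, atol=0.5e-3):
    # boolean mask: events whose timestamps fall within the dt bin around ti
    return np.abs(ts - ti) < atol

def add_to_image(image, events):
    # accumulate events at their pixel coordinates (handles repeated indices)
    np.add.at(image, (events["y"], events["x"]), 1.0)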
Example #4
File: try2.py Project: Beirdo/misc-stuff
def resample(oldrate,newrate,x,n,dtype,factor):
    print "Resampling from",oldrate,"Hz to",newrate,"Hz, amplification factor",factor
    rategcd = gcd(oldrate,newrate)
    uprate = newrate / rategcd
    dnrate = oldrate / rategcd

    oldcount = len(x)
    midcount = oldcount * uprate
    newcount = midcount / dnrate

    print "Upsampling by",uprate
    if uprate == 1:
        yout = np.asarray(x, dtype=dtype)
    else:
        yout = np.zeros(midcount, dtype=dtype)
        for i in range(oldcount):  # was range(0, oldcount-1), which dropped the last sample
            yout[i * uprate] = x[i] * uprate

    wl = min(1.0/uprate,1.0/dnrate)
    print "Antialias filtering at",wl
    
    midrate = oldrate * uprate
    filt = firfilter(0, (midrate * wl) / 2.0, midrate, n)
    y = signal.lfilter(filt, 1, yout)

    print "Downsampling by",dnrate
    if dnrate == 1:
        yout = np.asarray(y, dtype=dtype)
    else:
        yout = np.zeros(newcount, dtype=dtype)
        for i in range(newcount):  # was range(0, newcount-1), which dropped the last sample
            yout[i] = y[i * dnrate] * factor

    return yout
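
For reference, SciPy's polyphase resampler performs the same upsample / anti-alias FIR / downsample pipeline in one call; a sketch, not a drop-in replacement for the explicit firfilter design above:

from scipy import signal

yout = factor * signal.resample_poly(x, uprate, dnrate)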
 def calculate_zernikes(self, workspace):
     zernike_indexes = cpmz.get_zernike_indexes(self.zernike_degree.value + 1)
     meas = workspace.measurements
     for o in self.objects:
         object_name = o.object_name.value
         objects = workspace.object_set.get_objects(object_name)
         #
         # First, get a table of centers and radii of minimum enclosing
         # circles per object
         #
         ij = np.zeros((objects.count + 1, 2))
         r = np.zeros(objects.count + 1)
         for labels, indexes in objects.get_labels():
             ij_, r_ = minimum_enclosing_circle(labels, indexes)
             ij[indexes] = ij_
             r[indexes] = r_
         #
         # Then compute x and y, the position of each labeled pixel
         # within a unit circle around the object
         #
         ijv = objects.ijv
         l = ijv[:, 2]
         yx = (ijv[:, :2] - ij[l, :]) / r[l, np.newaxis]
         z = cpmz.construct_zernike_polynomials(
                 yx[:, 1], yx[:, 0], zernike_indexes)
         for image_group in self.images:
             image_name = image_group.image_name.value
             image = workspace.image_set.get_image(
                     image_name, must_be_grayscale=True)
             pixels = image.pixel_data
             mask = (ijv[:, 0] < pixels.shape[0]) & \
                    (ijv[:, 1] < pixels.shape[1])
             mask[mask] = image.mask[ijv[mask, 0], ijv[mask, 1]]
             yx_ = yx[mask, :]
             l_ = l[mask]
             z_ = z[mask, :]
             if len(l_) == 0:
                 for i, (n, m) in enumerate(zernike_indexes):
                     ftr = self.get_zernike_magnitude_name(image_name, n, m)
                     meas[object_name, ftr] = np.zeros(0)
                     if self.wants_zernikes == Z_MAGNITUDES_AND_PHASE:
                         ftr = self.get_zernike_phase_name(image_name, n, m)
                         meas[object_name, ftr] = np.zeros(0)
                 continue
             areas = scind.sum(
                     np.ones(l_.shape, int), labels=l_, index=objects.indices)
             for i, (n, m) in enumerate(zernike_indexes):
                 vr = scind.sum(
                         pixels[ijv[mask, 0], ijv[mask, 1]] * z_[:, i].real,
                         labels=l_, index=objects.indices)
                 vi = scind.sum(
                         pixels[ijv[mask, 0], ijv[mask, 1]] * z_[:, i].imag,
                         labels=l_, index=objects.indices)
                 magnitude = np.sqrt(vr * vr + vi * vi) / areas
                 ftr = self.get_zernike_magnitude_name(image_name, n, m)
                 meas[object_name, ftr] = magnitude
                 if self.wants_zernikes == Z_MAGNITUDES_AND_PHASE:
                     phase = np.arctan2(vr, vi)
                     ftr = self.get_zernike_phase_name(image_name, n, m)
                     meas[object_name, ftr] = phase
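
The magnitude computed above is simply the modulus of the complex Zernike moment vr + i·vi normalized by object area; a one-line check of that identity (sketch):

import numpy as np
vr, vi, area = 3.0, 4.0, 10.0
assert np.isclose(np.sqrt(vr * vr + vi * vi) / area, np.abs(vr + 1j * vi) / area)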
Example #6
    def __fen2tensor(self, fen):

        frdpos = np.zeros((9, 10, 16), dtype=OUT_TYPE)
        frdmove = np.zeros((9, 10, 16), dtype=OUT_TYPE)
        emypos = np.zeros((9, 10, 16), dtype=OUT_TYPE)
        emymove = np.zeros((9, 10, 16), dtype=OUT_TYPE)
        movelabel = np.zeros((9, 10, 16), dtype=OUT_TYPE)

        fenlist = fen.split('\t')
        frdpos, emypos = self.__f2tpos(fenlist[0], frdpos, emypos)
        frdmove = self.__f2tfrdmove(fenlist[1], frdmove, frdpos)

        label = fenlist[2].strip().split('-')
        layer = np.argmax(frdpos[self.__loca2i(label[0][0])][self.__loca2i(label[0][1])])
        movelabel[self.__loca2i(label[1][0])][self.__loca2i(label[1][1])][layer] = 1

        if fenlist[0].split()[1] == 'b':
            self.__switch_round(frdpos)
            self.__switch_round(frdmove)
            self.__switch_round(emypos)
            self.__switch_round(movelabel)

        # shuffle randomly
        self.__shuffle([frdpos, frdmove, movelabel], self.__shuffle_args())
        self.__shuffle([emypos], self.__shuffle_args())

        return frdpos, frdmove, emypos, movelabel
Example #7
File: getf.py Project: daxiongshu/bnp
def kfold_cv(X_train, y_train,idx,k):

    kf = StratifiedKFold(y_train,n_folds=k)
    xx=[]
    count=0
    ypred=np.zeros(X_train.shape[0])
    for train_index, test_index in kf:
        count+=1
        X_train_cv, X_test_cv = X_train[train_index,:],X_train[test_index,:]
        gc.collect()
        y_train_cv, y_test_cv = y_train[train_index],y_train[test_index]
        y_pred=np.zeros(X_test_cv.shape[0])
        m=1
         
        for j in range(m):
            clf=xgb_classifier(eta=0.01,min_child_weight=10,col=0.7,subsample=0.68,depth=5,num_round=500,seed=j*77,gamma=0)

            y_pred+=clf.train_predict(X_train_cv,(y_train_cv),X_test_cv,y_test=(y_test_cv))
            yqq=y_pred/(1+j)
            print j,llfun(y_test_cv,yqq)
        y_pred /= m
        #clf=RandomForestClassifier(n_jobs=-1,n_estimators=100,max_depth=100)
        #clf.fit(X_train_cv,(y_train_cv))
        #y_pred=clf.predict_proba(X_test_cv).T[1]
        print y_pred.shape
        xx.append(llfun(y_test_cv,(y_pred)))
        ypred[test_index]=y_pred
        print xx[-1]#,y_pred.shape

    print xx,'average:',np.mean(xx),'std',np.std(xx)
    return ypred
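
StratifiedKFold(y_train, n_folds=k) is the pre-0.18 scikit-learn API; under current releases the equivalent setup would be (sketch):

from sklearn.model_selection import StratifiedKFold

kf = StratifiedKFold(n_splits=k)
for train_index, test_index in kf.split(X_train, y_train):
    ...  # same loop body as above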
Example #8
def label_nodes_with_class(nodes_xyt, class_maps, pix):
  """
  Returns:
    class_maps_one_hot: per-class one-hot class maps, same spatial shape as class_maps.
    node_class_label_one_hot: one-hot node labels, nodes_xyt.shape[0] x n_classes.
  """
  # Assign each pixel to a node.
  selem = skimage.morphology.disk(pix)
  class_maps_ = class_maps*1.
  for i in range(class_maps.shape[2]):
    class_maps_[:,:,i] = skimage.morphology.dilation(class_maps[:,:,i]*1, selem)
  class_maps__ = np.argmax(class_maps_, axis=2)
  class_maps__[np.max(class_maps_, axis=2) == 0] = -1

  # For each node pick out the label from this class map.
  x = np.round(nodes_xyt[:,[0]]).astype(np.int32)
  y = np.round(nodes_xyt[:,[1]]).astype(np.int32)
  ind = np.ravel_multi_index((y,x), class_maps__.shape)
  node_class_label = class_maps__.ravel()[ind][:,0]

  # Convert to one hot versions.
  class_maps_one_hot = np.zeros(class_maps.shape, dtype=bool)
  node_class_label_one_hot = np.zeros((node_class_label.shape[0], class_maps.shape[2]), dtype=bool)
  for i in range(class_maps.shape[2]):
    class_maps_one_hot[:,:,i] = class_maps__ == i
    node_class_label_one_hot[:,i] = node_class_label == i
  return class_maps_one_hot, node_class_label_one_hot
Example #9
 def test_continuum_seismicity(self):
     '''
     Tests the function hmtk.strain.shift.Shift.continuum_seismicity - 
     the python implementation of the Subroutine Continuum Seismicity from
     the Fortran 90 code GSRM.f90
     '''
     self.strain_model = GeodeticStrain()
     # Define a simple strain model
     test_data = {'longitude': np.zeros(3, dtype=float),
                  'latitude': np.zeros(3, dtype=float),
                  'exx': np.array([1E-9, 1E-8, 1E-7]),
                  'eyy': np.array([5E-10, 5E-9, 5E-8]),
                  'exy': np.array([2E-9, 2E-8, 2E-7])}
     self.strain_model.get_secondary_strain_data(test_data)
     self.model = Shift([5.66, 6.66])
     threshold_moment = moment_function(np.array([5.66, 6.66]))
     
     expected_rate = np.array([[-14.43624419, -22.48168502],
                               [-13.43624419, -21.48168502],
                               [-12.43624419, -20.48168502]]) 
     np.testing.assert_array_almost_equal(
         expected_rate,
         np.log10(self.model.continuum_seismicity(
             threshold_moment,
             self.strain_model.data['e1h'],
             self.strain_model.data['e2h'],
             self.strain_model.data['err'],
             BIRD_GLOBAL_PARAMETERS['OSRnor'])))
Example #10
File: Utils.py Project: tanmoy7989/c25ld
	def makeHist(self, normalize = True, doPMF = True):
		if self.isDataPickled:
			return

		if not self.Dim == 1:
			raise TypeError('Variable # mismatch')

		z = self.z
		Nframes = len(z)
		bin_min = 0.98 * z.min(); bin_max = 1.02*z.max()
		delta = (bin_max - bin_min)/float(self.nbins)
		bin_centers = np.zeros(self.nbins)
		bin_vals = np.zeros(self.nbins)
		pmf = np.zeros(self.nbins)
		for i in range(self.nbins):
			bin_centers[i] = bin_min + (i+0.5) * delta
			
		frameStatus = pb(Text = 'Binning frame by frame', Steps = Nframes)
		for i in range(Nframes):
		
			assignment = int((z[i] - bin_min)/delta)
			bin_vals[assignment] += 1.0
		
			frameStatus.Update(i)
		
		if normalize:
			#bin_vals /= (np.sum(bin_vals) * delta)
			bin_vals /= np.trapz(bin_vals, bin_centers, dx = delta)
		if doPMF:
			pmf = - np.log(bin_vals)
		

		hist = {'bin_centers': bin_centers, 'bin_vals': bin_vals, 'pmf' : pmf}
		with open(self.data, 'wb') as of:
			pickle.dump(hist, of)
		self.isDataPickled = True
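
The frame-by-frame binning loop is equivalent to a single np.histogram call (a sketch; density=True normalizes by total count times bin width, which matches the trapezoid normalization above only up to the integration rule):

bin_vals, edges = np.histogram(z, bins=self.nbins, range=(bin_min, bin_max), density=True)
bin_centers = 0.5 * (edges[1:] + edges[:-1])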
Example #11
 def train_set_loss_vars_for_cur_batches(self):
   """
   Called via Engine.SeqTrainParallelControl.
   """
   assert self.train_have_loss_for_cur_batches()
   # See EngineUtil.assign_dev_data for reference.
   from Dataset import Dataset
   n_time, n_batch = Dataset.index_shape_for_batches(self.train_batches)
   n_output_dim = self.output_layer.attrs['n_out']
   output_loss = numpy.zeros((n_batch,), "float32")
   output_hat_y = numpy.zeros((n_time, n_batch, n_output_dim), "float32")
   offset_slice = 0
   for batch in self.train_batches:
     for seq in batch.seqs:
       o = seq.batch_frame_offset
       q = seq.batch_slice + offset_slice
       l = seq.frame_length
       # input-data, input-index will also be set in this loop. That is data-key "data".
       for k in [self.output_target]:
         if l[k] == 0: continue
         loss, hat_y = self.get_loss_and_hat_y(seq.seq_idx)
         assert seq.seq_start_frame[k] < hat_y.shape[0]
         assert seq.seq_end_frame[k] <= hat_y.shape[0]
         output_loss[q] += loss * float(l[k]) / hat_y.shape[0]
         output_hat_y[o[k]:o[k] + l[k], q] = hat_y[seq.seq_start_frame[k]:seq.seq_end_frame[k]]
   self.output_var_loss.set_value(output_loss)
   self.output_var_hat_y.set_value(output_hat_y)
Example #12
    def divide_arrays(self, num_array, num_array_error, den_array, den_array_error):
        '''
        This function calculates the ratio of two arrays and calculate the respective error values
        '''

        nbr_elements = np.shape(num_array)[0]
        
        # calculate the ratio array
        ratio_array = np.zeros(nbr_elements)
        for i in range(nbr_elements):
            if den_array[i] == 0:
                _tmp_ratio = 0
            else:
                _tmp_ratio = num_array[i] / den_array[i]
            ratio_array[i] = _tmp_ratio
            
        # calculate the error of the ratio array
        ratio_error_array = np.zeros(nbr_elements)
        for i in range(nbr_elements):
            
            if (num_array[i] == 0) or (den_array[i] == 0): 
                ratio_error_array[i] = 0 
            else:
                tmp1 = pow(num_array_error[i] / num_array[i],2)
                tmp2 = pow(den_array_error[i] / den_array[i],2)
                ratio_error_array[i] = math.sqrt(tmp1+tmp2)*(num_array[i]/den_array[i])
    
        return [ratio_array, ratio_error_array]        
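
Both loops vectorize directly. np.divide with a where mask reproduces the zero-for-zero-denominator behavior without Python-level iteration (a sketch, assuming float arrays):

import numpy as np

ratio_array = np.divide(num_array, den_array,
                        out=np.zeros(nbr_elements), where=den_array != 0)
ok = (num_array != 0) & (den_array != 0)
rel = np.zeros(nbr_elements)
rel[ok] = np.sqrt((num_array_error[ok] / num_array[ok]) ** 2 +
                  (den_array_error[ok] / den_array[ok]) ** 2)
ratio_error_array = rel * ratio_array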
Example #13
File: getz.py Project: tconklin/coprops
def photoz(s1100,e1100=0.,s14=0.,e14=0.,ntry=50000):
    '''
    Determine the photometric redshift of a galaxy given the
    measured 1.4 cm and 1100 micron flux and uncertainty
    '''
    z = np.arange(0,10,.05)
    ngal = 44
    if s14 == 0:
        ratioin = -1
        ratiosig = -1
    else:
        ratioin = s1100/s14
        ratiosig = (e1100/s1100**2+e14/s14**2)**.5
    a = idlsave.read('fluxratio1100.sav')
    dat = a.get('data')
    zs = a.get('redshift')
    averatio = np.zeros(200)
    sigma = np.zeros(200)
    array = np.random.randn(ntry)
    array1 = np.random.randn(ntry)
    if s14 <= 0.:
        ydarts = (s1100+array*e1100)/(np.abs(array1*e14))
    else:
        ydarts = array*ratiosig+ratioin
    xdarts = np.zeros(ntry)
    for i in range(ntry):
        jrangal = int(np.floor(ngal*np.random.rand(1))[0])  # cast to int for valid array indexing
        testtrack = dat[:,jrangal]
        yval = ydarts[i]
        xdarts[i] = np.interp(yval,testtrack,z)
    return xdarts,ydarts
    def backprop(self, x, y):
        activation = x
        activations = [x]
        zs = []

        for weight, bias in zip(self.weights, self.biases):
            z = np.dot(activation, weight)+bias
            zs.append(z)
            activation = sigmoid(z)
            activations.append(activation)

        delta = (activation-y)*sigmoid_prime(zs[-1])

        nabla_weights = [np.zeros(w.shape) for w in self.weights]
        nabla_biases = [np.zeros(b.shape) for b in self.biases]

        nabla_weights[-1] = np.dot(activations[-2].transpose(), delta)
        nabla_biases[-1] = delta

        for l in xrange(2, len(self.layers)):
            delta = np.dot(delta, self.weights[-l+1].transpose())*sigmoid_prime(zs[-l])
            nabla_weights[-l] = np.dot(activations[-l-1].transpose(), delta)
            nabla_biases[-l] = delta

        return (nabla_weights, nabla_biases)
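
This backprop assumes sigmoid and sigmoid_prime helpers that are not shown; the standard logistic definitions it expects would be (sketch):

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def sigmoid_prime(z):
    s = sigmoid(z)
    return s * (1.0 - s)  # derivative of the logistic function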
Example #15
	def __init__(self, featureDimension, lambda_, eta_, userNum, windowSize =20):
		self.windowSize = windowSize
		self.counter = 0
		self.userNum = userNum
		self.lambda_ = lambda_
		# Basic stat in estimating Theta
		self.A = lambda_*np.identity(n = featureDimension*userNum)
		self.b = np.zeros(featureDimension*userNum)
		self.UserTheta = np.zeros(shape = (featureDimension, userNum))
		#self.UserTheta = np.random.random((featureDimension, userNum))
		self.AInv = np.linalg.inv(self.A)
		
		#self.W = np.random.random((userNum, userNum))
		self.W = np.identity(n = userNum)
		self.Wlong = vectorize(self.W)
		self.batchGradient = np.zeros(userNum*userNum)

		self.CoTheta = np.dot(self.UserTheta, self.W)
		self.BigW = np.kron(np.transpose(self.W), np.identity(n=featureDimension))
		self.CCA = np.identity(n = featureDimension*userNum)
		self.BigTheta = np.kron(np.identity(n=userNum) , self.UserTheta)
		self.W_X_arr = []
		self.W_y_arr = []
		for i in range(userNum):
			self.W_X_arr.append([])
			self.W_y_arr.append([])
Example #16
def fix_labels(mnist_label, add_num):
    """
    Args:
    mnist_label: [[int]] array, one-hot class labels
    add_num: int, number of samples to add for the new class

    Returns:
    [[int]] array

    """

    c_num = len(mnist_label[0])

    # add one dimension
    fixed_label = np.c_[mnist_label, np.zeros(len(mnist_label))]
    assert len(fixed_label[0]) == c_num + 1

    # generate new class label
    new_label = np.zeros(c_num + 1)
    new_label[c_num] = 1
    new_label = np.array([new_label for i in range(add_num)])

    # add new class label
    fixed_label = np.r_[fixed_label, new_label]
    assert len(fixed_label) == len(mnist_label) + add_num

    return fixed_label
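
A quick worked example (sketch): two one-hot labels over two classes, with one sample appended for the new third class.

labels = np.array([[1, 0], [0, 1]])
print(fix_labels(labels, 1))
# [[1. 0. 0.]
#  [0. 1. 0.]
#  [0. 0. 1.]]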
Example #17
def computeNumericalGradient(J,theta):
    # numgrad = computeNumericalGradient(J, theta)
    # theta: a vector of parameters
    # J: a function that outputs a real number.
    # Calling y = J(theta) will return the function value at theta. 
      
    # Initialize numgrad with zeros
    numgrad = np.zeros(np.shape(theta))

    ## ---------- YOUR CODE HERE --------------------------------------
    # Instructions: 
    # Implement numerical gradient checking, and return the result in numgrad.  
    # (See Section 2.3 of the lecture notes.)
    # You should write code so that numgrad(i) is (the numerical approximation to) the 
    # partial derivative of J with respect to the i-th input argument, evaluated at theta.  
    # I.e., numgrad(i) should be the (approximately) the partial derivative of J with 
    # respect to theta(i).
    #               
    # Hint: You will probably want to compute the elements of numgrad one at a time.
    for i in range(0,numgrad.shape[0]):
        k = np.zeros(np.shape(theta))
        k[i] = 0.0001
        y1 = J(theta+k)
        y2 = J(theta - k)
        numgrad[i] = (y1-y2)/0.0002

    ## ---------------------------------------------------------------
    return numgrad
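
A quick self-check (sketch): for J(theta) = theta . theta the gradient is 2*theta, which the central difference recovers exactly on a quadratic:

import numpy as np

J = lambda theta: np.dot(theta, theta)
theta = np.array([1.0, -2.0, 3.0])
print(computeNumericalGradient(J, theta))  # -> [ 2. -4.  6.]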
Example #18
	def updateParameters(self, articlePicked, click,  userID):	
		self.counter +=1
		self.Wlong = vectorize(self.W)
		featureDimension = len(articlePicked.featureVector)
		T_X = vectorize(np.outer(articlePicked.featureVector, self.W.T[userID])) 
		self.A += np.outer(T_X, T_X)	
		self.b += click*T_X
		self.AInv = np.linalg.inv(self.A)
		self.UserTheta = matrixize(np.dot(self.AInv, self.b), len(articlePicked.featureVector)) 

		Xi_Matrix = np.zeros(shape = (featureDimension, self.userNum))
		Xi_Matrix.T[userID] = articlePicked.featureVector
		W_X = vectorize( np.dot(np.transpose(self.UserTheta), Xi_Matrix))
		self.batchGradient +=evaluateGradient(W_X, click, self.Wlong, self.lambda_, self.regu  )

		if self.counter%self.windowSize ==0:
			self.Wlong -= 1/(float(self.counter/self.windowSize)+1)*self.batchGradient
			self.W = matrixize(self.Wlong, self.userNum)
			self.W = normalize(self.W, axis=0, norm='l1')
			#print 'SVD', self.W
			self.batchGradient = np.zeros(self.userNum*self.userNum)
			# Use Ridge regression to fit W
		'''
		plt.pcolor(self.W_b)
		plt.colorbar
		plt.show()
		'''
		if (self.W.T[userID] < 0).any() or (self.W.T[userID] > 1).any():
			print self.W.T[userID]

		self.CoTheta = np.dot(self.UserTheta, self.W)
		self.BigW = np.kron(np.transpose(self.W), np.identity(n=len(articlePicked.featureVector)))
		self.CCA = np.dot(np.dot(self.BigW , self.AInv), np.transpose(self.BigW))
		self.BigTheta = np.kron(np.identity(n=self.userNum) , self.UserTheta)
Example #19
def value_of_policy(sigma):
    "Computes the value of following policy sigma."

    # Set up the stochastic kernel p_sigma as a 2D array:
    N = len(S)
    p_sigma = zeros((N, N))   
    for x in S:
        for y in S: 
            p_sigma[x, y] = phi(y - sigma[x])

    # Create the right Markov operator M_sigma:
    M_sigma = lambda h: dot(p_sigma, h)

    # Set up the function r_sigma as an array:
    r_sigma = array([U(x - sigma[x]) for x in S])
    # Reshape r_sigma into a column vector:
    r_sigma = r_sigma.reshape((N, 1))

    # Initialize v_sigma to zero:
    v_sigma = zeros((N,1))
    # Initialize the discount factor to 1:
    discount = 1

    for i in range(50):
        v_sigma = v_sigma + discount * r_sigma 
        r_sigma = M_sigma(r_sigma)
        discount = discount * rho

    return v_sigma
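
The 50-iteration loop truncates the Neumann series for (I - rho*P_sigma)^-1 r_sigma; with NumPy the exact policy value comes from solving the linear system directly (a sketch using the initial r_sigma, before the loop overwrites it):

import numpy as np

v_exact = np.linalg.solve(np.eye(N) - rho * p_sigma, r_sigma)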
Example #20
def predictSoftmax(theta,data,label, numClasses,inputSize):

    theta = theta.reshape(numClasses,inputSize+1)
    y = np.zeros((data.shape[0], numClasses))

    for i in range(len(label)):
        k = np.zeros(numClasses)
        k[label[i,0]] = 1
        y[i] = (y[i] + k).astype(int)

    theta_1 =  np.dot(data, theta.T)
    theta_1 = theta_1 - np.amax(theta_1, axis = 1).reshape(data.shape[0],1)
    prob = np.exp(theta_1) # 10000*724 * 724*10 = 10000*10
    sum_prob = np.sum(prob, axis = 1).reshape(data.shape[0],1) # 10000*1
    prob = prob/sum_prob  #10000*10

    predict = prob/np.amax(prob, axis = 1).reshape(data.shape[0],1)

    predict = (predict == 1.0 ).astype(float)

    k = 0

    for i in range(len(label)):
        if np.array_equal(predict[i,:],y[i,:]):
            k = k+1

    correctness = float(k) / len(label)  # float() avoids Python 2 integer division
    return correctness
def all_patches(padded_brain,i,predict_patchsize,obs_patchsize,num_channels):
    
    image = padded_brain[i]
    ishape_h , ishape_w = padded_brain.shape[1:3]
    #ipdb.set_trace()
    #ipdb.set_trace()
    half_obs_patchsize = obs_patchsize/2
    half_predict_patchsize = predict_patchsize/2
    extended_image = np.zeros((ishape_h+obs_patchsize-predict_patchsize,ishape_w+obs_patchsize-predict_patchsize,num_channels))
    extended_image[half_obs_patchsize - half_predict_patchsize   : -(half_obs_patchsize - half_predict_patchsize),half_obs_patchsize - half_predict_patchsize  : -(half_obs_patchsize - half_predict_patchsize)]= image
    num_patches_rows = ishape_h // predict_patchsize
    num_patches_cols = ishape_w // predict_patchsize
    
    list_patches = np.zeros((num_patches_cols*num_patches_rows, obs_patchsize, obs_patchsize, num_channels))
    index = 0
    h_range = np.arange(obs_patchsize/2,ishape_h+obs_patchsize/2,predict_patchsize)
    #h_range = h_range[:-1]
    v_range = np.arange(obs_patchsize/2,ishape_w+obs_patchsize/2,predict_patchsize)
    #v_range = v_range[:-1]
    #ipdb.set_trace()
    for index_h in h_range:
        for index_w in v_range:
            patch_brain = extended_image[index_h-obs_patchsize/2: index_h+obs_patchsize/2 ,index_w-obs_patchsize/2: index_w+obs_patchsize/2,:]
            #if patch_brain.shape == (38,29,4):
            #   ipdb.set_trace()

            list_patches[index,:,:,:] = patch_brain
            index += 1
    #ipdb.set_trace()
    assert index == num_patches_rows*num_patches_cols
    return list_patches       
Example #22
 def backprop(self, x, y):
     """Return a tuple ``(nabla_b, nabla_w)`` representing the
     gradient for the cost function C_x.  ``nabla_b`` and
     ``nabla_w`` are layer-by-layer lists of numpy arrays, similar
     to ``self.biases`` and ``self.weights``."""
     nabla_b = [np.zeros(b.shape) for b in self.biases]
     nabla_w = [np.zeros(w.shape) for w in self.weights]
     # feedforward
     activation = x
     activations = [x] # list to store all the activations, layer by layer
     zs = [] # list to store all the z vectors, layer by layer
     for b, w in zip(self.biases, self.weights):
         z = np.dot(w, activation)+b
         zs.append(z)
         activation = sigmoid(z)
         activations.append(activation)
     # backward pass
     delta = self.cost_derivative(activations[-1], y) * \
         sigmoid_prime(zs[-1])
     nabla_b[-1] = delta
     nabla_w[-1] = np.dot(delta, activations[-2].transpose())
     # Note that the variable l in the loop below is used a little
     # differently to the notation in Chapter 2 of the book.  Here,
     # l = 1 means the last layer of neurons, l = 2 is the
     # second-last layer, and so on.  It's a renumbering of the
     # scheme in the book, used here to take advantage of the fact
     # that Python can use negative indices in lists.
     for l in xrange(2, self.num_layers):
         z = zs[-l]
         sp = sigmoid_prime(z)
         delta = np.dot(self.weights[-l+1].transpose(), delta) * sp
         nabla_b[-l] = delta
         nabla_w[-l] = np.dot(delta, activations[-l-1].transpose())
     return (nabla_b, nabla_w)
Example #23
 def test_reset_data_shape(self):
     shape1 = 10, 10, 10
     shape3 = 10, 10, 10, 3
     
     # Init data (explicit shape)
     data = np.zeros((10, 10, 10, 1), dtype=np.uint8)
     T = Texture3D(data=data)
     assert T.shape == (10, 10, 10, 1)
     assert T._format == gl.GL_LUMINANCE
     
     # Set data to rgb
     T.set_data(np.zeros(shape3, np.uint8))
     assert T.shape == (10, 10, 10, 3)
     assert T._format == gl.GL_RGB
     
     # Set data to grayscale
     T.set_data(np.zeros(shape1, np.uint8))
     assert T.shape == (10, 10, 10, 1)
     assert T._format == gl.GL_LUMINANCE
     
     # Set size to rgb
     T.resize(shape3)
     assert T.shape == (10, 10, 10, 3)
     assert T._format == gl.GL_RGB
     
     # Set size to grayscale
     T.resize(shape1)
     assert T.shape == (10, 10, 10, 1)
     assert T._format == gl.GL_LUMINANCE
Example #24
    def test_setitem_all_no_store(self):

        data = np.zeros((10, 10), dtype=np.uint8)
        T = Texture(data=data, store=False)
        T[...] = np.ones((10, 10), np.uint8)
        assert len(T._pending_data) == 1
        assert np.allclose(data, np.zeros((10, 10)))
Example #25
  def __init__(self):
    """
    Setup tri33 cell.
    """
    vertices = numpy.array([[-1.0, -1.0],
                            [+1.0, -1.0],
                            [-1.0, +1.0]])
    quadPts = vertices[:]
    quadWts = numpy.array( [2.0/3.0, 2.0/3.0, 2.0/3.0])

    # Compute basis fns and derivatives at quadrature points
    basis = numpy.zeros( (3, 3), dtype=numpy.float64)
    basisDeriv = numpy.zeros( (3, 3, 2), dtype=numpy.float64)
    iQuad = 0
    for q in quadPts:
      basis[iQuad] = numpy.array([self.N0(q), self.N1(q), self.N2(q)],
                                 dtype=numpy.float64).reshape( (3,) )
      deriv = numpy.array([[self.N0p(q), self.N0q(q)],
                           [self.N1p(q), self.N1q(q)],
                           [self.N2p(q), self.N2q(q)]])      
      basisDeriv[iQuad] = deriv.reshape((3, 2))
      iQuad += 1

    self.cellDim = 2
    self.numCorners = len(vertices)
    self.numQuadPts = len(quadPts)
    self.vertices = vertices
    self.quadPts = quadPts
    self.quadWts = quadWts
    self.basis = basis
    self.basisDeriv = basisDeriv
    return
Example #26
File: atomic.py Project: rcthomas/tardis
    def _create_collision_coefficient_matrix(self):
        self.C_ul_interpolator = {}
        self.delta_E_matrices = {}
        self.g_ratio_matrices = {}
        collision_group = self.atom_data.collision_data.groupby(level=['atomic_number', 'ion_number'])
        for species in self.nlte_species:
            no_of_levels = self.atom_data.levels.ix[species].energy.count()
            C_ul_matrix = np.zeros(
                    (
                        no_of_levels,
                        no_of_levels,
                        len(self.atom_data.collision_data_temperatures))
                    )
            delta_E_matrix = np.zeros((no_of_levels, no_of_levels))
            g_ratio_matrix = np.zeros((no_of_levels, no_of_levels))

            for (
                    atomic_number,
                    ion_number,
                    level_number_lower,
                    level_number_upper), line in (
                            collision_group.get_group(species).iterrows()):
                        # line.columns : delta_e, g_ratio, temperatures ...
                C_ul_matrix[level_number_lower, level_number_upper, :] = line.values[2:]
                delta_E_matrix[level_number_lower, level_number_upper] = line['delta_e']
                #TODO TARDISATOMIC: change the g_ratio to be the other way round - it is flipped here.
                g_ratio_matrix[level_number_lower, level_number_upper] = line['g_ratio']
            self.C_ul_interpolator[species] = interpolate.interp1d(
                    self.atom_data.collision_data_temperatures,
                    C_ul_matrix)
            self.delta_E_matrices[species] = delta_E_matrix

            self.g_ratio_matrices[species] = g_ratio_matrix
Example #27
File: reactor.py Project: archphy/poropy
    def sort_assemblies(self, pattern, assemblies) :
        """ Sort the assemblies by reactivity.
        """ 
        # TODO(robertsj): Consider a cleaner approach for this sorting.
        
        # We build a 2-d array of [index,kinf] pairs.  Sorting this gives
        #   the permuted index in the first entry.  The location of each
        #   original index will become the new pattern.  (Note that kinf
        #   is negated so we get descending order of reactivity; argsort
        #   has no option for ascending/descending.)
        pattern_length = len(pattern)
        index = np.zeros((pattern_length,2))
        for i in range(0, pattern_length) :
            index[i][0] = i
            index[i][1] = -assemblies[i].kinf()
        index=index[index[:,1].argsort(),0]
        
        # Define the sorted pattern and assemblies using the permuted 
        #   indices. Note that each pattern element will be unique, even 
        #   if a small number of unique assemblies  defined the pattern
        #   initially.
        sorted_pattern = np.zeros(len(pattern),dtype='i')
        sorted_assemblies = []
        for i in range(0, pattern_length) :
            sorted_pattern[i] = (np.where(index == i))[0][0]
            sorted_assemblies.append(assemblies[int(index[i])])

        return sorted_pattern, sorted_assemblies
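
The same ordering falls out of a single argsort on the negated reactivities, with no 2-D scratch array (a sketch, assuming len(assemblies) equals the pattern length):

import numpy as np

kinf = np.array([a.kinf() for a in assemblies])
order = np.argsort(-kinf)                        # most reactive first
sorted_assemblies = [assemblies[i] for i in order]
sorted_pattern = np.argsort(order).astype('i')   # new position of each original index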
Example #28
def conv3d_oneToMany(x, xShape, w, wShape, strideT, strideY, strideX, inName):
    [ntp, nyp, nxp, nifp, nofp] = wShape
    [nb, nt, ny, nx, nf] = xShape

    # stride must be divisible by both weights and input
    assert ntp % strideT == 0
    assert nyp % strideY == 0
    assert nxp % strideX == 0
    assert nt % strideT == 0
    assert ny % strideY == 0
    assert nx % strideX == 0

    assert nifp == nf

    print "Building weight indices for conv3d"
    # Build gather indices for weights
    # Must be in shape of target output weights
    weightIdxs = np.zeros(
        (int(ntp / strideT), int(nyp / strideY), int(nxp / strideX), nifp, nofp * strideT * strideX * strideY, 5)
    ).astype(np.int32)
    # Adding kernel number to end of features
    for itp in range(ntp):
        for iyp in range(nyp):
            for ixp in range(nxp):
                for iifp in range(nifp):
                    for iofp in range(nofp):
                        # Calculate output indices given input indices
                        # Must reverse, as we're using conv2d as transpose conv2d
                        otp = int((ntp - itp - 1) / strideT)
                        oyp = int((nyp - iyp - 1) / strideY)
                        oxp = int((nxp - ixp - 1) / strideX)
                        oifp = iifp  # Input features stay the same
                        # oofp uses iofp as offset, plus an nf stride based on which kernel it belongs to
                        kernelIdx = (itp % strideT) * strideY * strideX + (iyp % strideY) * strideX + (ixp % strideX)
                        oofp = iofp + nofp * kernelIdx
                        weightIdxs[otp, oyp, oxp, oifp, oofp, :] = [itp, iyp, ixp, iifp, iofp]

    print "Building output indices for conv3d"
    # Build gather indices for output
    # Must be in shape of target output data
    dataIdxs = np.zeros((nb, nt * strideT, ny * strideY, nx * strideX, nofp, 5)).astype(np.int32)
    for oob in range(nb):
        for oot in range(nt * strideT):
            for ooy in range(ny * strideY):
                for oox in range(nx * strideX):
                    for oof in range(nofp):
                        # Calculate input indices given output indices
                        iib = oob
                        iit = oot / strideT
                        iiy = ooy / strideY
                        iix = oox / strideX
                        kernelIdx = (oot % strideT) * strideY * strideX + (ooy % strideY) * strideX + (oox % strideX)
                        iif = oof + nofp * kernelIdx
                        dataIdxs[oob, oot, ooy, oox, oof, :] = [iib, iit, iiy, iix, iif]

    # Build convolution structure
    w_reshape = tf.gather_nd(w, weightIdxs)
    o_reshape = tf.nn.conv3d(x, w_reshape, strides=[1, 1, 1, 1, 1], padding="SAME", name=inName)
    o = tf.gather_nd(o_reshape, dataIdxs)
    return o
Example #29
def conv_backward_naive(dout, cache):
    """
    A naive implementation of the backward pass for a convolutional layer.

    Inputs:
    - dout: Upstream derivatives.
    - cache: A tuple of (x, w, b, conv_param) as in conv_forward_naive

    Returns a tuple of:
    - dx: Gradient with respect to x
    - dw: Gradient with respect to w
    - db: Gradient with respect to b
    """
    dx, dw, db = None, None, None
    x, w, b, conv_param = cache
    stride = conv_param['stride']
    pad = conv_param['pad']
    N, C, H, W = x.shape
    F, _, HH, WW = w.shape
    Hp = 1 + (H + 2 * pad - HH) / stride
    Wp = 1 + (W + 2 * pad - WW) / stride
    dx = np.zeros(x.shape)
    dw = np.zeros(w.shape)
    db = np.zeros(b.shape)
    for i in xrange(N):
        # for j in xrange(F):
        data = x[i]
        data = np.pad(data, ((0, 0), (pad, pad), (pad, pad)), 'constant')
        paded_dxi = np.pad(dx[i], ((0, 0), (pad, pad), (pad, pad)), 'constant')
        filter_vert_indices = 0
        filter_hori_indices = 0
        for s in xrange(Hp):
            filter_hori_indices = 0
            for p in xrange(Wp):
                data_fragment = data[:, filter_vert_indices:filter_vert_indices+HH,
                                                         filter_hori_indices:filter_hori_indices+WW]
                dw += np.einsum('i, jkl->ijkl', dout[i, :, s, p], data_fragment)
                # paded_dxi[:, filter_vert_indices:filter_vert_indices+HH,
                #                                          filter_hori_indices:filter_hori_indices+WW] = \
                #                                          np.einsum('ijkl,i->jkl', w, dout[i, :, s, p])
                # paded_dxi[:, filter_vert_indices:filter_vert_indices+HH,
                #                                          filter_hori_indices:filter_hori_indices+WW] = \
                #                                          np.tensordot(w, dout[i, :, s, p], axes = ([0], [0]))
                for f in xrange(F):
                    paded_dxi[:, filter_vert_indices:filter_vert_indices+HH,
                                        filter_hori_indices:filter_hori_indices+WW] \
                                         += w[f] * dout[i, f, s, p]
                filter_hori_indices += stride
            filter_vert_indices += stride
        dx[i] = paded_dxi[:, pad:-pad, pad:-pad]
    db = np.einsum('ijkl->j', dout)
    # print(dx)

    return dx, dw, db
Example #30
def torgerson(distances, n_components=2):
    """
    Perform classical mds (Torgerson scaling).

    .. note::
        If the distances are euclidean then this is equivalent to projecting
        the original data points to the first `n` principal components.

    """
    distances = np.asarray(distances)
    assert distances.shape[0] == distances.shape[1]
    N = distances.shape[0]
    # element-wise squared distances
    D_sq = distances ** 2

    # double center the D_sq
    rsum = np.sum(D_sq, axis=1, keepdims=True)
    csum = np.sum(D_sq, axis=0, keepdims=True)
    total = np.sum(csum)
    D_sq -= rsum / N
    D_sq -= csum / N
    D_sq += total / (N ** 2)
    B = np.multiply(D_sq, -0.5, out=D_sq)

    U, L, _ = np.linalg.svd(B)
    if n_components > N:
        U = np.hstack((U, np.zeros((N, n_components - N))))
        L = np.hstack((L, np.zeros((n_components - N))))
    U = U[:, :n_components]
    L = L[:n_components]
    D = np.diag(np.sqrt(L))
    return np.dot(U, D)
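
A quick sanity check (sketch): three collinear points are recovered, up to rigid motion, from their distance matrix.

import numpy as np

pts = np.array([[0.0], [1.0], [3.0]])
D = np.abs(pts - pts.T)              # pairwise distances in 1-D
emb = torgerson(D, n_components=2)
print(np.round(emb, 3))              # first column spans the line, second is ~0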
Example #31
    def evaluate(self, v):
        """
        Evaluate coefficients in standard 3D coordinate basis from those in 3D FB basis

        :param v: A coefficient vector (or an array of coefficient vectors) in FB basis
            to be evaluated. The first dimension must equal `self.count`.
        :return x: The evaluation of the coefficient vector(s) `x` in standard 3D
            coordinate basis. This is an array whose first three dimensions equal
            `self.sz` and the remaining dimensions correspond to dimensions two and
            higher of `v`.
        """
        # make sure the first dimension of v is self.count
        v, sz_roll = unroll_dim(v, 2)
        v = m_reshape(v, (self.count, -1))

        # get information on polar grids from precomputed data
        n_theta = np.size(self._precomp['ang_theta_wtd'], 0)
        n_phi = np.size(self._precomp['ang_phi_wtd_even'][0], 0)
        n_r = np.size(self._precomp['radial_wtd'], 0)

        # number of 3D image samples
        n_data = np.size(v, 1)

        u_even = np.zeros((n_r, int(2 * self.ell_max + 1), n_data,
                           int(np.floor(self.ell_max / 2) + 1)),
                          dtype=v.dtype)
        u_odd = np.zeros((n_r, int(2 * self.ell_max + 1), n_data,
                          int(np.ceil(self.ell_max / 2))),
                         dtype=v.dtype)

        # go through each basis function and find corresponding coefficient
        # evaluate the radial parts
        for ell in range(0, self.ell_max + 1):
            k_max_ell = self.k_max[ell]
            radial_wtd = self._precomp['radial_wtd'][:, 0:k_max_ell, ell]

            ind = self._indices['ells'] == ell

            v_ell = m_reshape(v[ind, :], (k_max_ell, (2 * ell + 1) * n_data))
            v_ell = radial_wtd @ v_ell
            v_ell = m_reshape(v_ell, (n_r, 2 * ell + 1, n_data))

            if np.mod(ell, 2) == 0:
                u_even[:,
                       int(self.ell_max - ell):int(self.ell_max + ell + 1), :,
                       int(ell / 2)] = v_ell
            else:
                u_odd[:,
                      int(self.ell_max - ell):int(self.ell_max + ell + 1), :,
                      int((ell - 1) / 2)] = v_ell

        u_even = np.transpose(u_even, (3, 0, 1, 2))
        u_odd = np.transpose(u_odd, (3, 0, 1, 2))
        w_even = np.zeros((n_phi, n_r, n_data, 2 * self.ell_max + 1),
                          dtype=v.dtype)
        w_odd = np.zeros((n_phi, n_r, n_data, 2 * self.ell_max + 1),
                         dtype=v.dtype)

        # evaluate the phi parts
        for m in range(0, self.ell_max + 1):
            ang_phi_wtd_m_even = self._precomp['ang_phi_wtd_even'][m]
            ang_phi_wtd_m_odd = self._precomp['ang_phi_wtd_odd'][m]

            n_even_ell = np.size(ang_phi_wtd_m_even, 1)
            n_odd_ell = np.size(ang_phi_wtd_m_odd, 1)

            if m == 0:
                sgns = (1, )
            else:
                sgns = (1, -1)

            for sgn in sgns:

                end = np.size(u_even, 0)
                u_m_even = u_even[end - n_even_ell:end, :,
                                  self.ell_max + sgn * m, :]
                end = np.size(u_odd, 0)
                u_m_odd = u_odd[end - n_odd_ell:end, :,
                                self.ell_max + sgn * m, :]

                u_m_even = m_reshape(u_m_even, (n_even_ell, n_r * n_data))
                u_m_odd = m_reshape(u_m_odd, (n_odd_ell, n_r * n_data))

                w_m_even = ang_phi_wtd_m_even @ u_m_even
                w_m_odd = ang_phi_wtd_m_odd @ u_m_odd

                w_m_even = m_reshape(w_m_even, (n_phi, n_r, n_data))
                w_m_odd = m_reshape(w_m_odd, (n_phi, n_r, n_data))

                w_even[:, :, :, self.ell_max + sgn * m] = w_m_even
                w_odd[:, :, :, self.ell_max + sgn * m] = w_m_odd

        w_even = np.transpose(w_even, (3, 0, 1, 2))
        w_odd = np.transpose(w_odd, (3, 0, 1, 2))
        u_even = w_even
        u_odd = w_odd

        u_even = m_reshape(u_even,
                           (2 * self.ell_max + 1, n_phi * n_r * n_data))
        u_odd = m_reshape(u_odd, (2 * self.ell_max + 1, n_phi * n_r * n_data))

        # evaluate the theta parts
        w_even = self._precomp['ang_theta_wtd'] @ u_even
        w_odd = self._precomp['ang_theta_wtd'] @ u_odd

        pf = w_even + 1j * w_odd
        pf = m_reshape(pf, (n_theta * n_phi * n_r, n_data))

        # perform the inverse non-uniform FFT back to 3D rectangular coordinates
        freqs = m_reshape(self._precomp['fourier_pts'],
                          (3, n_r * n_theta * n_phi, -1))
        x = np.zeros((self.sz[0], self.sz[1], self.sz[2], n_data),
                     dtype=v.dtype)
        for isample in range(0, n_data):
            x[..., isample] = np.real(anufft3(pf[:, isample], freqs, self.sz))

        # return the x with the first three dimensions of self.sz
        x = roll_dim(x, sz_roll)
        return x
Example #32
def test_plot_raw_psd():
    """Test plotting of raw psds."""
    raw = _get_raw()
    # normal mode
    raw.plot_psd(average=False)
    # specific mode
    picks = pick_types(raw.info, meg='mag', eeg=False)[:4]
    raw.plot_psd(tmax=None, picks=picks, area_mode='range', average=False,
                 spatial_colors=True)
    raw.plot_psd(tmax=20., color='yellow', dB=False, line_alpha=0.4,
                 n_overlap=0.1, average=False)
    plt.close('all')
    ax = plt.axes()
    # if ax is supplied:
    pytest.raises(ValueError, raw.plot_psd, ax=ax, average=True)
    raw.plot_psd(tmax=None, picks=picks, ax=ax, average=True)
    plt.close('all')
    ax = plt.axes()
    with pytest.raises(ValueError, match='2 axes must be supplied, got 1'):
        raw.plot_psd(ax=ax, average=True)
    plt.close('all')
    ax = plt.subplots(2)[1]
    raw.plot_psd(tmax=None, ax=ax, average=True)
    plt.close('all')
    # topo psd
    ax = plt.subplot()
    raw.plot_psd_topo(axes=ax)
    plt.close('all')
    # with channel information not available
    for idx in range(len(raw.info['chs'])):
        raw.info['chs'][idx]['loc'] = np.zeros(12)
    with pytest.warns(RuntimeWarning, match='locations not available'):
        raw.plot_psd(spatial_colors=True, average=False)
    # with a flat channel
    raw[5, :] = 0
    for dB, estimate in itertools.product((True, False),
                                          ('power', 'amplitude')):
        with pytest.warns(UserWarning, match='[Infinite|Zero]'):
            fig = raw.plot_psd(average=True, dB=dB, estimate=estimate)
        ylabel = fig.axes[1].get_ylabel()
        ends_dB = ylabel.endswith('mathrm{(dB)}$')
        if dB:
            assert ends_dB, ylabel
        else:
            assert not ends_dB, ylabel
        if estimate == 'amplitude':
            assert r'fT/cm/\sqrt{Hz}' in ylabel, ylabel
        else:
            assert estimate == 'power'
            assert '(fT/cm)²/Hz' in ylabel, ylabel
        ylabel = fig.axes[0].get_ylabel()
        if estimate == 'amplitude':
            assert r'fT/\sqrt{Hz}' in ylabel
        else:
            assert 'fT²/Hz' in ylabel
    # test reject_by_annotation
    raw = _get_raw()
    raw.set_annotations(Annotations([1, 5], [3, 3], ['test', 'test']))
    raw.plot_psd(reject_by_annotation=True)
    raw.plot_psd(reject_by_annotation=False)
    plt.close('all')

    # test fmax value checking
    with pytest.raises(ValueError, match='not exceed one half the sampling'):
        raw.plot_psd(fmax=50000)

    # test xscale value checking
    with pytest.raises(ValueError, match="Invalid value for the 'xscale'"):
        raw.plot_psd(xscale='blah')

    # gh-5046
    raw = read_raw_fif(raw_fname, preload=True).crop(0, 1)
    picks = pick_types(raw.info, meg=True)
    raw.plot_psd(picks=picks, average=False)
    raw.plot_psd(picks=picks, average=True)
    plt.close('all')
    raw.set_channel_types({'MEG 0113': 'hbo', 'MEG 0112': 'hbr',
                           'MEG 0122': 'fnirs_cw_amplitude',
                           'MEG 0123': 'fnirs_od'},
                          verbose='error')
    fig = raw.plot_psd()
    assert len(fig.axes) == 10
    plt.close('all')

    # gh-7631
    data = 1e-3 * np.random.rand(2, 100)
    info = create_info(['CH1', 'CH2'], 100)
    raw = RawArray(data, info)
    picks = pick_types(raw.info, misc=True)
    raw.plot_psd(picks=picks, spatial_colors=False)
    plt.close('all')
Example #33
 def create_object():
     return np.zeros(int(object_size), dtype=np.uint8)
Example #34
# create own image with drawing
import numpy as np
import cv2

# initialize image array
pic = np.zeros((500, 500, 3), dtype='uint8')  # uint8 means 0 - 255
color = (255, 255, 255)

# create a rectangle
# cv2.rectangle(
#   pic,             # image
#   (0, 0),          # point 1
#   (500, 150),      # point 2
#   (123, 200, 98),  # color
#   3,               # thickness
#   lineType=8,      # lineType
#   shift=0          # shift
# )

# draw a line
# cv2.line(pic, (200, 200), (500, 500), color)

# draw a circle
cv2.circle(pic, (250, 250), 100, color)

cv2.imshow('Image', pic)
cv2.waitKey(0)
cv2.destroyAllWindows()
Example #35
def kmeans(datafile, K):

    df = pd.read_csv(datafile, header=None, names=["label", "x", "y"])

    df['c'] = 0
    df['dc'] = 0.0

    n_df = df.shape[0]
    df = df.reset_index(drop=True)

    # Intial K points
    k0_i = np.random.choice(range(n_df), K, replace=False).tolist()
    k0 = np.array(df.iloc[k0_i, 1:3])
    centroids_old = np.zeros([K, 2])
    centroids_new = k0
    #    classes = df['label'].iloc[k0_i]

    #xy = np.array(k0[['x','y']])

    # check if no changes or if counter has reached 50
    counter = 0

    while sum(sum(
        (np.array(centroids_new) - np.array(centroids_old))**2)) > 1e-3:
        #while list(centroids_new) != list(centroids_old):
        counter += 1
        # assign labels
        for i in range(n_df):
            px = np.array(df.iloc[i, 1:3])
            #        c,dc = pick_k(px, centroids_new)
            df.loc[i, 'c'], df.loc[i, 'dc'] = pick_k(px, centroids_new)
        centroids_old = np.array(centroids_new)
        #        print(list(centroids_old))

        for j in range(K):
            k_cluster = df.loc[df['c'] == j]
            xc = k_cluster['x'].mean()
            yc = k_cluster['y'].mean()

            centroids_new[j] = [xc, yc]
        if counter > 50:
            break

    #calculate WC-SSD
    WC_SSD = sum(np.array(df.dc)**2)

    #calculate SC
    pn = np.array(df[['x', 'y']])
    dij = DPnxn(pn, n_df)
    S = []  # K
    # find index sets for K clusters
    for i in range(K):
        S_i = df[df.c == i].index.values
        S.append(S_i)
    SCi = []
    #
    for i in range(n_df):
        ci = df.c[i]
        a = dij[i, :][S[ci]].mean()
        b = dij[i, :][list(set(range(n_df)) - set(S[ci]))].mean()
        SCi.append((b - a) / max(a, b))
    SC = np.array(SCi).mean()

    SC = metrics.silhouette_score(pn, df.label)  # note: overwrites the hand-computed SC above

    NMI = normalized_mutual_info_score(df.label, df.c)

    return WC_SSD, SC, NMI
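
For comparison, scikit-learn's KMeans exposes the same within-cluster sum of squares directly (sketch; pn and K as above):

from sklearn.cluster import KMeans

km = KMeans(n_clusters=K).fit(pn)
wc_ssd = km.inertia_  # within-cluster sum of squared distances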
Example #36
def DPnxn(pn, n):
    dij = np.zeros((n, n))
    for i in range(n):
        for j in range(n):
            dij[i][j] = d2(pn[i], pn[j])
    return dij
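
If d2 is plain Euclidean distance (an assumption; d2 is not shown), the O(n^2) loop in DPnxn collapses to SciPy's pairwise-distance call (sketch):

from scipy.spatial.distance import cdist

dij = cdist(pn, pn)  # n x n matrix of Euclidean distances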
def lowess(endog, exog, frac=2./3, it=3):
    """
    LOWESS (Locally Weighted Scatterplot Smoothing)

    A lowess function that outputs smoothed estimates of endog
    at the given exog values from points (exog, endog)

    Parameters
    ----------
    endog: 1-D numpy array
        The y-values of the observed points
    exog: 1-D numpy array
        The x-values of the observed points
    frac: float
        Between 0 and 1. The fraction of the data used
        when estimating each y-value.
    it: int
        The number of residual-based reweightings
        to perform.

    Returns
    -------
    out: numpy array
        A numpy array with two columns. The first column
        is the sorted x values and the second column the
        associated estimated y-values.

    Notes
    -----
    This lowess function implements the algorithm given in the
    reference below using local linear estimates.

    Suppose the input data has N points. The algorithm works by
    estimating the true ``y_i`` by taking the frac*N closest points
    to ``(x_i,y_i)`` based on their x values and estimating ``y_i``
    using a weighted linear regression. The weight for ``(x_j,y_j)``
    is `_lowess_tricube` function applied to ``|x_i-x_j|``.

    If ``it > 0``, then further weighted local linear regressions
    are performed, where the weights are the same as above
    times the `_lowess_bisquare` function of the residuals. Each iteration
    takes approximately the same amount of time as the original fit,
    so these iterations are expensive. They are most useful when
    the noise has extremely heavy tails, such as Cauchy noise.
    Noise with less heavy tails, such as t-distributions with ``df > 2``,
    is less problematic. The weights downgrade the influence of
    points with large residuals. In the extreme case, points whose
    residuals are larger than 6 times the median absolute residual
    are given weight 0.

    Some experimentation is likely required to find a good
    choice of ``frac`` and ``it`` for a particular dataset.

    References
    ----------
    Cleveland, W.S. (1979) "Robust Locally Weighted Regression
    and Smoothing Scatterplots". Journal of the American Statistical
    Association 74 (368): 829-836.

    Examples
    --------
    The below allows a comparison between how different the fits from
    `lowess` for different values of frac can be.

    >>> import numpy as np
    >>> import statsmodels.api as sm
    >>> lowess = sm.nonparametric.lowess
    >>> x = np.random.uniform(low=-2*np.pi, high=2*np.pi, size=500)
    >>> y = np.sin(x) + np.random.normal(size=len(x))
    >>> z = lowess(y, x)
    >>> w = lowess(y, x, frac=1./3)

    This gives a similar comparison for when ``it`` is 0 vs. when it is not.

    >>> import scipy.stats as stats
    >>> x = np.random.uniform(low=-2*np.pi, high=2*np.pi, size=500)
    >>> y = np.sin(x) + stats.cauchy.rvs(size=len(x))
    >>> z = lowess(y, x, frac= 1./3, it=0)
    >>> w = lowess(y, x, frac=1./3)

    """
    x = exog

    if exog.ndim != 1:
        raise ValueError('exog must be a vector')
    if endog.ndim != 1:
        raise ValueError('endog must be a vector')
    if endog.shape[0] != x.shape[0] :
        raise ValueError('exog and endog must have same length')

    n = exog.shape[0]
    fitted = np.zeros(n)

    k = int(frac * n)

    index_array = np.argsort(exog)
    x_copy = np.array(exog[index_array]) #, dtype ='float32')
    y_copy = endog[index_array]

    fitted, weights = _lowess_initial_fit(x_copy, y_copy, k, n)

    for i in range(it):
        _lowess_robustify_fit(x_copy, y_copy, fitted,
                              weights, k, n)

    out = np.array([x_copy, fitted]).T
    out.shape = (n,2)

    return out
Example #38
def extract_muons_from_run(
    input_run_path,
    output_run_path,
    output_run_header_path
):
    """
    Detects and extracts muon candidate events from a run. The muon candidate
    events are exported into a new output run. In addition, a header for the
    muon candidates is exported.


    Parameters
    ----------
    input_run_path              Path to the input run.

    output_run_path             Path to the output run of muon candidates.

    output_run_header_path      Path to the binary output run header.


    Binary Output Format Run Header
    -------------------------------
    for each muon candidate:

    1)      uint32      Night
    2)      uint32      Run ID
    3)      uint32      Event ID
    4)      uint32      unix time seconds [s]
    5)      uint32      unix time micro seconds modulo full seconds [us]
    6)      float32     Pointing zenith distance [deg]
    7)      float32     Pointing azimuth [deg]
    8)      float32     muon ring center x [deg]
    9)      float32     muon ring center y [deg]
   10)      float32     muon ring radius [deg]
   11)      float32     mean arrival time muon cluster [s]
   12)      float32     muon ring overlap with field of view (0.0 to 1.0) [1]
   13)      float32     number of photons muon cluster [1]
    """
    run = ps.EventListReader(input_run_path)
    with gzip.open(output_run_path, 'wt') as f_muon_run, \
        open(output_run_header_path, 'wb') as f_muon_run_header:

        for event in run:

            if (
                event.observation_info.trigger_type ==
                FACT_PHYSICS_SELF_TRIGGER
            ):

                photon_clusters = ps.PhotonStreamCluster(event.photon_stream)
                muon_features = detection(event, photon_clusters)

                if muon_features['is_muon']:

                    # EXPORT EVENT in JSON
                    event_dict = ps.io.jsonl.event_to_dict(event)
                    json.dump(event_dict, f_muon_run)
                    f_muon_run.write('\n')

                    # EXPORT EVENT header
                    head1 = np.zeros(5, dtype=np.uint32)
                    head1[0] = event.observation_info.night
                    head1[1] = event.observation_info.run
                    head1[2] = event.observation_info.event
                    head1[3] = event.observation_info._time_unix_s
                    head1[4] = event.observation_info._time_unix_us

                    head2 = np.zeros(8, dtype=np.float32)
                    head2[0] = event.zd
                    head2[1] = event.az
                    head2[2] = muon_features['muon_ring_cx']*rad2deg
                    head2[3] = muon_features['muon_ring_cy']*rad2deg
                    head2[4] = muon_features['muon_ring_r']*rad2deg
                    head2[5] = muon_features['mean_arrival_time_muon_cluster']
                    head2[6] = muon_features[
                        'muon_ring_overlapp_with_field_of_view'
                    ]
                    head2[7] = muon_features['number_of_photons']

                    f_muon_run_header.write(head1.tobytes())
                    f_muon_run_header.write(head2.tobytes())
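
# A minimal reader sketch (not part of the original tool) for the binary run
# header written above, assuming the documented record layout of five uint32
# fields followed by eight float32 fields (52 bytes per muon candidate).
muon_header_dtype = np.dtype([
    ('night', np.uint32), ('run', np.uint32), ('event', np.uint32),
    ('unix_time_s', np.uint32), ('unix_time_us', np.uint32),
    ('zd_deg', np.float32), ('az_deg', np.float32),
    ('ring_cx_deg', np.float32), ('ring_cy_deg', np.float32),
    ('ring_r_deg', np.float32), ('mean_arrival_time_s', np.float32),
    ('fov_overlap', np.float32), ('n_photons', np.float32),
])

def read_muon_run_header(path):
    # np.fromfile splits the packed byte stream into structured records
    return np.fromfile(path, dtype=muon_header_dtype)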
Example #39
def calc_flow(depth_src,
              pose_src,
              pose_tgt,
              K,
              depth_tgt,
              thresh=3e-3,
              standard_rep=False):
    """
    project the points in source corrd to target corrd
    :param standard_rep:
    :param depth_src: depth image of source(m)
    :param pose_src: pose matrix of soucre, [R|T], 3x4
    :param depth_tgt: depth image of target
    :param pose_tgt: pose matrix of target, [R|T], 3x4
    :param K: intrinsic_matrix
    :param depth_tgt: depth image of target(m)
    :return: visible: whether points in source can be viewed in target
    :return: flow: flow from source to target
    """
    height = depth_src.shape[0]
    width = depth_src.shape[1]
    visible = np.zeros(depth_src.shape[:2]).flatten()
    X = backproject_camera(depth_src, intrinsic_matrix=K)
    transform = np.matmul(K, se3_mul(pose_tgt, se3_inverse(pose_src)))
    Xp = np.matmul(
        transform,
        np.append(X, np.ones([1, X.shape[1]], dtype=np.float32), axis=0))

    pz = Xp[2] + 1e-15
    pw = Xp[0] / pz
    ph = Xp[1] / pz

    valid_points = np.where(depth_src.flatten() != 0)[0]
    depth_proj_valid = pz[valid_points]
    pw_valid_raw = np.round(pw[valid_points]).astype(int)
    pw_valid = np.minimum(np.maximum(pw_valid_raw, 0), width - 1)
    ph_valid_raw = np.round(ph[valid_points]).astype(int)
    ph_valid = np.minimum(np.maximum(ph_valid_raw, 0), height - 1)
    p_within = np.logical_and(
        np.logical_and(pw_valid_raw >= 0, pw_valid_raw < width),
        np.logical_and(ph_valid_raw >= 0, ph_valid_raw < height),
    )

    depth_tgt_valid = depth_tgt[ph_valid, pw_valid]

    p_within = np.logical_and(
        p_within,
        np.abs(depth_tgt_valid - depth_proj_valid) < thresh)
    p_valid = np.abs(depth_tgt_valid) > 1e-10
    fg_points = valid_points[np.logical_and(p_within, p_valid)]
    visible[fg_points] = 1
    visible = visible.reshape(depth_src.shape[:2])
    w_ori, h_ori = np.meshgrid(np.linspace(0, width - 1, width),
                               np.linspace(0, height - 1, height))
    if standard_rep:
        flow = np.dstack([
            pw.reshape(depth_src.shape[:2]) - w_ori,
            ph.reshape(depth_src.shape[:2]) - h_ori
        ])
    else:
        # deprecated representation, kept only for old code
        flow = np.dstack([
            ph.reshape(depth_src.shape[:2]) - h_ori,
            pw.reshape(depth_src.shape[:2]) - w_ori
        ])
    flow[np.dstack([visible, visible]) != 1] = 0
    assert np.isnan(flow).sum() == 0
    X_valid = np.array([c[np.where(visible.flatten())] for c in X])
    return flow, visible, X_valid
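
# A hedged sketch (assumption, not the project's helper) of what
# backproject_camera presumably does: lift every pixel into camera
# coordinates with the pinhole model X = depth * K^-1 [u, v, 1]^T,
# returning a (3, height*width) array.
def backproject_camera_sketch(depth, intrinsic_matrix):
    height, width = depth.shape
    u, v = np.meshgrid(np.arange(width), np.arange(height))
    pixels = np.stack([u, v, np.ones_like(u)], axis=0).reshape(3, -1)
    rays = np.linalg.inv(intrinsic_matrix) @ pixels  # unit-depth rays
    return rays * depth.reshape(1, -1)               # scale rays by depth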
Example #40
    def __init__(self, s_size, a_size, scope, optimizer):
        with tf.variable_scope(scope):
            # input and visual encoding layers
            self.inputs = tf.placeholder(shape=[None, s_size], dtype=tf.float32)
            self.image_in = tf.reshape(self.inputs, shape=[-1, 84, 84, 1])
            self.conv1 = slim.conv2d(activation_fn=tf.nn.elu, inputs=self.image_in, num_outputs=16,
                                     kernel_size=[8, 8], stride=[4, 4], padding='VALID')
            self.conv2 = slim.conv2d(activation_fn=tf.nn.elu, inputs=self.conv1, num_outputs=32,
                                     kernel_size=[4, 4], stride=[2, 2], padding='VALID')
            hidden = slim.fully_connected(slim.flatten(self.conv2), 256, activation_fn=tf.nn.elu)

            # Recurrent network for temporal dependencies
            lstm_cell = tf.contrib.rnn.BasicLSTMCell(256, state_is_tuple=True)
            c_init = np.zeros((1, lstm_cell.state_size.c), np.float32)
            h_init = np.zeros((1, lstm_cell.state_size.h), np.float32)
            self.state_init = [c_init, h_init]
            c_in = tf.placeholder(tf.float32, [1, lstm_cell.state_size.c])
            h_in = tf.placeholder(tf.float32, [1, lstm_cell.state_size.h])
            self.state_in = (c_in, h_in)
            rnn_in = tf.expand_dims(hidden, [0])
            step_size = tf.shape(self.image_in)[:1]
            state_in = tf.contrib.rnn.LSTMStateTuple(c_in, h_in)
            lstm_outputs, lstm_state = tf.nn.dynamic_rnn(lstm_cell, rnn_in,
                                                         initial_state=state_in,
                                                         sequence_length=step_size,
                                                         time_major=False)
            lstm_c, lstm_h = lstm_state
            self.state_out = (lstm_c[:1, :], lstm_h[:1, :])
            rnn_out = tf.reshape(lstm_outputs, [-1, 256])

            # output layers for policy and value estimations
            self.policy = slim.fully_connected(rnn_out, a_size,
                                               activation_fn=tf.nn.softmax,
                                               weights_initializer=normalized_columns_initializer(0.01),
                                               biases_initializer=None)
            self.value = slim.fully_connected(rnn_out, 1,
                                              activation_fn=None,
                                              weights_initializer=normalized_columns_initializer(1.0),
                                              biases_initializer=None)

            # only the worker network needs ops for loss and gradient update
            if scope != 'global':
                self.actions = tf.placeholder(shape=[None], dtype=tf.int32)
                self.actions_onehot = tf.one_hot(self.actions, a_size, dtype=tf.float32)
                self.target_v = tf.placeholder(shape=[None], dtype=tf.float32)
                self.advantages = tf.placeholder(shape=[None], dtype=tf.float32)
                self.responsible_outputs = tf.reduce_sum(self.policy * self.actions_onehot, [1])

                # loss functions
                self.value_loss = 0.5 * tf.reduce_sum(tf.square(self.target_v - tf.reshape(self.value, [-1])))
                self.entropy = -tf.reduce_sum(self.policy * tf.log(self.policy))
                self.policy_loss = -tf.reduce_sum(tf.log(self.responsible_outputs) * self.advantages)
                self.loss = 0.5 * self.value_loss + self.policy_loss - self.entropy * 0.01

                # get gradients from local network using local losses
                local_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
                self.gradients = tf.gradients(self.loss, local_vars)
                self.var_norms = tf.global_norm(local_vars)
                grads, self.grad_norms = tf.clip_by_global_norm(self.gradients, 40.)

                # apply local gradients to global network
                global_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'global')
                self.apply_grads = optimizer.apply_gradients(zip(grads, global_vars))
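
# A short numpy sketch (illustration only) of the loss assembled above:
# value loss, policy-gradient loss, and an entropy bonus, combined with the
# same 0.5 and 0.01 coefficients (small epsilons added here to avoid log(0)).
def a3c_loss_sketch(policy, actions_onehot, values, target_v, advantages):
    responsible = np.sum(policy * actions_onehot, axis=1)  # pi(a_t | s_t)
    value_loss = 0.5 * np.sum((target_v - values) ** 2)
    entropy = -np.sum(policy * np.log(policy + 1e-8))
    policy_loss = -np.sum(np.log(responsible + 1e-8) * advantages)
    return 0.5 * value_loss + policy_loss - 0.01 * entropy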
Example #41
X = np.log(np.abs(X) / (1 - np.abs(X)))
#X=sp.stats.zscore(X)

#pca = PCA(n_components=2)
#pca.fit(X.T)
#Xred=pca.components_.T
#Xred=sp.stats.zscore(Xred)

#vectvox=np.random.randint(0,X.shape[1],100)
#vectvox=np.random.permutation(100)
#Xred=X[:,vectvox_app_fewer[vectvox[1:50]]]

Xred = X
NVox = Xred.shape[1]
SizeLayer = int(NVox / 10)
res = np.zeros(100)
for iiter in range(100):
    X_train, X_test, y_train, y_test = train_test_split(Xred,
                                                        Y,
                                                        train_size=0.75)
    scaler = StandardScaler()
    scaler.fit(X_train)
    X_train = scaler.transform(X_train)
    X_test = scaler.transform(X_test)
    clf = MLPClassifier(hidden_layer_sizes=(SizeLayer,),
                        activation='relu',
                        max_iter=500).fit(X_train, y_train)
    res[iiter] = clf.score(X_test, y_test)

plt.hist(res)
plt.show()
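
# A small follow-up sketch (illustration): summarize the held-out accuracies
# collected above before plotting them.
print("mean accuracy over %d splits: %.3f +/- %.3f"
      % (len(res), res.mean(), res.std()))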
Example #42
    fig = plt.figure()
    plt.axis("off")
    fig.add_subplot(2, 4, 1)
    plt.imshow(im_src)
    fig.add_subplot(2, 4, 2)
    plt.imshow(im_tgt)
    fig.add_subplot(2, 4, 3)
    plt.imshow(depth_src)
    fig.add_subplot(2, 4, 4)
    plt.imshow(depth_tgt)

    fig.add_subplot(2, 4, 5)
    height = depth_src.shape[0]
    width = depth_src.shape[1]
    img_tgt = np.zeros((height, width, 3), np.uint8)
    img_src = np.zeros((height, width, 3), np.uint8)
    for h in range(height):
        for w in range(width):
            if visible[h, w]:
                cur_flow = flow[h, w, :]
                img_src = cv2.line(
                    img_src,
                    (np.round(w).astype(int), np.round(h).astype(int)),
                    (np.round(w).astype(int), np.round(h).astype(int)),
                    (255, h * 255 / height, w * 255 / width),
                    5,
                )
                img_tgt = cv2.line(
                    img_tgt,
                    (np.round(w + cur_flow[1]).astype(int),
                     np.round(h + cur_flow[0]).astype(int)),
                    (np.round(w + cur_flow[1]).astype(int),
                     np.round(h + cur_flow[0]).astype(int)),
                    (255, h * 255 / height, w * 255 / width),
                    5,
                )

Example #43
	def compute_cell_information(self, obj_model_dict):

		cached_information = dict()

		# First we obtain a sample from the Pareto Frontier of NUM_POINTS_FRONTIER

		moop = MOOP(obj_model_dict, obj_model_dict, self.input_space, False)
		
		grid = sobol_grid.generate(self.input_space.num_dims, self.input_space.num_dims * GRID_SIZE)

		if USE_GRID_ONLY:

			moop.solve_using_grid(grid)

			for i in range(len(obj_model_dict.keys())):
				result = self.find_optimum_gp(obj_model_dict[ obj_model_dict.keys()[ i ] ], grid)
				moop.append_to_population(result)
		else:

			assert NSGA_POP > len(obj_model_dict.keys()) + 1

			moop.solve_using_grid(grid)

			for i in range(len(obj_model_dict.keys())):
				result = self.find_optimum_gp(obj_model_dict[ obj_model_dict.keys()[ i ] ], grid)
				moop.append_to_population(result)

			pareto_set = moop.compute_pareto_front_and_set_summary(NSGA_POP)['pareto_set']

			moop.initialize_population(np.maximum(NSGA_POP - pareto_set.shape[ 0 ], 0))

			for i in range(pareto_set.shape[ 0 ]):
				moop.append_to_population(pareto_set[ i, : ])

			moop.evolve_population_only(NSGA_EPOCHS)

			for i in range(pareto_set.shape[ 0 ]):
				moop.append_to_population(pareto_set[ i, : ])

		result = moop.compute_pareto_front_and_set_summary(NUM_POINTS_FRONTIER)

		print 'Inner multi-objective problem solved!'

		means_objectives = np.zeros((obj_model_dict[ obj_model_dict.keys()[ 0 ] ].inputs.shape[ 0 ], len(obj_model_dict)))

		k = 0
		for obj in obj_model_dict:
			means_objectives[ :, k ] = obj_model_dict[ obj ].predict(obj_model_dict[ obj ].inputs)[ 0 ]
			k += 1

		v_inf = np.ones((1, len(obj_model_dict))) * np.inf
		v_ref = np.ones((1, len(obj_model_dict))) * 1e3

		# We add the non-dominated prediction and the observed inputs to the frontier

		frontier = result['frontier']
		frontier = np.vstack((frontier, means_objectives))
		frontier = frontier[ _cull_algorithm(frontier), ]

		# We remove repeated entries from the pareto front

		X = frontier[ 0 : 1, : ]

		for i in range(frontier.shape[ 0 ]):
			if np.min(cdist(frontier[ i : (i + 1), : ], X)) > 1e-8:
				X = np.vstack((X, frontier[ i, ]))

		frontier = X

		cached_information['frontier'] = frontier

		# We sort the entries in the pareto frontier

		frontier_sorted = np.vstack((-v_inf, cached_information['frontier'], v_ref, v_inf))

		for i in range(len(obj_model_dict)):
			frontier_sorted[ :, i ] = np.sort(frontier_sorted[ :, i ])

		# Now we build the info associated to each cell

		n_repeat = (frontier_sorted.shape[ 0 ] - 2) ** frontier_sorted.shape[ 1 ]

		cached_information['cells'] = dict()

		added_cells = 0
		for i in range(n_repeat):

			cell = dict()

			indices = np.zeros(len(obj_model_dict)).astype(int)

			j = i

			for k in range(len(obj_model_dict)):
				indices[ k ] = int(j % (frontier_sorted.shape[ 0 ] - 2))
				j = np.floor(j / (frontier_sorted.shape[ 0 ] - 2))

			u = np.zeros(len(obj_model_dict))

			for k in range(len(obj_model_dict)):
				u[ k ] = frontier_sorted[ int(indices[ k ] + 1), k ]
			
			l = np.zeros(len(obj_model_dict))
				
			for k in range(len(obj_model_dict)):
				l[ k ] = frontier_sorted[ indices[ k ], k ]

			# If the cell is dominated we discard it

			is_dominated = False
			for k in range(frontier.shape[ 0 ]):
				if np.all(l >= frontier[ k, : ]):
					is_dominated = True

			if is_dominated:
				continue

			# We find the vector v

			v = np.zeros(len(obj_model_dict))

			for k in range(len(obj_model_dict)):

				l_tmp = np.copy(l)

				for j in range(int(frontier_sorted.shape[ 0 ] - indices[ k ] - 1)):
					l_tmp[ k ] = frontier_sorted[ indices[ k ] + j, k ]

					dominates_all = True
					for h in range(frontier.shape[ 0 ]):
						if np.all(frontier[ h, : ] <= l_tmp):
							dominates_all = False
							break

					if dominates_all == False:
						break
					
				if dominates_all == False:
					v[ k ] = l_tmp[ k ]
				else:
					v[ k ] = v_ref[ 0, k ]

			# We compute the quantities required for evaluating the gain in hyper-volume

			# We find the points dominated by u

			dominated_by_u = frontier
			h = 0
			while (h < dominated_by_u.shape[ 0 ]):
				if (not np.any(u < dominated_by_u[ h, : ])) and (not np.all(u == dominated_by_u[ h, : ])):
					dominated_by_u = np.delete(dominated_by_u, (h), axis = 0)
				else:
					h+= 1

			# The value of minusQ2plusQ3 is given by the hypervolume of the dominated points with reference v

			if dominated_by_u.shape[ 0 ] == 0:
				minusQ2plusQ3 = 0.0
			else:
				hv = HyperVolume(v.tolist())
				minusQ2plusQ3 = -hv.compute(dominated_by_u.tolist())
			
			cell['u'] = u
			cell['l'] = l
			cell['v'] = v
			cell['dominated_by_u'] = dominated_by_u
			cell['minusQ2plusQ3'] = minusQ2plusQ3
			
			cached_information['cells'][ str(added_cells) ] = cell
			added_cells += 1
			
		n_cells = added_cells

		cached_information['n_cells'] = n_cells
		cached_information['v_ref'] = v_ref[ 0, : ]
		cached_information['n_objectives'] = len(obj_model_dict)
#		self.print_cell_info(cached_information)

		return cached_information
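
# A minimal sketch (assumption) of the non-dominated filter that
# _cull_algorithm presumably implements for a minimization problem: keep a
# frontier row only if no other row is at least as good in every objective
# and strictly better in at least one.
def cull_sketch(frontier):
    keep = np.ones(frontier.shape[0], dtype=bool)
    for i in range(frontier.shape[0]):
        for j in range(frontier.shape[0]):
            if i != j and np.all(frontier[j] <= frontier[i]) \
                    and np.any(frontier[j] < frontier[i]):
                keep[i] = False
                break
    return keep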
Example #44
def test_net():
    ''' Evaluate the network '''
    # Make result directory and the result file.
    result_dir = os.path.join(cfg.DIR.OUT_PATH, cfg.TEST.EXP_NAME)
    if not os.path.exists(result_dir):
        os.makedirs(result_dir)
    result_fn = os.path.join(result_dir, 'result.mat')

    print("Exp file will be written to: " + result_fn)

    # Make a network and load weights
    NetworkClass = load_model(cfg.CONST.NETWORK_CLASS)

    #print('Network definition: \n')
    #print(inspect.getsource(NetworkClass.network_definition))

    net = NetworkClass()
    
    net.cuda()
    
    solver = Solver(net)
    solver.load(cfg.CONST.WEIGHTS)

    # set constants
    batch_size = cfg.CONST.BATCH_SIZE

    # set up testing data process. We make only one prefetching process. The
    # process will return one batch at a time.
    queue = Queue(cfg.QUEUE_SIZE)
    data_pair = category_model_id_pair(dataset_portion=cfg.TEST.DATASET_PORTION)
    processes = make_data_processes(queue, data_pair, 1, repeat=False, train=False)

    num_data = len(processes[0].data_paths)
    num_batch = int(num_data / batch_size)

    # prepare result container
    results = {'cost': np.zeros(num_batch),
               'mAP': np.zeros((num_batch, batch_size))}
    # Save results for various thresholds
    for thresh in cfg.TEST.VOXEL_THRESH:
        results[str(thresh)] = np.zeros((num_batch, batch_size, 5))

    # Get all test data
    batch_idx = 0
    for batch_img, batch_voxel in get_while_running(processes[0], queue):
        if batch_idx == num_batch:
            break

        #activations is a list of torch.cuda.FloatTensor
        pred, loss, activations = solver.test_output(batch_img, batch_voxel)
        
        #convert pytorch tensor to numpy array
        pred = pred.data.cpu().numpy()
        loss = loss.data.cpu().numpy()

        for j in range(batch_size):
            # Save IoU per thresh
            for i, thresh in enumerate(cfg.TEST.VOXEL_THRESH):
                r = evaluate_voxel_prediction(pred[j, ...], batch_voxel[j, ...], thresh)
                results[str(thresh)][batch_idx, j, :] = r

            # Compute AP
            precision = sklearn.metrics.average_precision_score(
                batch_voxel[j, 1].flatten(), pred[j, 1].flatten())

            results['mAP'][batch_idx, j] = precision

        # record result for the batch
        results['cost'][batch_idx] = float(loss)
        print('%d/%d, costs: %f, mAP: %f' %
                (batch_idx, num_batch, loss, np.mean(results['mAP'][batch_idx])))
        batch_idx += 1


    print('Total loss: %f' % np.mean(results['cost']))
    print('Total mAP: %f' % np.mean(results['mAP']))

    sio.savemat(result_fn, results)
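
# A hedged sketch of the per-threshold check in the spirit of
# evaluate_voxel_prediction (the real helper returns a 5-vector per example;
# only the IoU part is sketched here): binarize the predicted occupancy and
# compare it with the ground-truth voxels.
def voxel_iou_sketch(pred_occupancy_prob, gt_voxel, thresh):
    occupied = pred_occupancy_prob > thresh
    intersection = np.logical_and(occupied, gt_voxel).sum()
    union = np.logical_or(occupied, gt_voxel).sum()
    return intersection / float(union)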
Example #45

def psnr(target, ref):
    # assume RGB image
    target_data = np.array(target, dtype=float)
    ref_data = np.array(ref, dtype=float)

    diff = ref_data - target_data
    diff = diff.flatten('C')

    rmse = math.sqrt(np.mean(diff ** 2.))

    return 20 * math.log10(255. / rmse)
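
# Illustrative use of psnr (assuming an 8-bit dynamic range, as implied by
# the 255 peak value above): compare a clean image against a noisy copy;
# larger values mean less distortion.
def _psnr_demo():
    img = np.random.randint(0, 256, (64, 64, 3)).astype(float)
    noisy_img = np.clip(img + np.random.normal(0, 5, img.shape), 0, 255)
    return psnr(noisy_img, img)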

def interpolation(noisy, SNR, Number_of_pilot, interp):
    noisy_image = np.zeros((40000,72,14,2))

    noisy_image[:,:,:,0] = np.real(noisy)
    noisy_image[:,:,:,1] = np.imag(noisy)


    if (Number_of_pilot == 48):
        idx = [14*i for i in range(1, 72,6)]+[4+14*(i) for i in range(4, 72,6)]+[7+14*(i) for i in range(1, 72,6)]+[11+14*(i) for i in range(4, 72,6)]
    elif (Number_of_pilot == 16):
        idx= [4+14*(i) for i in range(1, 72,9)]+[9+14*(i) for i in range(4, 72,9)]
    elif (Number_of_pilot == 24):
        idx = [14*i for i in range(1,72,9)]+ [6+14*i for i in range(4,72,9)]+ [11+14*i for i in range(1,72,9)]
    elif (Number_of_pilot == 8):
        idx = [4+14*(i) for i in range(5,72,18)]+[9+14*(i) for i in range(8,72,18)]
    elif (Number_of_pilot == 36):
        idx = [14*(i) for i in range(1,72,6)]+[6+14*(i) for i in range(4,72,6)] + [11+14*i for i in range(1,72,6)]
Example #46
#30-A
#(1)
import numpy as np
a=np.zeros(10)
print(a)

# Result: [0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]


#(2)
a[4]=1
print(a)

# Result: [0. 0. 0. 0. 1. 0. 0. 0. 0. 0.]


#(3)
a=np.arange(10,50)
print(a)

# Result:
# [10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33
#  34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49]


#(4)
a=np.arange(0,25)
b=a.reshape(5,5)
print(b)

# Result:
# [[ 0  1  2  3  4]
#  [ 5  6  7  8  9]
#  [10 11 12 13 14]
#  [15 16 17 18 19]
#  [20 21 22 23 24]]
Example #47
    def draw_pose(self, object_name: str, vectors_3D: np.ndarray):
        """
        Draw poses as directional axes on the image.

        Parameters
        ----------
        object_name : str
            Name of the object
        vectors_3D : numpy.ndarray
            The 3D directional vectors.
        """

        p_image = np.zeros((4, 2), dtype=np.int32)
        coordinates = None
        for i, vec in enumerate(vectors_3D):
            coordinates = self.project3dToPixel(vec)
            if np.isnan(coordinates).any():
                break
            p_image[i] = coordinates

        if coordinates is not None:
            p_image[3] = p_image[0] - (p_image[3] - p_image[0])
            # z = c + (z-c)*(norm(x-c)/norm(z-c))
            p_image[3] = p_image[0] + (p_image[3] - p_image[0])*(norm(
                    p_image[1] - p_image[0])/norm(
                        p_image[3] - p_image[0]
                    )
                )

            colors_ = [(0, 0, 255), (0, 255, 0), (255, 0, 0)]
            for i in range(1, 4):
                cv2.line(
                        self.viz_frame,
                        tuple(p_image[0]),
                        tuple(p_image[i]),
                        colors_[i-1],
                        thickness=2
                    )
                x1, y1, x2, y2 = self.calc_vertexes(p_image[0], p_image[i])
                cv2.line(
                        self.viz_frame,
                        tuple(p_image[i]),
                        (x1, y1),
                        colors_[i-1],
                        thickness=2
                    )
                cv2.line(
                        self.viz_frame,
                        tuple(p_image[i]),
                        (x2, y2),
                        colors_[i-1],
                        thickness=2
                    )

            # Put object label aligned to the object's in-plane planar rotation
            text_loc = np.array(
                    [p_image[2, 0] - (p_image[1, 0] - p_image[0, 0])/2,
                     p_image[2, 1] - 20],
                    dtype=np.int16
                )
            base, tangent = p_image[1] - p_image[0]
            text_angle = np.arctan2(-tangent, base)*180/np.pi
            self.viz_frame = draw_angled_text(
                    object_name,
                    text_loc,
                    text_angle,
                    self.viz_frame
                )
Example #48
    def __init__(self, game_name, agent_num, action_range=(-10, 10)):
        self.game_name = game_name
        self.agent_num = agent_num
        self.action_range = action_range

        game_list = DifferentialGame.get_game_list()

        if self.game_name not in game_list:
            raise EnvironmentNotFound(f"The game {self.game_name} doesn't exist")

        expt_num_agent = game_list[self.game_name]['agent_num']
        if expt_num_agent != self.agent_num:
            raise WrongNumberOfAgent(f"The number of agent \
                required for {self.game_name} is {expt_num_agent}")

        self.action_spaces = MASpace(tuple(Box(low=-1., high=1., shape=(1,)) for _ in range(self.agent_num)))
        self.observation_spaces = MASpace(tuple(Box(low=-1., high=1., shape=(1,)) for _ in range(self.agent_num)))
        self.env_specs = MAEnvSpec(self.observation_spaces, self.action_spaces)
        self.t = 0
        self.payoff = {}

        if self.game_name == 'zero_sum':
            self.payoff[0] = lambda a1, a2: a1 * a2
            self.payoff[1] = lambda a1, a2: -a1 * a2
        elif self.game_name == 'trigonometric':
            self.payoff[0] = lambda a1, a2: np.cos(a2) * a1
            self.payoff[1] = lambda a1, a2: np.sin(a1) * a2
        elif self.game_name == 'mataching_pennies':
            self.payoff[0] = lambda a1, a2: (a1-0.5)*(a2-0.5)
            self.payoff[1] = lambda a1, a2: (a1-0.5)*(a2-0.5)
        elif self.game_name == 'rotational':
            self.payoff[0] = lambda a1, a2: 0.5 * a1 * a1 + 10 * a1 * a2
            self.payoff[1] = lambda a1, a2: 0.5 * a2 * a2 - 10 * a1 * a2
        elif self.game_name == 'wolf':
            def V(alpha, beta, payoff):
                u = payoff[(0, 0)] - payoff[(0, 1)] - payoff[(1, 0)] + payoff[(1, 1)]
                return alpha * beta * u + alpha * (payoff[(0, 1)] - payoff[(1, 1)]) + beta * (
                            payoff[(1, 0)] - payoff[(1, 1)]) + payoff[(1, 1)]

            payoff_0 = np.array([[0, 3], [1, 2]])
            payoff_1 = np.array([[3, 2], [0, 1]])

            self.payoff[0] = lambda a1, a2: V(a1, a2, payoff_0)
            self.payoff[1] = lambda a1, a2: V(a1, a2, payoff_1)
        elif self.game_name == 'ma_softq':
            h1 = 0.8
            h2 = 1.
            s1 = 3.
            s2 = 1.
            x1 = -5.
            x2 = 5.
            y1 = -5.
            y2 = 5.
            c = 10.
            def max_f(a1, a2):
                f1 = h1 * (-(np.square(a1 - x1) / s1) - (np.square(a2 - y1) / s1))
                f2 = h2 * (-(np.square(a1 - x2) / s2) - (np.square(a2 - y2) / s2)) + c
                return max(f1, f2)
            self.payoff[0] = lambda a1, a2: max_f(a1, a2)
            self.payoff[1] = lambda a1, a2: max_f(a1, a2)
        else:
            raise EnvironmentNotFound(f"The game {self.game_name} doesn't exist")

        self.rewards = np.zeros((self.agent_num,))
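
# A standalone sketch (illustration only) of the 'zero_sum' payoff wiring
# above, independent of the class: agent 0 maximizes a1*a2 while agent 1
# maximizes its negation, so the two payoffs always sum to zero.
payoff_zero_sum = {0: lambda a1, a2: a1 * a2,
                   1: lambda a1, a2: -a1 * a2}
assert payoff_zero_sum[0](1.0, -2.0) == -2.0
assert payoff_zero_sum[1](1.0, -2.0) == 2.0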
Example #49
    def _precomp(self):
        """
        Precomute the basis functions on a polar Fourier 3D grid

        Gaussian quadrature points and weights are also generated
        in radical and phi dimensions.
        """
        n_r = int(self.ell_max + 1)
        n_theta = int(2 * self.sz[0])
        n_phi = int(self.ell_max + 1)

        r, wt_r = lgwt(n_r, 0.0, self.kcut)
        z, wt_z = lgwt(n_phi, -1, 1)
        r = m_reshape(r, (n_r, 1))
        wt_r = m_reshape(wt_r, (n_r, 1))
        z = m_reshape(z, (n_phi, 1))
        wt_z = m_reshape(wt_z, (n_phi, 1))
        phi = np.arccos(z)
        wt_phi = wt_z
        theta = 2 * pi * np.arange(n_theta).T / (2 * n_theta)
        theta = m_reshape(theta, (n_theta, 1))

        # evaluate basis function in the radial dimension
        radial_wtd = np.zeros(shape=(n_r, np.max(self.k_max),
                                     self.ell_max + 1))
        for ell in range(0, self.ell_max + 1):
            k_max_ell = self.k_max[ell]
            rmat = r * self.r0[0:k_max_ell, ell].T / self.kcut
            radial_ell = np.zeros_like(rmat)
            for ik in range(0, k_max_ell):
                radial_ell[:, ik] = sph_bessel(ell, rmat[:, ik])
            nrm = np.abs(sph_bessel(ell + 1, self.r0[0:k_max_ell, ell].T) / 4)
            radial_ell = radial_ell / nrm
            radial_ell_wtd = r**2 * wt_r * radial_ell
            radial_wtd[:, 0:k_max_ell, ell] = radial_ell_wtd

        # evaluate basis function in the phi dimension
        ang_phi_wtd_even = []
        ang_phi_wtd_odd = []
        for m in range(0, self.ell_max + 1):
            n_even_ell = int(
                np.floor((self.ell_max - m) / 2) + 1 -
                np.mod(self.ell_max, 2) * np.mod(m, 2))
            n_odd_ell = int(self.ell_max - m + 1 - n_even_ell)
            phi_wtd_m_even = np.zeros((n_phi, n_even_ell), dtype=phi.dtype)
            phi_wtd_m_odd = np.zeros((n_phi, n_odd_ell), dtype=phi.dtype)

            ind_even = 0
            ind_odd = 0
            for ell in range(m, self.ell_max + 1):
                phi_m_ell = norm_assoc_legendre(ell, m, z)
                nrm_inv = np.sqrt(0.5 / pi)
                phi_m_ell = nrm_inv * phi_m_ell
                phi_wtd_m_ell = wt_phi * phi_m_ell
                if np.mod(ell, 2) == 0:
                    phi_wtd_m_even[:, ind_even] = phi_wtd_m_ell[:, 0]
                    ind_even = ind_even + 1
                else:
                    phi_wtd_m_odd[:, ind_odd] = phi_wtd_m_ell[:, 0]
                    ind_odd = ind_odd + 1

            ang_phi_wtd_even.append(phi_wtd_m_even)
            ang_phi_wtd_odd.append(phi_wtd_m_odd)

        # evaluate basis function in the theta dimension
        ang_theta = np.zeros((n_theta, 2 * self.ell_max + 1),
                             dtype=theta.dtype)

        ang_theta[:, 0:self.ell_max] = np.sqrt(2) * np.sin(
            theta @ m_reshape(np.arange(self.ell_max, 0, -1),
                              (1, self.ell_max)))
        ang_theta[:, self.ell_max] = np.ones(n_theta, dtype=theta.dtype)
        ang_theta[:,
                  self.ell_max + 1:2 * self.ell_max + 1] = np.sqrt(2) * np.cos(
                      theta @ m_reshape(np.arange(1, self.ell_max + 1),
                                        (1, self.ell_max)))

        ang_theta_wtd = (2 * pi / n_theta) * ang_theta

        theta_grid, phi_grid, r_grid = np.meshgrid(theta,
                                                   phi,
                                                   r,
                                                   sparse=False,
                                                   indexing='ij')
        fourier_x = m_flatten(r_grid * np.cos(theta_grid) * np.sin(phi_grid))
        fourier_y = m_flatten(r_grid * np.sin(theta_grid) * np.sin(phi_grid))
        fourier_z = m_flatten(r_grid * np.cos(phi_grid))
        fourier_pts = 2 * pi * np.vstack(
            (fourier_x[np.newaxis, ...], fourier_y[np.newaxis, ...],
             fourier_z[np.newaxis, ...]))

        return {
            'radial_wtd': radial_wtd,
            'ang_phi_wtd_even': ang_phi_wtd_even,
            'ang_phi_wtd_odd': ang_phi_wtd_odd,
            'ang_theta_wtd': ang_theta_wtd,
            'fourier_pts': fourier_pts
        }
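
# A hedged sketch of the quadrature helper assumed above: lgwt(n, a, b)
# presumably returns n Gauss-Legendre nodes and weights on [a, b]; numpy's
# leggauss provides them on [-1, 1], so an affine rescaling suffices.
def lgwt_sketch(n, a, b):
    x, w = np.polynomial.legendre.leggauss(n)   # nodes/weights on [-1, 1]
    nodes = 0.5 * (b - a) * x + 0.5 * (b + a)   # map nodes to [a, b]
    weights = 0.5 * (b - a) * w                 # rescale weights accordingly
    return nodes, weights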
Example #50
    def estimate_pose(
                self,
                object_name: str,
                bbox: list,
                pc_sub: PointCloud2
            ):
        """
        Estimates planar pose of detected objects and
        updates the stored pose.

        Parameters
        ----------
        object_name: str
            Name of the object.
        bbox : list
            Contains the coordinates of the bounding box
            of the detected object.
        pc_sub : PointCloud2
            A pointcloud object containing the 3D locations
            in terms of the frame `self.frame_id`
        """

        bbox = np.array(bbox)

        # Compute the center, the mid point of the right
        # and top segment of the bounding box
        c = (bbox[0] + bbox[2]) // 2
        x = (bbox[2] + bbox[3]) // 2
        y = (bbox[0] + bbox[3]) // 2

        points = np.array([c, x, y]).tolist()
        vectors_3D = np.zeros((3, 3))

        try:
            # Get the corresponding 3D location of c, x, y
            for pt_count, dt in enumerate(
                pc2.read_points(
                        pc_sub,
                        field_names={'x', 'y', 'z'},
                        skip_nans=False, uvs=points
                    )
                ):
                # If any point returns nan, return
                if np.any(np.isnan(dt)):
                    if object_name in self.object_pose_info.keys():
                        del self.object_pose_info[object_name]
                    rospy.loginfo('No corresponding 3D point found')
                    return
                else:
                    vectors_3D[pt_count] = dt
                    if pt_count == 2:
                        self.vectors_3D = vectors_3D
        except struct.error as err:
            rospy.loginfo(err)
            return

        try:
            # 3D position of the object
            c_3D = self.vectors_3D[0]

            # Center the vectors to the origin
            x_vec = self.vectors_3D[1] - c_3D
            x_vec /= norm(x_vec)

            y_vec = self.vectors_3D[2] - c_3D
            y_vec /= norm(y_vec)
            # Take the cross product of x and y vector
            # to generate z vector.
            z_vec = np.cross(x_vec, y_vec)
            z_vec /= norm(z_vec)

            # Recompute the x vector to make it truly orthogonal
            x_vec_orth = np.cross(y_vec, z_vec)
            x_vec_orth /= norm(x_vec_orth)
        except RuntimeWarning as w:
            rospy.loginfo(w)
            return

        if self.viz_pose:
            self.draw_pose(object_name, np.vstack((self.vectors_3D, z_vec)))

        # Compute Euler angles i.e. roll, pitch, yaw
        roll = np.arctan2(y_vec[2], z_vec[2])
        pitch = np.arctan2(-x_vec_orth[2], np.sqrt(1 - x_vec_orth[2]**2))
        # pitch = np.arcsin(-x_vec_orth[2])
        yaw = np.arctan2(x_vec_orth[1], x_vec_orth[0])

        [qx, qy, qz, qw] = self.euler_to_quaternion(roll, pitch, yaw)

        # Generate Pose message.
        pose_msg = Pose()

        pose_msg.position.x = c_3D[0]
        pose_msg.position.y = c_3D[1]
        pose_msg.position.z = c_3D[2]
        # Make sure the quaternion is valid and normalized
        pose_msg.orientation.x = qx
        pose_msg.orientation.y = qy
        pose_msg.orientation.z = qz
        pose_msg.orientation.w = qw

        self.object_pose_info[object_name] = {
                'position': c_3D.tolist(),
                'orientation': [qx, qy, qz, qw]
            }

        return pose_msg
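
# A sketch (assumption) of the euler_to_quaternion helper used above: the
# standard roll-pitch-yaw to quaternion conversion returning [qx, qy, qz, qw].
def euler_to_quaternion_sketch(roll, pitch, yaw):
    cr, sr = np.cos(roll / 2), np.sin(roll / 2)
    cp, sp = np.cos(pitch / 2), np.sin(pitch / 2)
    cy, sy = np.cos(yaw / 2), np.sin(yaw / 2)
    qx = sr * cp * cy - cr * sp * sy
    qy = cr * sp * cy + sr * cp * sy
    qz = cr * cp * sy - sr * sp * cy
    qw = cr * cp * cy + sr * sp * sy
    return [qx, qy, qz, qw]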
Example #51
Iy = cv2.Scharr(gray, cv2.CV_32F, 0, 1)
Ixx = cv2.Scharr(Ix, cv2.CV_32F, 1, 0)
Ixy = cv2.Scharr(Ix, cv2.CV_32F, 0, 1)
Iyy = cv2.Scharr(Iy, cv2.CV_32F, 0, 1)
Iyx = cv2.Scharr(Iy, cv2.CV_32F, 1, 0)
detector = (cv2.GaussianBlur(Ixy,
                             (5, 5), 0) * cv2.GaussianBlur(Iyx, (5, 5), 0) -
            cv2.GaussianBlur(Ixx,
                             (5, 5), 0) * cv2.GaussianBlur(Iyy, (5, 5), 0))
print(type(detector))
slice1 = np.array([1, 5, 8, 6, 2])
slice2 = np.array([1, 5, 8, 6, 2])
print(img[slice1, slice2])
print(np.sort(detector), np.min(detector))

detector_norm = np.zeros(detector.shape, dtype=np.float32)
print(detector_norm)
cv2.normalize(np.abs(detector), detector_norm, 0, 255, cv2.NORM_MINMAX)
print(np.max(detector_norm), "max of detector_norm")
points = cv2.goodFeaturesToTrack(detector,
                                 maxCorners=100,
                                 qualityLevel=0.1,
                                 minDistance=5,
                                 blockSize=3)
points_int = points.astype(int).reshape((-1, 2))
print(np.where(detector_norm))
cv2.namedWindow("normimage", 0)
cv2.imshow("normimage", detector_norm)
for center in points_int:
    i, j = center
    img[j, i] = [255, 0, 0]
Example #52
    def evaluate_t(self, x):
        """
        Evaluate coefficients in the FB basis from those in the standard 3D coordinate basis

        :param x: The coefficient array in the standard 3D coordinate basis
            to be evaluated. The first three dimensions must equal `self.sz`.
        :return v: The evaluation of the coefficient array `v` in the FB basis.
            This is an array of vectors whose first dimension equals
            `self.count` and whose remaining dimensions correspond to higher
            dimensions of `x`.
        """
        # ensure the first three dimensions have the sizes given by self.sz
        x, sz_roll = unroll_dim(x, self.ndim + 1)
        x = m_reshape(x, (self.sz[0], self.sz[1], self.sz[2], -1))

        n_data = np.size(x, 3)
        n_r = np.size(self._precomp['radial_wtd'], 0)
        n_phi = np.size(self._precomp['ang_phi_wtd_even'][0], 0)
        n_theta = np.size(self._precomp['ang_theta_wtd'], 0)

        # resample x on a polar Fourier grid using a nonuniform discrete Fourier transform
        pf = np.zeros((n_theta * n_phi * n_r, n_data), dtype=complex)
        for isample in range(0, n_data):
            pf[..., isample] = nufft3(x[..., isample],
                                      self._precomp['fourier_pts'], self.sz)

        pf = m_reshape(pf, (n_theta, n_phi * n_r * n_data))

        # evaluate the theta parts
        u_even = self._precomp['ang_theta_wtd'].T @ np.real(pf)
        u_odd = self._precomp['ang_theta_wtd'].T @ np.imag(pf)

        u_even = m_reshape(u_even, (2 * self.ell_max + 1, n_phi, n_r, n_data))
        u_odd = m_reshape(u_odd, (2 * self.ell_max + 1, n_phi, n_r, n_data))

        u_even = np.transpose(u_even, (1, 2, 3, 0))
        u_odd = np.transpose(u_odd, (1, 2, 3, 0))

        w_even = np.zeros((int(np.floor(self.ell_max / 2) + 1), n_r,
                           2 * self.ell_max + 1, n_data),
                          dtype=x.dtype)
        w_odd = np.zeros((int(np.ceil(
            self.ell_max / 2)), n_r, 2 * self.ell_max + 1, n_data),
                         dtype=x.dtype)

        # evaluate the phi parts
        for m in range(0, self.ell_max + 1):
            ang_phi_wtd_m_even = self._precomp['ang_phi_wtd_even'][m]
            ang_phi_wtd_m_odd = self._precomp['ang_phi_wtd_odd'][m]

            n_even_ell = np.size(ang_phi_wtd_m_even, 1)
            n_odd_ell = np.size(ang_phi_wtd_m_odd, 1)

            if m == 0:
                sgns = (1, )
            else:
                sgns = (1, -1)

            for sgn in sgns:
                u_m_even = u_even[:, :, :, self.ell_max + sgn * m]
                u_m_odd = u_odd[:, :, :, self.ell_max + sgn * m]

                u_m_even = m_reshape(u_m_even, (n_phi, n_r * n_data))
                u_m_odd = m_reshape(u_m_odd, (n_phi, n_r * n_data))

                w_m_even = ang_phi_wtd_m_even.T @ u_m_even
                w_m_odd = ang_phi_wtd_m_odd.T @ u_m_odd

                w_m_even = m_reshape(w_m_even, (n_even_ell, n_r, n_data))
                w_m_odd = m_reshape(w_m_odd, (n_odd_ell, n_r, n_data))
                end = np.size(w_even, 0)
                w_even[end - n_even_ell:end, :,
                       self.ell_max + sgn * m, :] = w_m_even
                end = np.size(w_odd, 0)
                w_odd[end - n_odd_ell:end, :,
                      self.ell_max + sgn * m, :] = w_m_odd

        w_even = np.transpose(w_even, (1, 2, 3, 0))
        w_odd = np.transpose(w_odd, (1, 2, 3, 0))

        # evaluate the radial parts
        v = np.zeros((self.count, n_data), dtype=x.dtype)
        for ell in range(0, self.ell_max + 1):
            k_max_ell = self.k_max[ell]
            radial_wtd = self._precomp['radial_wtd'][:, 0:k_max_ell, ell]

            if np.mod(ell, 2) == 0:
                v_ell = w_even[:,
                               int(self.ell_max - ell):int(self.ell_max + 1 +
                                                           ell), :,
                               int(ell / 2)]
            else:
                v_ell = w_odd[:,
                              int(self.ell_max - ell):int(self.ell_max + 1 +
                                                          ell), :,
                              int((ell - 1) / 2)]

            v_ell = m_reshape(v_ell, (n_r, (2 * ell + 1) * n_data))

            v_ell = radial_wtd.T @ v_ell

            v_ell = m_reshape(v_ell, (k_max_ell * (2 * ell + 1), n_data))

            # TODO: Fix this to avoid lookup each time.
            ind = self._indices['ells'] == ell
            v[ind, :] = v_ell
        v = roll_dim(v, sz_roll)
        return v
Example #53
def main():
  # MAIN -- TRADES + EMCEE
  # READ COMMAND LINE ARGUMENTS
  cli = get_args()

  # STARTING TIME
  start = time.time()

  # RENAME 
  working_path = cli.full_path
  nthreads=cli.nthreads
  np.random.seed(cli.seed)
  
  # INITIALISE TRADES WITH SUBROUTINE WITHIN TRADES_LIB -> PARAMETER NAMES, MINMAX, INTEGRATION ARGS, READ DATA ...
  pytrades_lib.pytrades.initialize_trades(working_path, cli.sub_folder, nthreads)

  # RETRIEVE DATA AND VARIABLES FROM TRADES_LIB MODULE
  
  #global n_bodies, n_planets, ndata, npar, nfit, dof, inv_dof
  n_bodies = pytrades_lib.pytrades.n_bodies # NUMBER OF TOTAL BODIES OF THE SYSTEM
  n_planets = n_bodies - 1 # NUMBER OF PLANETS IN THE SYSTEM
  ndata = pytrades_lib.pytrades.ndata # TOTAL NUMBER OF DATA AVAILABLE
  npar  = pytrades_lib.pytrades.npar # NUMBER OF TOTAL PARAMATERS ~n_planets X 6
  nfit  = pytrades_lib.pytrades.nfit # NUMBER OF PARAMETERS TO FIT
  nfree  = pytrades_lib.pytrades.nfree # NUMBER OF FREE PARAMETERS (ie nrvset)
  dof   = pytrades_lib.pytrades.dof # NUMBER OF DEGREES OF FREEDOM = NDATA - NFIT
  global inv_dof
  #inv_dof = np.float64(1.0 / dof)
  inv_dof = pytrades_lib.pytrades.inv_dof

  # READ THE NAMES OF THE PARAMETERS FROM THE TRADES_LIB AND CONVERT IT TO PYTHON STRINGS
  #reshaped_names = pytrades_lib.pytrades.parameter_names.reshape((10,nfit), order='F').T
  #parameter_names = [''.join(reshaped_names[i,:]).strip() for i in range(0,nfit)]
  
  #parameter_names = anc.convert_fortran2python_strarray(pytrades_lib.pytrades.parameter_names, nfit, str_len=10)
  #trades_names = anc.convert_fortran2python_strarray(pytrades_lib.pytrades.parameter_names,
                                                     #nfit, str_len=10
                                                    #)
  ##parameter_names = anc.trades_names_to_emcee(trades_names)
  str_len = pytrades_lib.pytrades.str_len
  temp_names = pytrades_lib.pytrades.get_parameter_names(nfit,str_len)
  trades_names = anc.convert_fortran_charray2python_strararray(temp_names)
  parameter_names = trades_names
  
  
  if(cli.trades_previous is not None):
    temp_names, trades_parameters = anc.read_fitted_file(cli.trades_previous)
    if(nfit != np.shape(trades_parameters)[0]):
      anc.print_both(' NUMBER OF PARAMETERS (%d) IN TRADES-PREVIOUS FILE DOES NOT' \
                 ' MATCH THE CURRENT CONFIGURATION nfit=%d\nSTOP' \
                 %(np.shape(trades_parameters)[0], nfit)
                )
      sys.exit()
    del temp_names
  else:
    # INITIAL PARAMETER SET (NEEDED ONLY TO HAVE THE PROPER ARRAY/VECTOR)
    #fitting_parameters = pytrades_lib.pytrades.fitting_parameters
    trades_parameters = pytrades_lib.pytrades.fitting_parameters
  
  # save initial_fitting parameters into array  
  original_fit_parameters = trades_parameters.copy()
  #fitting_parameters = anc.e_to_sqrte_fitting(trades_parameters, trades_names)
  fitting_parameters = trades_parameters
  
  trades_minmax = pytrades_lib.pytrades.parameters_minmax # PARAMETER BOUNDARIES
  parameters_minmax = trades_minmax.copy()
  #parameters_minmax[:,0] = anc.e_to_sqrte_fitting(parameters_minmax[:,0], trades_names)
  #parameters_minmax[:,1] = anc.e_to_sqrte_fitting(parameters_minmax[:,1], trades_names)

  # RADIAL VELOCITIES SET
  n_rv = pytrades_lib.pytrades.nrv
  n_set_rv = pytrades_lib.pytrades.nrvset

  # TRANSITS SET
  n_t0 = pytrades_lib.pytrades.nt0
  #n_t0_sum = np.sum(n_t0)
  n_t0_sum = pytrades_lib.pytrades.ntts
  n_set_t0 = 0
  for i in range(0, n_bodies):
    #if (np.sum(n_t0[i]) > 0): n_set_t0 += 1
    if (n_t0[i] > 0): n_set_t0 += 1

  # compute global constant for the loglhd
  global ln_err_const

  #try:
    ## fortran variable RV in python will be rv!!!
    #e_RVo = np.array(pytrades_lib.pytrades.ervobs[:], dtype=np.float64)
  #except:
    #e_RVo = np.array([0.], dtype=np.float64)
  #try:
    #e_T0o = np.array(pytrades_lib.pytrades.et0obs[:,:], dtype=np.float64).reshape((-1))
  #except:
    #e_T0o = np.array([0.], dtype=np.float64)
  #ln_err_const = anc.compute_ln_err_const(dof, e_RVo, e_T0o, cli.ln_flag)
  ln_err_const = pytrades_lib.pytrades.ln_err_const

  # SET EMCEE PARAMETERS:
  nwalkers, nruns, nsave, npost = get_emcee_arguments(cli,nfit)

  # INITIALISE SCRIPT FOLDER/LOG FILE
  working_folder, run_log, of_run = init_folder(working_path, cli.sub_folder)

  anc.print_both('',of_run)
  anc.print_both(' ======== ',of_run)
  anc.print_both(' pyTRADES' ,of_run)
  anc.print_both(' ======== ',of_run)
  anc.print_both('',of_run)
  anc.print_both(' WORKING PATH = %s' %(working_path),of_run)
  anc.print_both(' NUMBER OF THREADS = %d' %(nthreads),of_run)
  anc.print_both(' dof = ndata(%d) - nfit(%d) - nfree(%d) = %d' %(ndata, nfit, nfree, dof),of_run)
  anc.print_both(' Total N_RV = %d for %d set(s)' %(n_rv, n_set_rv),of_run)
  anc.print_both(' Total N_T0 = %d for %d out of %d planet(s)' %(n_t0_sum, n_set_t0, n_planets),of_run)
  anc.print_both(' log constant error = %.7f' %(ln_err_const),of_run)
  anc.print_both(' IN FORTRAN log constant error = %.7f' %(pytrades_lib.pytrades.ln_err_const),of_run)
  anc.print_both(' seed = %s' %(str(cli.seed)), of_run)

  if(cli.trades_previous is not None):
    anc.print_both('\n ******\n INITIAL FITTING PARAMETERS FROM PREVIOUS' \
              ' TRADES-EMCEE SIM IN FILE:\n %s\n ******\n' %(cli.trades_previous),
              of_run
              )
    
  anc.print_both(' ORIGINAL PARAMETER VALUES -> 0000', of_run)
  fitness_0000, lgllhd_0000, check_0000 = pytrades_lib.pytrades.write_summary_files(0, original_fit_parameters)
  anc.print_both(' ', of_run)
  #anc.print_both(' TESTING LNPROB_SQ ...', of_run)
  
  lgllhd_zero = lnprob(trades_parameters)
  #lgllhd_sq_zero = lnprob(fitting_parameters, parameter_names)

  anc.print_both(' ', of_run)
  anc.print_both(' %15s %23s %23s' %('trades_names', 'original_trades', 'trades_par'), of_run)
  for ifit in range(0, nfit):
    anc.print_both(' %15s %23.16e %23.16e' %(trades_names[ifit], original_fit_parameters[ifit], trades_parameters[ifit]), of_run)
  anc.print_both(' ', of_run)
  anc.print_both(' %15s %23.16e %23.16e' %('lnprob', lgllhd_0000,lgllhd_zero), of_run)
  anc.print_both(' ', of_run)
  
  # INITIALISES THE WALKERS
  if(cli.emcee_previous is not None):
    anc.print_both(' Use a previous emcee simulation: %s' %(cli.emcee_previous), of_run)
    last_p0, old_nwalkers, last_done = anc.get_last_emcee_iteration(cli.emcee_previous, nwalkers)
    if(not last_done):
      anc.print_both('**STOP: USING A DIFFERENT NUMBER OF WALKERS (%d) W.R.T. PREVIOUS EMCEE SIMULATION (%d).' %(nwalkers, old_nwalkers), of_run)
      sys.exit()
    p0 = last_p0
  else:
    p0 = compute_initial_walkers(nfit, nwalkers, fitting_parameters, parameters_minmax, parameter_names, cli.delta_sigma, of_run)

  anc.print_both(' emcee chain: nwalkers = %d nruns = %d' %(nwalkers, nruns), of_run)
  anc.print_both(' sampler ... ',of_run)
  
  # old version with threads
  #sampler = emcee.EnsembleSampler(nwalkers, nfit, lnprob, threads=nthreads)
  #sampler = emcee.EnsembleSampler(nwalkers, nfit, lnprob_sq, threads=nthreads, args=[parameter_names])
  
  threads_pool = emcee.interruptible_pool.InterruptiblePool(nthreads)
  sampler = emcee.EnsembleSampler(nwalkers, nfit, lnprob, pool=threads_pool)
  
  anc.print_both(' ready to go', of_run)
  anc.print_both(' with nsave = %s' %(str(nsave)), of_run)
  sys.stdout.flush()

  #sys.exit()

  if (nsave != False):
    # save temporary sampling during emcee every nruns*10%
    #if(os.path.exists(os.path.join(working_folder, 'emcee_temp.hdf5')) and os.path.isfile(os.path.join(working_folder, 'emcee_temp.hdf5'))):
      #os.remove(os.path.join(working_folder, 'emcee_temp.hdf5'))
    if(os.path.exists(os.path.join(working_folder, 'emcee_summary.hdf5')) and os.path.isfile(os.path.join(working_folder, 'emcee_summary.hdf5'))):
      os.remove(os.path.join(working_folder, 'emcee_summary.hdf5'))
    f_hdf5 = h5py.File(os.path.join(working_folder, 'emcee_summary.hdf5'), 'a')
    f_hdf5.create_dataset('parameter_names', data=parameter_names, dtype='S10')
    f_hdf5.create_dataset('boundaries', data=parameters_minmax, dtype=np.float64)
    temp_dset = f_hdf5.create_dataset('chains', (nwalkers, nruns, nfit), dtype=np.float64)
    f_hdf5['chains'].attrs['nwalkers'] = nwalkers
    f_hdf5['chains'].attrs['nruns'] = nruns
    f_hdf5['chains'].attrs['nfit'] = nfit
    f_hdf5['chains'].attrs['nfree'] = nfree
    temp_lnprob = f_hdf5.create_dataset('lnprobability', (nwalkers, nruns), dtype=np.float64)
    temp_lnprob.attrs['ln_err_const'] = ln_err_const
    temp_acceptance = f_hdf5.create_dataset('acceptance_fraction', data=np.zeros((nwalkers)), dtype=np.float64)
    temp_acor = f_hdf5.create_dataset('autocor_time', data=np.zeros((nfit)), dtype=np.float64)
    f_hdf5.close()
    pos = p0
    nchains = int(nruns/nsave)
    state=None
    anc.print_both(' Running emcee with temporary saving', of_run)
    sys.stdout.flush()
    for i in range(0, nchains):
      anc.print_both('', of_run)
      anc.print_both(' iter: %6d ' %(i+1), of_run)
      aaa = i*nsave
      bbb = aaa+nsave
      pos, prob, state = sampler.run_mcmc(pos, N=nsave, rstate0=state)
      anc.print_both('completed %d steps of %d' %(bbb, nruns), of_run)
      f_hdf5 = h5py.File(os.path.join(working_folder, 'emcee_summary.hdf5'), 'a')
      temp_dset = f_hdf5['chains'] #[:,:,:]
      temp_dset[:,aaa:bbb,:] = sampler.chain[:, aaa:bbb, :]
      temp_dset.attrs['completed_steps'] = bbb

      temp_lnprob = f_hdf5['lnprobability'] #[:,:]
      temp_lnprob[:, aaa:bbb] = sampler.lnprobability[:, aaa:bbb]
      shape_lnprob = sampler.lnprobability.shape
      
      acceptance_fraction = sampler.acceptance_fraction
      temp_acceptance = f_hdf5['acceptance_fraction']
      temp_acceptance[...] = acceptance_fraction
      #f_hdf5.create_dataset('acceptance_fraction', data=acceptance_fraction, dtype=np.float64)
      mean_acceptance_fraction = np.mean(acceptance_fraction)
    
      #temp_chains_T = np.zeros((bbb, nwalkers, nfit))
      #for ifit in range(0,nfit):
        #temp_chains_T[:,:,ifit] = sampler.chain[:, :bbb, ifit].T
      #acor_time = anc.compute_autocor_time(temp_chains_T, walkers_transposed=True)
      acor_time = anc.compute_acor_time(sampler, steps_done=bbb)
      temp_acor = f_hdf5['autocor_time']
      temp_acor[...] = acor_time
      
      #f_hdf5.create_dataset('autocor_time', data=np.array(acor_temp, dtype=np.float64), dtype=np.float64)
      #f_hdf5.create_dataset('autocor_time', data=np.array(sampler.acor, dtype=np.float64), dtype=np.float64) # not working
      #print 'aaa = %6d bbb = %6d -> sampler.lnprobability.shape = (%6d , %6d)' %(aaa, bbb, shape_lnprob[0], shape_lnprob[1])
      f_hdf5.close()
      sys.stdout.flush()
    anc.print_both('', of_run)
    anc.print_both('...done with saving temporary total shape = %s' %(str(np.shape(sampler.chain))), of_run)
    anc.print_both('', of_run)
    sys.stdout.flush()

  # RUN EMCEE AND RESET AFTER REMOVE BURN-IN
  #pos, prob, state = sampler.run_mcmc(p0, npost)
  #sampler.reset()
  #sampler.run_mcmc(pos, nruns, rstate0=state)
  else:
    # GOOD COMPLETE SINGLE RUNNING OF EMCEE, WITHOUT REMOVING THE BURN-IN
    anc.print_both(' Running full emcee ...', of_run)
    sys.stdout.flush()
    sampler.run_mcmc(p0, nruns)
    anc.print_both('done', of_run)
    anc.print_both('', of_run)
    sys.stdout.flush()
    flatchains = sampler.chain[:, :, :].reshape((nwalkers*nruns, nfit)) # full chain values
    acceptance_fraction = sampler.acceptance_fraction
    mean_acceptance_fraction = np.mean(acceptance_fraction)
    #autocor_time = sampler.acor
    temp_chains_T = np.zeros((nruns, nwalkers, nfit))
    for ifit in range(0,nfit):
      temp_chains_T[:,:,ifit] = sampler.chain[:, :, ifit].T
    #acor_time = anc.compute_autocor_time(temp_chains_T, walkers_transposed=True)
    acor_time = anc.compute_acor_time(sampler)
    lnprobability = sampler.lnprobability
    # save chains with original shape as hdf5 file
    f_hdf5 = h5py.File(os.path.join(working_folder, 'emcee_summary.hdf5'), 'w')
    f_hdf5.create_dataset('chains', data=sampler.chain, dtype=np.float64)
    f_hdf5['chains'].attrs['nwalkers'] = nwalkers
    f_hdf5['chains'].attrs['nruns'] = nruns
    f_hdf5['chains'].attrs['nfit'] = nfit
    f_hdf5['chains'].attrs['nfree'] = nfree
    f_hdf5['chains'].attrs['completed_steps'] = nruns
    f_hdf5.create_dataset('parameter_names', data=parameter_names, dtype='S10')
    f_hdf5.create_dataset('boundaries', data=parameters_minmax, dtype=np.float64)
    f_hdf5.create_dataset('acceptance_fraction', data=acceptance_fraction, dtype=np.float64)
    f_hdf5.create_dataset('autocor_time', data=acor_time, dtype=np.float64)
    f_hdf5.create_dataset('lnprobability', data=lnprobability, dtype=np.float64)
    f_hdf5['lnprobability'].attrs['ln_err_const'] = ln_err_const
    f_hdf5.close()

  anc.print_both(" Mean_acceptance_fraction should be between [0.25-0.5] = %.6f" %(mean_acceptance_fraction), of_run)
  anc.print_both('', of_run)

  # close the pool of threads
  threads_pool.close()
  threads_pool.terminate()
  threads_pool.join()

  anc.print_both('COMPLETED EMCEE', of_run)

  elapsed = time.time() - start
  elapsed_d, elapsed_h, elapsed_m, elapsed_s = anc.computation_time(elapsed)

  anc.print_both('', of_run)
  anc.print_both(' pyTRADES: EMCEE FINISHED in %2d day %02d hour %02d min %.2f sec - bye bye' %(int(elapsed_d), int(elapsed_h), int(elapsed_m), elapsed_s), of_run)
  anc.print_both('', of_run)
  of_run.close()
  pytrades_lib.pytrades.deallocate_variables()

  return
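
# A short follow-up sketch (not part of the pipeline): reopen the summary
# file written above and inspect the stored chains, using the dataset names
# this script creates.
def inspect_emcee_summary(working_folder):
  f_hdf5 = h5py.File(os.path.join(working_folder, 'emcee_summary.hdf5'), 'r')
  chains = f_hdf5['chains'][...]          # shape (nwalkers, nruns, nfit)
  lnprob = f_hdf5['lnprobability'][...]   # shape (nwalkers, nruns)
  f_hdf5.close()
  print('chains shape = %s' %(str(chains.shape)))
  print('best lnprob = %.7f' %(lnprob.max()))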
Example #54
def compute_similarity_matrix(text1, text2):
    similarity_matrix = np.zeros([len(text1), len(text2)])
    for i, w1 in enumerate(text1):
        for j, w2 in enumerate(text2):
            similarity_matrix[i, j] = word_similarity(w1, w2)
    return similarity_matrix
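
# Illustrative usage with a trivial stand-in for word_similarity (assumption:
# the real word_similarity is defined elsewhere in this module). With the
# stub below bound to word_similarity,
# compute_similarity_matrix(['a', 'b'], ['b', 'a']) gives [[0, 1], [1, 0]].
def word_similarity_stub(w1, w2):
    return 1.0 if w1 == w2 else 0.0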
Example #55
def test_mesh_construction_pygmsh():

    pygmsh = pytest.importorskip("pygmsh")

    if MPI.rank(MPI.comm_world) == 0:
        geom = pygmsh.opencascade.Geometry()
        geom.add_ball([0.0, 0.0, 0.0], 1.0, char_length=0.2)
        pygmsh_mesh = pygmsh.generate_mesh(geom)
        points, cells = pygmsh_mesh.points, pygmsh_mesh.cells
    else:
        points = np.zeros([0, 3])
        cells = {
            "tetra": np.zeros([0, 4], dtype=np.int64),
            "triangle": np.zeros([0, 3], dtype=np.int64),
            "line": np.zeros([0, 2], dtype=np.int64)
        }

    mesh = Mesh(MPI.comm_world, dolfinx.cpp.mesh.CellType.tetrahedron, points,
                cells['tetra'], [], cpp.mesh.GhostMode.none)
    assert mesh.degree() == 1
    assert mesh.geometry.dim == 3
    assert mesh.topology.dim == 3

    mesh = Mesh(MPI.comm_world,
                dolfinx.cpp.mesh.CellType.triangle, points,
                cells['triangle'], [], cpp.mesh.GhostMode.none)
    assert mesh.degree() == 1
    assert mesh.geometry.dim == 3
    assert mesh.topology.dim == 2

    mesh = Mesh(MPI.comm_world,
                dolfinx.cpp.mesh.CellType.interval, points,
                cells['line'], [], cpp.mesh.GhostMode.none)
    assert mesh.degree() == 1
    assert mesh.geometry.dim == 3
    assert mesh.topology.dim == 1

    if MPI.rank(MPI.comm_world) == 0:
        print("Generate mesh")
        geom = pygmsh.opencascade.Geometry()
        geom.add_ball([0.0, 0.0, 0.0], 1.0, char_length=0.2)
        pygmsh_mesh = pygmsh.generate_mesh(
            geom, extra_gmsh_arguments=['-order', '2'])
        points, cells = pygmsh_mesh.points, pygmsh_mesh.cells
        print("End Generate mesh", cells.keys())
    else:
        points = np.zeros([0, 3])
        cells = {
            "tetra10": np.zeros([0, 10], dtype=np.int64),
            "triangle6": np.zeros([0, 6], dtype=np.int64),
            "line3": np.zeros([0, 3], dtype=np.int64)
        }

    mesh = Mesh(MPI.comm_world, dolfinx.cpp.mesh.CellType.tetrahedron, points,
                cells['tetra10'], [], cpp.mesh.GhostMode.none)
    assert mesh.degree() == 2
    assert mesh.geometry.dim == 3
    assert mesh.topology.dim == 3

    mesh = Mesh(MPI.comm_world, dolfinx.cpp.mesh.CellType.triangle, points,
                cells['triangle6'], [], cpp.mesh.GhostMode.none)
    assert mesh.degree() == 2
    assert mesh.geometry.dim == 3
    assert mesh.topology.dim == 2
Example #56
def beat_extraction(short_features, window_size, plot=False):
    """
    This function extracts an estimate of the beat rate for a musical signal.
    ARGUMENTS:
     - short_features:     a np array (n_feats x numOfShortTermWindows)
     - window_size:        window size in seconds
    RETURNS:
     - bpm:            estimates of beats per minute
     - ratio:          a confidence measure
    """

    # Features that are related to the beat tracking task:
    selected_features = [0, 1, 3, 4, 5, 6, 7, 8, 9, 10,
                         11, 12, 13, 14, 15, 16, 17, 18]

    max_beat_time = int(round(2.0 / window_size))
    hist_all = np.zeros((max_beat_time,))
    # for each feature
    for ii, i in enumerate(selected_features):
        # dif threshold (2 x mean of successive feature differences)
        dif_threshold = 2.0 * (np.abs(short_features[i, 0:-1] -
                                      short_features[i, 1::])).mean()
        if dif_threshold <= 0:
            dif_threshold = 1e-16
        # detect local maxima
        [pos1, _] = utilities.peakdet(short_features[i, :], dif_threshold)
        position_diffs = []
        # compute histograms of local maxima changes
        for j in range(len(pos1)-1):
            position_diffs.append(pos1[j+1]-pos1[j])
        histogram_times, histogram_edges = \
            np.histogram(position_diffs, np.arange(0.5, max_beat_time + 1.5))
        hist_centers = (histogram_edges[0:-1] + histogram_edges[1::]) / 2.0
        histogram_times = \
            histogram_times.astype(float) / short_features.shape[1]
        hist_all += histogram_times
        if plot:
            plt.subplot(9, 2, ii + 1)
            plt.plot(short_features[i, :], 'k')
            for k in pos1:
                plt.plot(k, short_features[i, k], 'k*')
            f1 = plt.gca()
            f1.axes.get_xaxis().set_ticks([])
            f1.axes.get_yaxis().set_ticks([])

    if plot:
        plt.show(block=False)
        plt.figure()

    # Get beat as the argmax of the aggregated histogram:
    max_indices = np.argmax(hist_all)
    bpms = 60 / (hist_centers * window_size)
    bpm = bpms[max_indices]
    # ... and the beat ratio:
    ratio = hist_all[max_indices] / (hist_all.sum() + eps)

    if plot:
        # filter out >500 beats from plotting:
        hist_all = hist_all[bpms < 500]
        bpms = bpms[bpms < 500]

        plt.plot(bpms, hist_all, 'k')
        plt.xlabel('Beats per minute')
        plt.ylabel('Freq Count')
        plt.show(block=True)

    return bpm, ratio
Example #57
def calcMag(config):
    # header reconstructed; only the return statement survives in this
    # excerpt of the standard 2D Ising script
    mag = np.sum(config)
    return mag
## change these parameters for a smaller (faster) simulation 
nt      = 88         #  number of temperature points
N       = 16         #  size of the lattice, N x N
eqSteps = 1024       #  number of MC sweeps for equilibration
mcSteps = 1024       #  number of MC sweeps for calculation

FileEnergy = "Data_Energy_fast.txt"
FileMagnet = "Data_Magnet_fast.txt"
FileC = "Data_SpHeat_fast.txt"
FileX = "Data_Suscep_fast.txt"
FileFigure = "Fig_total_fast.png"


T       = np.linspace(1.5, 3.3, nt)
E,M,C,X = np.zeros(nt), np.zeros(nt), np.zeros(nt), np.zeros(nt)
iNSite = 1.0/(N*N)
# divide by number of samples, and by system size, to get intensive values
#----------------------------------------------------------------------
#  MAIN PART OF THE CODE
#----------------------------------------------------------------------
for tt in range(nt):
    E1 = M1 = E2 = M2 = 0
    config = initialstate(N)
    iT = 1.0/T[tt]
    iT2 = iT*iT

    # precompute the only two Boltzmann factors a 2D Ising flip can need
    # (energy increases of 4J and 8J)
    P_2D = np.zeros(2)
    P_2D[0] = np.exp(-4*iT)
    P_2D[1] = P_2D[0] * P_2D[0]

    # Plausible completion of the truncated loop (the standard Metropolis
    # recipe); mcmove and calcEnergy are assumed helpers from the original
    # script that this excerpt does not show.
    for _ in range(eqSteps):                 # equilibration sweeps
        mcmove(config, P_2D)
    for _ in range(mcSteps):                 # measurement sweeps
        mcmove(config, P_2D)
        Ene = calcEnergy(config)
        Mag = calcMag(config)
        E1 += Ene
        M1 += Mag
        E2 += Ene*Ene
        M2 += Mag*Mag

    # intensive averages; C and X come from the fluctuation relations
    # C = (<E^2>-<E>^2)/(N^2 T^2) and X = (<M^2>-<M>^2)/(N^2 T)
    E[tt] = iNSite * E1 / mcSteps
    M[tt] = iNSite * M1 / mcSteps
    C[tt] = iNSite * (E2/mcSteps - E1*E1/(mcSteps*mcSteps)) * iT2
    X[tt] = iNSite * (M2/mcSteps - M1*M1/(mcSteps*mcSteps)) * iT
Example #58
def fit_to_size(matrix, shape):
    """Crop or zero-pad `matrix` to the requested `shape`."""
    res = np.zeros(shape)
    # tuple of slices selecting the region the two shapes have in common
    # (a tuple is required for multi-dimensional indexing in modern numpy)
    slices = tuple(slice(0, min(dim, shape[e]))
                   for e, dim in enumerate(matrix.shape))
    res[slices] = matrix[slices]
    return res
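# Hedged usage sketch (not in the original): pad a 2x2 array up, then crop
# a 4x4 array down; values outside the overlap are zero-filled.
a = np.arange(4).reshape(2, 2)
print(fit_to_size(a, (3, 3)))                # `a` in the top-left corner
print(fit_to_size(np.ones((4, 4)), (2, 2)))  # top-left 2x2 of the input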
Example #59
    def to_scrip(self, scripFileName):  # {{{
        '''
        Given an MPAS mesh file, create a SCRIP file based on the mesh.

        Parameters
        ----------
        scripFileName : str
            The path to which the SCRIP file should be written
        '''
        # Authors
        # -------
        # Xylar Asay-Davis

        self.scripFileName = scripFileName

        inFile = netCDF4.Dataset(self.fileName, 'r')
        outFile = netCDF4.Dataset(scripFileName, 'w')

        # Get info from input file
        latCell = inFile.variables['latCell'][:]
        lonCell = inFile.variables['lonCell'][:]
        latVertex = inFile.variables['latVertex'][:]
        lonVertex = inFile.variables['lonVertex'][:]
        verticesOnCell = inFile.variables['verticesOnCell'][:]
        nEdgesOnCell = inFile.variables['nEdgesOnCell'][:]
        nCells = len(inFile.dimensions['nCells'])
        maxVertices = len(inFile.dimensions['maxEdges'])
        areaCell = inFile.variables['areaCell'][:]
        sphereRadius = float(inFile.sphere_radius)

        _create_scrip(outFile,
                      grid_size=nCells,
                      grid_corners=maxVertices,
                      grid_rank=1,
                      units='radians',
                      meshName=self.meshName)

        grid_area = outFile.createVariable('grid_area', 'f8', ('grid_size', ))
        grid_area.units = 'radian^2'
        # SCRIP uses square radians
        grid_area[:] = areaCell[:] / (sphereRadius**2)

        outFile.variables['grid_center_lat'][:] = latCell[:]
        outFile.variables['grid_center_lon'][:] = lonCell[:]
        outFile.variables['grid_dims'][:] = nCells
        outFile.variables['grid_imask'][:] = 1

        # grid corners:
        grid_corner_lon = numpy.zeros((nCells, maxVertices))
        grid_corner_lat = numpy.zeros((nCells, maxVertices))
        for iVertex in range(maxVertices):
            cellIndices = numpy.arange(nCells)
            # repeat the last vertex wherever iVertex > nEdgesOnCell
            localVertexIndices = numpy.minimum(nEdgesOnCell - 1, iVertex)
            vertexIndices = verticesOnCell[cellIndices, localVertexIndices] - 1
            grid_corner_lat[cellIndices, iVertex] = latVertex[vertexIndices]
            grid_corner_lon[cellIndices, iVertex] = lonVertex[vertexIndices]

        outFile.variables['grid_corner_lat'][:] = grid_corner_lat[:]
        outFile.variables['grid_corner_lon'][:] = grid_corner_lon[:]

        # Update history attribute of netCDF file
        if hasattr(inFile, 'history'):
            newhist = '\n'.join(
                [getattr(inFile, 'history'), ' '.join(sys.argv[:])])
        else:
            newhist = ' '.join(sys.argv[:])
        setattr(outFile, 'history', newhist)

        inFile.close()
        outFile.close()  # }}}
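# Hedged usage sketch (not in the original): the enclosing class is not
# shown in this excerpt; a pyremap-style MpasMeshDescriptor exposing the
# fileName and meshName attributes used above is assumed.
descriptor = MpasMeshDescriptor('mpas_mesh.nc', meshName='oEC60to30')
descriptor.to_scrip('mpas_scrip.nc')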
 
                
Example #60
# (fragment: inside the validation loop of a 3D segmentation script)
batch_x_val = batch_x_val.cpu().data.numpy()
batch_y_val = batch_y_val.cpu().data.numpy()
output_val = output_val.cpu().data.numpy()
output_val = np.moveaxis(output_val, 1, -1)      # channels last
seg_val = np.argmax(output_val[0], axis=-1)      # per-voxel class labels

input_3D = batch_x_val[0][0]
seed_3D = batch_x_val[0][1]
truth_3D = batch_y_val[0]
seg_3D = seg_val
intersect = truth_3D + seg_3D

# overlay map: 1 = truth only, 2 = prediction only, 3 = both agree
combined = np.zeros(np.shape(seg_3D))
combined[truth_3D > 0] = 1
combined[seg_3D > 0] = 2
combined[intersect > 1] = 3

# plt.figure()
# ma = np.amax(combined, axis=0)
# plt.imshow(ma, cmap='magma')

""" Get sklearn metric """
from sklearn.metrics import jaccard_score
# jacc_new = jaccard_score(truth_3D.flatten(), seg_3D.flatten())
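# Hedged completion (not in the original excerpt): Jaccard index of the
# binarized volumes; thresholding at zero is an illustrative choice.
jacc_val = jaccard_score((truth_3D > 0).astype(int).flatten(),
                         (seg_3D > 0).astype(int).flatten())
print('validation Jaccard: %.4f' % jacc_val)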