Code example #1
File: Util.py Project: Dhruva6/889e-HW1
def generate_test_states(data, scaler):
    """
        Function that returns the next (s) pair from the input data one by one every time you call it. Requires a
        scaler to have been computed so that we can approximate the values for the 'NA' pairs in the data.
    """
    # Get the known states
    known_states = [state for state in get_known_states(data)]

    event_length = 9
    state_length = 9 
    num_states = 1
    test_s = []
    for episode in data:
        # Start at the beginning and keep looking at a net length of len(s)  points
        # Each time, we increment our start position by s = 9 points
        curr_state = 0
        while curr_state < num_states:
            start_idx = curr_state * state_length
            end_idx = start_idx + event_length
            datum = episode[start_idx:end_idx]
            # If it's normal data without 'NA', proceed as before, except we 'scale' the values to mean 0 and variance 1
            try:
                s = np.array(datum[:9].astype(np.float))
                s = scaler.transform(s)
                test_s.append(s)
            # IF there was a value error it means there was a 'NA' field somewhere. 
            except ValueError:
                # ONLY S AND S' have these 'NA' fields (I've confirmed). Therefore we go through them and replace any
                # fields that have 'NA' (with 0.0 here; the commented-out line below uses the feature mean instead), and then apply the scaler.
#                s = np.array([elem if elem!='NA' else scaler.mean_[i].astype(np.float) for i, elem in enumerate(datum[:9])]).astype(np.float)
                s = np.array([elem if elem!='NA' else 0.0 for i, elem in enumerate(datum[:9])]).astype(np.float)
                s = scaler.transform(s)
                test_s.append(s)
            curr_state += 1
    return test_s
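A minimal, self-contained sketch of the 'NA' handling above, assuming scikit-learn's StandardScaler as the scaler (the original only requires an object exposing mean_ and transform); the data here is illustrative, not from the original project.

import numpy as np
from sklearn.preprocessing import StandardScaler

scaler = StandardScaler().fit(np.array([[1.0, 2.0], [3.0, 6.0]]))
datum = np.array(['2.0', 'NA'], dtype=object)

# Replace 'NA' with the feature mean, then scale to mean 0 / variance 1.
row = np.array([float(v) if v != 'NA' else scaler.mean_[i]
                for i, v in enumerate(datum)]).reshape(1, -1)
print(scaler.transform(row))  # the imputed column maps to 0 after scaling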
Code example #2
File: training.py Project: JiaoFusen/csdnSMP
def lstm(trainData, trainMark, testData, embedding_dim, embedding_matrix, maxlen, output_len):
    # Pad the data so that every sequence has the same length
    trainData = list(sequence.pad_sequences(trainData, maxlen=maxlen,
                                            dtype='float64'))  # pad_sequences returns a numpy array, padding/truncating each sequence to the given length (truncate if longer, zero-pad if shorter); index 0 maps to an all-zero row below, so zero-padding is safe
    testData = list(sequence.pad_sequences(testData, maxlen=maxlen,
                                           dtype='float64'))  # pad_sequences returns a numpy array, padding/truncating each sequence to the given length

    # Build the LSTM network
    model = Sequential()  # A linear stack of layers; build it by passing a list of layers or by adding them one by one with .add()
    # model.add(Dense(256, input_shape=(train_total_vova_len,)))   # fully-connected input layer (unused)
    model.add(Embedding(len(embedding_matrix), embedding_dim, weights=[embedding_matrix], mask_zero=False,
                        input_length=maxlen))  # Input layer: maps high-dimensional one-hot indices to low-dimensional embeddings; the first argument is the vocabulary size (max input index + 1), the second is the embedding dimension
    # LSTM layer, the core of the model
    model.add(LSTM(256))  # 256 is the LSTM output dimension; the input dimension is inferred from the Embedding output
    model.add(Dropout(0.5))  # Randomly drop connections during each update to reduce overfitting
    model.add(Dense(output_len))  # Fully-connected output layer with output_len units; the input dimension is inferred from the LSTM layer
    model.add(Activation('softmax'))  # Softmax activation on the output
    # Compile the model: categorical_crossentropy (a.k.a. log loss) with the SGD optimizer
    model.compile(loss='categorical_crossentropy', optimizer='sgd')

    # Train the model (the earlier shape error came from arrays of unequal length, i.e. missing zero-padding)
    X = np.array(list(trainData))  # training inputs
    print("X:", X)
    Y = np.array(list(trainMark))  # labels
    print("Y:", Y)
    # batch_size: integer, number of samples per gradient-descent batch
    # nb_epoch: integer, number of training epochs; the training data is traversed nb_epoch times
    model.fit(X, Y, batch_size=200, nb_epoch=10)  # for multiple inputs X and Y are lists of numpy arrays; for a single input they are numpy arrays

    # Predict
    A = np.array(list(testData))  # test inputs
    print("A:", A)
    classes = model.predict(A)  # predicted outputs
    return classes
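A hedged usage sketch for lstm() with tiny synthetic data; it assumes the same Keras 1.x-era imports the original script relies on (Sequential, Embedding, LSTM, Dense, Dropout, Activation, sequence, and the nb_epoch argument of fit), and the toy shapes are illustrative only.

import numpy as np

vocab_size, embedding_dim, maxlen, output_len = 10, 8, 5, 3
embedding_matrix = np.random.rand(vocab_size, embedding_dim)

trainData = [[1, 2, 3], [4, 5]]          # word-index sequences
trainMark = np.eye(output_len)[[0, 2]]   # one-hot class labels
testData = [[2, 3]]

classes = lstm(trainData, trainMark, testData,
               embedding_dim, embedding_matrix, maxlen, output_len)
print(classes.shape)                     # (1, output_len)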
Code example #3
File: test_neighbors.py Project: 93sam/scikit-learn
def test_kneighbors_classifier_predict_proba():
    """Test KNeighborsClassifier.predict_proba() method"""
    X = np.array([[0, 2, 0],
                  [0, 2, 1],
                  [2, 0, 0],
                  [2, 2, 0],
                  [0, 0, 2],
                  [0, 0, 1]])
    y = np.array([4, 4, 5, 5, 1, 1])
    cls = neighbors.KNeighborsClassifier(n_neighbors=3, p=1)  # cityblock dist
    cls.fit(X, y)
    y_prob = cls.predict_proba(X)
    real_prob = np.array([[0, 2. / 3, 1. / 3],
                          [1. / 3, 2. / 3, 0],
                          [1. / 3, 0, 2. / 3],
                          [0, 1. / 3, 2. / 3],
                          [2. / 3, 1. / 3, 0],
                          [2. / 3, 1. / 3, 0]])
    assert_array_equal(real_prob, y_prob)
    # Check that it also works with non integer labels
    cls.fit(X, y.astype(str))
    y_prob = cls.predict_proba(X)
    assert_array_equal(real_prob, y_prob)
    # Check that it works with weights='distance'
    cls = neighbors.KNeighborsClassifier(
        n_neighbors=2, p=1, weights='distance')
    cls.fit(X, y)
    y_prob = cls.predict_proba(np.array([[0, 2, 0], [2, 2, 2]]))
    real_prob = np.array([[0, 1, 0], [0, 0.4, 0.6]])
    assert_array_almost_equal(real_prob, y_prob)
Code example #4
def test_partial_dependence_helpers(est, method, target_feature):
    # Check that what is returned by _partial_dependence_brute or
    # _partial_dependence_recursion is equivalent to manually setting a target
    # feature to a given value, and computing the average prediction over all
    # samples.
    # This also checks that the brute and recursion methods give the same
    # output.

    X, y = make_regression(random_state=0)
    # The 'init' estimator for GBDT (here the average prediction) isn't taken
    # into account with the recursion method, for technical reasons. We set
    # the mean to 0 so that this 'bug' doesn't have any effect.
    y = y - y.mean()
    est.fit(X, y)

    # target feature will be set to .5 and then to 123
    features = np.array([target_feature], dtype=np.int32)
    grid = np.array([[.5],
                     [123]])

    if method == 'brute':
        pdp = _partial_dependence_brute(est, grid, features, X,
                                        response_method='auto')
    else:
        pdp = _partial_dependence_recursion(est, grid, features)

    mean_predictions = []
    for val in (.5, 123):
        X_ = X.copy()
        X_[:, target_feature] = val
        mean_predictions.append(est.predict(X_).mean())

    pdp = pdp[0]  # (shape is (1, 2) so make it (2,))
    assert_allclose(pdp, mean_predictions, atol=1e-3)
Code example #5
File: test_neighbors.py Project: 93sam/scikit-learn
def test_RadiusNeighborsRegressor_multioutput_with_uniform_weight():
    """Test radius neighbors in multi-output regression (uniform weight)"""

    rng = check_random_state(0)
    n_features = 5
    n_samples = 40
    n_output = 4

    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples, n_output)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

    for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):

        rnn = neighbors.RadiusNeighborsRegressor(weights=weights,
                                                 algorithm=algorithm)
        rnn.fit(X_train, y_train)

        neigh_idx = rnn.radius_neighbors(X_test, return_distance=False)
        y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
                               for idx in neigh_idx])

        y_pred_idx = np.array(y_pred_idx)
        y_pred = rnn.predict(X_test)

        assert_equal(y_pred_idx.shape, y_test.shape)
        assert_equal(y_pred.shape, y_test.shape)
        assert_array_almost_equal(y_pred, y_pred_idx)
Code example #6
  def __init__(self):
    """
    Setup tri33 cell.
    """
    vertices = numpy.array([[-1.0, -1.0],
                            [+1.0, -1.0],
                            [-1.0, +1.0]])
    quadPts = vertices[:]
    quadWts = numpy.array( [2.0/3.0, 2.0/3.0, 2.0/3.0])

    # Compute basis fns and derivatives at quadrature points
    basis = numpy.zeros( (3, 3), dtype=numpy.float64)
    basisDeriv = numpy.zeros( (3, 3, 2), dtype=numpy.float64)
    iQuad = 0
    for q in quadPts:
      basis[iQuad] = numpy.array([self.N0(q), self.N1(q), self.N2(q)],
                                 dtype=numpy.float64).reshape( (3,) )
      deriv = numpy.array([[self.N0p(q), self.N0q(q)],
                           [self.N1p(q), self.N1q(q)],
                           [self.N2p(q), self.N2q(q)]])      
      basisDeriv[iQuad] = deriv.reshape((3, 2))
      iQuad += 1

    self.cellDim = 2
    self.numCorners = len(vertices)
    self.numQuadPts = len(quadPts)
    self.vertices = vertices
    self.quadPts = quadPts
    self.quadWts = quadWts
    self.basis = basis
    self.basisDeriv = basisDeriv
    return
Code example #7
File: Anritsu_VNA.py Project: rotzinger/qkit
    def get_tracedata(self, format = 'AmpPha', single=False):
        '''
        Get the data of the current trace

        Input:
            format (string) : 'AmpPha': Amp in dB and Phase, 'RealImag',

        Output:
            'AmpPha': Amplitude and Phase
        '''
        #data = self._visainstrument.ask_for_values(':FORMAT REAL,32;*CLS;CALC1:DATA:NSW? SDAT,1;*OPC',format=1)      
        data = self._visainstrument.ask_for_values('FORM:DATA REAL; FORM:BORD SWAPPED; CALC%i:SEL:DATA:SDAT?'%(self._ci), format = visa.double)      
        data_size = numpy.size(data)
        datareal = numpy.array(data[0:data_size:2])
        dataimag = numpy.array(data[1:data_size:2])
          
        if format.upper() == 'REALIMAG':
          if self._zerospan:
            return numpy.mean(datareal), numpy.mean(dataimag)
          else:
            return datareal, dataimag
        elif format.upper() == 'AMPPHA':
          if self._zerospan:
            datareal = numpy.mean(datareal)
            dataimag = numpy.mean(dataimag)
            dataamp = numpy.sqrt(datareal*datareal+dataimag*dataimag)
            datapha = numpy.arctan(dataimag/datareal)
            return dataamp, datapha
          else:
            dataamp = numpy.sqrt(datareal*datareal+dataimag*dataimag)
            datapha = numpy.arctan2(dataimag,datareal)
            return dataamp, datapha
        else:
          raise ValueError('get_tracedata(): Format must be AmpPha or RealImag') 
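A standalone sketch of the 'AmpPha' conversion used above, on illustrative interleaved real/imaginary samples (not tied to the Anritsu driver):

import numpy

data = numpy.array([1.0, 0.0, 0.0, 1.0, -1.0, 1.0])  # interleaved re, im, re, im, ...
datareal = numpy.array(data[0::2])
dataimag = numpy.array(data[1::2])

dataamp = numpy.sqrt(datareal * datareal + dataimag * dataimag)  # linear magnitude
datapha = numpy.arctan2(dataimag, datareal)                      # quadrant-safe phase in radians
print(dataamp, datapha)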
Code example #8
 def test_basic_instantiation(self):
     '''
     Tests the basic instantiation of the SHIFT class
     '''
     # Instantiation with a float
     self.model = Shift(5.0)
     np.testing.assert_array_almost_equal(self.model.target_magnitudes,
                                          np.array([5.0]))
     self.assertEqual(self.model.number_magnitudes, 1)
     # Instantiation with a numpy array
     self.model = Shift(np.arange(5., 8., 0.5))
     np.testing.assert_array_almost_equal(self.model.target_magnitudes,
                                          np.arange(5., 8., 0.5))
     self.assertEqual(self.model.number_magnitudes, 6)
     # Instantiation with a list
     self.model = Shift([5., 6., 7., 8.])
     np.testing.assert_array_almost_equal(self.model.target_magnitudes,
                                          np.array([5., 6., 7., 8.]))
     self.assertEqual(self.model.number_magnitudes, 4)
     # Otherwise raise an error
     with self.assertRaises(ValueError) as ae:
         self.model = Shift(None)
     self.assertEqual(ae.exception.message,
                      'Minimum magnitudes must be float, list or array')
     # Check regionalisation - assuming defaults
     self.model = Shift(5.0)
     for region in self.model.regionalisation.keys():
         self.assertDictEqual(BIRD_GLOBAL_PARAMETERS[region],
                              self.model.regionalisation[region])
     np.testing.assert_array_almost_equal(np.log10(self.model.base_rate),
                                          np.array([-20.74610902]))
Code example #9
    def testCNCS(self):
        # CNCS_7860 is not an incoherent scatterer but for this test
        # it doesn't matter
        SA, Flux = MDNormSCDPreprocessIncoherent(Filename='CNCS_7860',
                                                 MomentumMin=1,
                                                 MomentumMax=1.5)

        # Just compare 10 points of the Flux
        flux_cmp = np.array([0.00000000e+00, 7.74945234e-04, 4.96143098e-03,
                             1.18914010e-02, 1.18049991e-01, 7.71872176e-01,
                             9.93078957e-01, 9.96312349e-01, 9.98450129e-01,
                             1.00000002e+00])
        np.testing.assert_allclose(Flux.extractY()[0][::1000], flux_cmp)
        self.assertEqual(Flux.getXDimension().name, 'Momentum')
        self.assertEqual(Flux.getXDimension().getUnits(), 'Angstrom^-1')
        self.assertEqual(Flux.blocksize(), 10000)
        self.assertEqual(Flux.getNumberHistograms(), 1)

        # Compare every 20-th bin of row 64
        SA_cmp = np.array([0.11338311, 0.18897185, 0.15117748, 0.11338311, 0.03779437,
                           0.07558874, 0.15117748, 0.18897185, 0.03779437, 0.15117748,
                           0.11338311, 0.07558874, 0.03779437, 0.        , 0.56691555,
                           0.26456059, 0.11338311, 0.07558874, 0.11338311, 0.])
        np.testing.assert_allclose(SA.extractY().reshape((-1,128))[::20,64], SA_cmp)
        self.assertEqual(SA.getXDimension().name, 'Momentum')
        self.assertEqual(SA.getXDimension().getUnits(), 'Angstrom^-1')
        self.assertEqual(SA.blocksize(), 1)
        self.assertEqual(SA.getNumberHistograms(), 51200)
        self.assertEqual(SA.getNEvents(), 51200)
Code example #10
def condition_on_grades(user="******"):
	c = new_conn.cursor()
	models = [None, None, None, None, None, None]
	for i in range(6):
		c.execute('SELECT easiness, ret_reps, ret_reps_since_lapse, lapses, pred_grade, acq_reps from discrete_log where user_id="%s" and grade=%d' % (user, i))
		x_train = np.array(c.fetchall())
		c.execute('SELECT interval_bucket from discrete_log where user_id="%s" and grade=%d' % (user, i))
		y_train = np.array(c.fetchall())[:,0]
		clf = SVC()
		clf.fit(x_train, y_train)
		print clf.score(x_train, y_train)
		models[i] = clf
	print "====================="
	c.execute('SELECT user_id from (select user_id, count(distinct grade) as cnt from discrete_log group by user_id) where cnt = 6 limit 5')
	users = [row[0] for row in c.fetchall()]
	scores = [0, 0, 0, 0, 0, 0]
	for user in users:
		for i in range(6):
			c.execute('SELECT easiness, ret_reps, ret_reps_since_lapse, lapses, pred_grade, acq_reps from discrete_log where user_id="%s" and grade=%d' % (user, i))
			x_train = np.array(c.fetchall())
			c.execute('SELECT interval_bucket from discrete_log where user_id="%s" and grade=%d' % (user, i))
			y_train = np.array(c.fetchall())[:,0]
			scores[i] += models[i].score(x_train, y_train)
	for i in range(6):
		scores[i] /= len(users);
		print scores[i]
Code example #11
 def test_continuum_seismicity(self):
     '''
     Tests the function hmtk.strain.shift.Shift.continuum_seismicity - 
     the python implementation of the Subroutine Continuum Seismicity from
     the Fortran 90 code GSRM.f90
     '''
     self.strain_model = GeodeticStrain()
     # Define a simple strain model
     test_data = {'longitude': np.zeros(3, dtype=float),
                  'latitude': np.zeros(3, dtype=float),
                  'exx': np.array([1E-9, 1E-8, 1E-7]),
                  'eyy': np.array([5E-10, 5E-9, 5E-8]),
                  'exy': np.array([2E-9, 2E-8, 2E-7])}
     self.strain_model.get_secondary_strain_data(test_data)
     self.model = Shift([5.66, 6.66])
     threshold_moment = moment_function(np.array([5.66, 6.66]))
     
     expected_rate = np.array([[-14.43624419, -22.48168502],
                               [-13.43624419, -21.48168502],
                               [-12.43624419, -20.48168502]]) 
     np.testing.assert_array_almost_equal(
         expected_rate,
         np.log10(self.model.continuum_seismicity(
             threshold_moment,
             self.strain_model.data['e1h'],
             self.strain_model.data['e2h'],
             self.strain_model.data['err'],
             BIRD_GLOBAL_PARAMETERS['OSRnor'])))
Code example #12
File: test_huayno.py Project: mherkazandjian/amuse
    def test3(self):
        convert_nbody = nbody_system.nbody_to_si(1.0 | units.MSun, 149.5e6 | units.km)

        instance = Huayno(convert_nbody)
        instance.initialize_code()
        instance.parameters.epsilon_squared = 0.00001 | units.AU**2
        
        stars = datamodel.Stars(2)
        star1 = stars[0]
        star2 = stars[1]

        star1.mass = units.MSun(1.0)
        star1.position = units.AU(numpy.array((-1.0,0.0,0.0)))
        star1.velocity = units.AUd(numpy.array((0.0,0.0,0.0)))
        star1.radius = units.RSun(1.0)

        star2.mass = units.MSun(1.0)
        star2.position = units.AU(numpy.array((1.0,0.0,0.0)))
        star2.velocity = units.AUd(numpy.array((0.0,0.0,0.0)))
        star2.radius = units.RSun(100.0)
        
        instance.particles.add_particles(stars)
    
        for x in range(1,2000,10):
            instance.evolve_model(x | units.day)
            instance.particles.copy_values_of_all_attributes_to(stars)
            stars.savepoint()
Code example #13
def mult_leave_out(random, percent):
    i = 1
    J = [[(5)]]
    for row in random:
        test,train = np.vsplit(random, np.array([i]))
        if i == 1:
            y_all, new_feature_number = ypred_leave_one_out(train, test, percent)
        if i != 1:
            if i < sample_n:
                train2,test = np.vsplit(test, np.array([i-1])) 
                train = np.vstack((train2,train))
                y_all, new_feature_number= ypred_leave_one_out(train, test, percent)
            if i > training_n:
                train,test = np.vsplit(random, np.array([training_n]))
                y_all, new_feature_number = ypred_leave_one_out(train, test, percent)
    
        J = np.append(J, np.array(y_all))
        i = i + 1
    J = np.delete(J,0,0)
    ground_truth = random[:,0]
    
    #print J, ground_truth
    slope, intercept, r_value, p_value, std_err = stats.linregress(ground_truth,J)
    #print r_value
    #print np.square(r_value)
    return r_value, std_err, p_value, slope, intercept, ground_truth, J, new_feature_number
Code example #14
def lars_regression_noise_ipyparallel(pars): 
    import numpy as np
    import os
    import sys
    import gc
    import time
        
    
    Y_name,C_name,noise_sn,idxs_C, idxs_Y=pars
    Y=np.load(Y_name,mmap_mode='r')
    Y=np.array(Y[idxs_Y,:])
    C=np.load(C_name,mmap_mode='r')
    C=np.array(C)
    _,T=np.shape(C)
    #sys.stdout = open(str(os.getpid()) + ".out", "w")
    st=time.time()
    As=[]    
    #print "*****************:" + str(idxs_Y[0]) + ',' + str(idxs_Y[-1])
    sys.stdout.flush()    
    for y,px in zip(Y,idxs_Y):  
        #print str(time.time()-st) + ": Pixel" + str(px)
        sys.stdout.flush()    
        c=C[idxs_C[px],:]
        if np.size(c)>0:             
            sn=noise_sn[px]**2*T            
            _,_,a,_,_=lars_regression_noise(y, c.T, 1, sn)
            if not np.isscalar(a):                
                a=a.T  
                 
            As.append((px,idxs_C[px],a))
    
    del Y
    del C
    gc.collect()
    
    return As
Code example #15
File: egarchmreg.py Project: ekote/pyflux
    def __init__(self,data,p,q,formula):

        # Initialize TSM object
        super(EGARCHMReg,self).__init__('EGARCHMReg')

        # Latent variables
        self.p = p
        self.q = q
        self.max_lag = max(self.p,self.q)  
        self.z_no = self.p + self.q + 2
        self._z_hide = 0 # Whether to cutoff variance latent variables from results
        self.supported_methods = ["MLE","PML","Laplace","M-H","BBVI"]
        self.default_method = "MLE"
        self.multivariate_model = False
        self.leverage = False
        self.model_name = "EGARCHMReg(" + str(self.p) + "," + str(self.q) + ")"

        # Format the data
        self.is_pandas = True # This is compulsory for this model type
        self.data_original = data
        self.formula = formula
        self.y, self.X = dmatrices(formula, data)
        self.z_no += self.X.shape[1]*2
        self.y_name = self.y.design_info.describe()
        self.data_name = self.y_name
        self.X_names = self.X.design_info.describe().split(" + ")
        self.y = np.array([self.y]).ravel()
        self.data = self.y
        self.X = np.array([self.X])[0]
        self.index = data.index
        self.initial_values = np.zeros(self.z_no)

        self._create_latent_variables()
Code example #16
File: graph_utils.py Project: 812864539/models
def rng_target_dist_field(batch_size, gtG, rng, max_dist, max_dist_to_compute,
                          nodes=None, compute_path=False):
  # Sample a single node, compute distance to all nodes less than max_dist,
  # sample nodes which are a particular distance away.
  dists = []; pred_maps = []; paths = []; start_node_ids = []
  end_node_ids = rng.choice(gtG.num_vertices(), size=(batch_size,),
                            replace=False).tolist()

  for i in range(batch_size):
    dist, pred_map = gt.topology.shortest_distance(
        gt.GraphView(gtG, reversed=True), source=gtG.vertex(end_node_ids[i]),
        target=None, max_dist=max_dist_to_compute, pred_map=True)
    dist = np.array(dist.get_array())
    pred_map = np.array(pred_map.get_array())
    dists.append(dist)
    pred_maps.append(pred_map)

    # Randomly sample nodes which are within max_dist
    near_ids = np.where(dist <= max_dist)[0]
    start_node_id = rng.choice(near_ids, size=(1,), replace=False)[0]
    start_node_ids.append(start_node_id)

    path = None
    if compute_path:
      path = get_path_ids(start_node_ids[i], end_node_ids[i], pred_map)
    paths.append(path)

  return start_node_ids, end_node_ids, dists, pred_maps, paths
Code example #17
    def test__build_row_representation(self):
        id_cols = [
            array([2,4,6]),
            array([1,2,3])]
        attribute_cols = {
            'col1':array([3,2,1]),
            'col2':array([2,1,0]),
            'col3':array([4,5,6])                  
        }
        
        expected = {
            (2,1):{'col1':3,
                   'col2':2,
                   'col3':4},
            (4,2):{'col1':2,
                   'col2':1,
                   'col3':5},
            (6,3):{'col1':1,
                   'col2':0,
                   'col3':6},            
        }
        
        dataset = self._get_dataset(dataset_name = 'test', 
                                    cache_directory = self.temp_cache_path, 
                                    year = 1980)
        dataset_junior = DatasetJunior(dataset=dataset,
                                       name = 'test')

        
        dataset_junior._build_row_representation(id_cols = id_cols, 
                                                   attribute_cols = attribute_cols) 
        output = dataset_junior.row_representation
          
        self.assertEqual(expected,output)
Code example #18
File: test_nose.py Project: lmfit/lmfit-py
    def test_emcee_lnpost(self):
        # check ln likelihood is calculated correctly. It should be
        # -0.5 * chi**2.
        result = self.mini.minimize()

        # obtain the numeric values
        # note - in this example all the parameters are varied
        fvars = np.array([par.value for par in result.params.values()])

        # calculate the cost function with scaled values (parameters all have
        # lower and upper bounds).
        scaled_fvars = []
        for par, fvar in zip(result.params.values(), fvars):
            par.value = fvar
            scaled_fvars.append(par.setup_bounds())

        val = self.mini.penalty(np.array(scaled_fvars))

        # calculate the log-likelihood value
        bounds = np.array([(par.min, par.max)
                           for par in result.params.values()])
        val2 = _lnpost(fvars,
                       self.residual,
                       result.params,
                       result.var_names,
                       bounds,
                       userargs=(self.x, self.data))

        assert_almost_equal(-0.5 * val, val2)
Code example #19
def load_adm_sat_school_data(return_X_y=False):

    with open("./merged_adm_sat_data.csv") as csv_file:
        data_file = csv.reader(csv_file)
        temp = next(data_file)
        n_samples = int(temp[0])
        n_features = int(temp[1])
        target_names = np.array(temp[2:])


    df = pd.read_csv("./merged_adm_sat_data.csv", sep=",", usecols=(0, 1, 2, 3), skiprows=0)
    data = np.empty((n_samples, n_features), dtype=int)
    target = np.ma.empty((n_samples,), dtype=int)

    for index, row in df.iterrows():
        data[index] = np.asarray([df.iloc[index][0], df.iloc[index][1], df.iloc[index][2]], dtype=np.float)
        target[index] = np.asarray(df.iloc[index][3], dtype=np.int)

    feature_names = np.array(['ACT_AVG','SAT_AVG','GRAD_DEBT','REGION'])

    if return_X_y:
        return data, target

    return datasets.base.Bunch(data=data, target=target,
                 target_names=target_names,
                 DESCR='School Data set',
                 feature_names=feature_names)
Code example #20
 def measure_objects(self, operand, workspace):
     '''Performs the measurements on the requested objects'''
     objects = workspace.get_objects(operand.operand_objects.value)
     if objects.has_parent_image:
         area_occupied = np.sum(objects.segmented[objects.parent_image.mask]>0)
         perimeter = np.sum(outline(np.logical_and(objects.segmented != 0,objects.parent_image.mask)))
         total_area = np.sum(objects.parent_image.mask)
     else:
         area_occupied = np.sum(objects.segmented > 0)
         perimeter = np.sum(outline(objects.segmented) > 0)
         total_area = np.product(objects.segmented.shape)
     m = workspace.measurements
     m.add_image_measurement(F_AREA_OCCUPIED%(operand.operand_objects.value),
                             np.array([area_occupied], dtype=float ))
     m.add_image_measurement(F_PERIMETER%(operand.operand_objects.value),
                             np.array([perimeter], dtype=float ))
     m.add_image_measurement(F_TOTAL_AREA%(operand.operand_objects.value),
                             np.array([total_area], dtype=float))
     if operand.should_save_image.value:
         binary_pixels = objects.segmented > 0
         output_image = cpi.Image(binary_pixels,
                                  parent_image = objects.parent_image)
         workspace.image_set.add(operand.image_name.value,
                                 output_image)
      return [[operand.operand_objects.value,
             str(area_occupied),str(perimeter),str(total_area)]]
Code example #21
File: layer.py Project: coders-circle/batss
    def __init__(self, num_neurons, prev_layer=None):
        """Constructs a layer with given number of neurons.

        Args:
            num_neurons: Number of neurons in this layer.
            prev_layer: Previous layer which acts as input to this
                        layer. None for input layer.
        """

        # x : Activation vector of the neurons.
        # nets : Vector of weighted sum of inputs of the neurons.
        # deltas : Delta error vector, used to adjust the weights.
        self.x = np.array([0] * num_neurons)
        self.nets = np.array([0] * num_neurons)
        self.deltas = np.array([0] * num_neurons)

        self.prev_layer = prev_layer

        # If previous layer exists, create a weight matrix
        # with random values.
        if prev_layer:
            self.weights = []
            for i in range(num_neurons):

                # Each neuron is connected to all neurons of previous layer
                # plus a constant input of '1' (the weight of which is
                # bias). So total number of weights = num_inputs + 1.

                prev_x_len = len(prev_layer.x) + 1
                w = [get_random_weight(prev_x_len) for _ in range(prev_x_len)]
                self.weights.append(w)

            self.weights = np.matrix(self.weights)
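A minimal sketch of how a forward pass could consume the weight layout built above (one row per neuron over [previous activations..., constant 1 for the bias]); this is an illustration under assumed names, not code from the original project.

import numpy as np

prev_x = np.array([0.2, -0.1, 0.7])             # activations of the previous layer
weights = np.full((2, len(prev_x) + 1), 0.5)    # 2 neurons, +1 column for the bias weight

inputs = np.append(prev_x, 1.0)                 # constant 1 input for the bias
nets = weights @ inputs                         # weighted sums, one per neuron
x = 1.0 / (1.0 + np.exp(-nets))                 # e.g. a sigmoid activation
print(nets, x)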
Code example #22
    def setUp(self):
        x = numpy.array([ 8.375, 7.545, 8.828, 8.5  , 1.757, 5.928,
                          8.43 , 7.78 , 9.865, 5.878, 8.979, 4.732,
                          3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
                          6.04 , 9.63 , 7.712, 3.382, 4.489, 6.479,
                          7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
                          7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
        X = x.reshape(6, 6)
        XX = x.reshape(3, 2, 2, 3)

        m = numpy.array([0, 1, 0, 1, 0, 0,
                         1, 0, 1, 1, 0, 1,
                         0, 0, 0, 1, 0, 1,
                         0, 0, 0, 1, 1, 1,
                         1, 0, 0, 1, 0, 0,
                         0, 0, 1, 0, 1, 0])
        mx = array(data=x, mask=m)
        mX = array(data=X, mask=m.reshape(X.shape))
        mXX = array(data=XX, mask=m.reshape(XX.shape))

        m2 = numpy.array([1, 1, 0, 1, 0, 0,
                          1, 1, 1, 1, 0, 1,
                          0, 0, 1, 1, 0, 1,
                          0, 0, 0, 1, 1, 1,
                          1, 0, 0, 1, 1, 0,
                          0, 0, 1, 0, 1, 1])
        m2x = array(data=x, mask=m2)
        m2X = array(data=X, mask=m2.reshape(X.shape))
        m2XX = array(data=XX, mask=m2.reshape(XX.shape))
        self.d = (x, X, XX, m, mx, mX, mXX)
Code example #23
File: scale_obj.py Project: fmelinscak/tvb-data
def vertex_transform1(vertex):
    """
    This transform was applied on the original surface.
    """
    return np.dot(rotation_matrix(np.array([0.0, 0.0, 1.0]), math.pi),
                  np.dot(rotation_matrix(np.array([1.0, 0.0, 0.0]), -math.pi / 1.6),
                         np.array([float(x) / 1.5 for x in vertex[:3]]) + np.array([0.0, -40.0, 20.0])))
Code example #24
	def calculate_user_similarity(user_rating_dict,user_list,restaurant_list,score_matrix,user_mean):
		similarity_matrix = []
		for row in range(len(user_list)):
			similarity_vector = []
			list1 = user_rating_dict[user_list[row]].keys()
			mean1 = user_mean[row]
			for col in range(row,len(user_list)):		
				list2 = user_rating_dict[user_list[col]].keys()
				mean2 = user_mean[col]
				join_list = list(set(list1+list2))
				rating_vector1 = []
				rating_vector2 = []
				for item in join_list:
					if item in list1:
						rating_vector1.append(user_rating_dict[user_list[row]][item]-mean1)
					else:
						rating_vector1.append(score_matrix[row,restaurant_list.index(item)]-mean1)
					if item in list2:
						rating_vector2.append(user_rating_dict[user_list[col]][item]-mean2)
					else:
						rating_vector2.append(score_matrix[col,restaurant_list.index(item)]-mean2)
				similarity = numpy.sum(numpy.array(rating_vector1)*numpy.array(rating_vector2))/sqrt(numpy.sum(numpy.square(rating_vector1))*numpy.sum(numpy.square(rating_vector2)))
				similarity_vector.append(similarity)
			similarity_matrix.append(similarity_vector)
		similarity_matrix = numpy.array(similarity_matrix)
		for col in range(len(user_list)):
			for row in range(col,len(user_list)):
				similarity_matrix[row,col] = similarity_matrix[col,row]
		return similarity_matrix
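The pairwise loop above computes a mean-centred cosine (Pearson-style) similarity; a vectorised NumPy sketch of the same idea on a small dense ratings matrix (illustrative data, one row per user):

import numpy

ratings = numpy.array([[4.0, 2.0, 5.0],
                       [3.0, 3.0, 4.0],
                       [1.0, 5.0, 2.0]])
centred = ratings - ratings.mean(axis=1, keepdims=True)
norms = numpy.linalg.norm(centred, axis=1)
similarity = centred.dot(centred.T) / numpy.outer(norms, norms)
print(similarity)  # symmetric, with 1.0 on the diagonal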
Code example #25
    def test_array_richcompare_legacy_weirdness(self):
        # It doesn't really work to use assert_deprecated here, b/c part of
        # the point of assert_deprecated is to check that when warnings are
        # set to "error" mode then the error is propagated -- which is good!
        # But here we are testing a bunch of code that is deprecated *because*
        # it has the habit of swallowing up errors and converting them into
        # different warnings. So assert_warns will have to be sufficient.
        assert_warns(FutureWarning, lambda: np.arange(2) == "a")
        assert_warns(FutureWarning, lambda: np.arange(2) != "a")
        # No warning for scalar comparisons
        with warnings.catch_warnings():
            warnings.filterwarnings("error")
            assert_(not (np.array(0) == "a"))
            assert_(np.array(0) != "a")
            assert_(not (np.int16(0) == "a"))
            assert_(np.int16(0) != "a")

        for arg1 in [np.asarray(0), np.int16(0)]:
            struct = np.zeros(2, dtype="i4,i4")
            for arg2 in [struct, "a"]:
                for f in [operator.lt, operator.le, operator.gt, operator.ge]:
                    if sys.version_info[0] >= 3:
                        # py3
                        with warnings.catch_warnings() as l:
                            warnings.filterwarnings("always")
                            assert_raises(TypeError, f, arg1, arg2)
                            assert_(not l)
                    else:
                        # py2
                        assert_warns(DeprecationWarning, f, arg1, arg2)
Code example #26
def loadTrajectoryData(inFile = UJILocDataFile):

	with open(inFile, 'r') as dataFile:
		data = dataFile.read()

	# 9-axis IMU data
	# trajectory: dictionary with three elements
	# N is number of samples in the trajectory (data taken at 10Hz)
	# mag: Nx3 numpy array where each line has XYZ mag data
	# gyro: Nx3 numpy array where each line has XYZ gyro vel data
	# accel: Nx3 numpy array where each line has XYZ lin accelerometer data
	segments = data.split("<", 2)
	IMUDataStr = segments[0].split('\n')[:-1]
	magArr = []
	oriArr = []	
	accelArr = []

	for i, lineStr in enumerate(IMUDataStr): 

		lineStr = lineStr.split(' ', 10)[:-1]
		lineStr = [float(x) for x in lineStr]
		magArr.append(lineStr[1:4]) # xyz mag data for sample
		accelArr.append(lineStr[4:7]) # xyz accelerometer data for single samp
		oriArr.append(lineStr[7:10]) # xyz gyro data for sample

	# Values are initially given as Euler angles, which are not good for IMU-type calculations,
	# so we convert them.
	gyroArr = rawSensorStateProc.orientationToGyro(oriArr) 
	initOrientationMatrix = rawSensorStateProc.calcInitialOrientation(oriArr[0])

	# IMUData = [{'mag': magArr, 'gyro': gyroArr, 'accel': accelArr}]
	
	# process waypoint data
	# each waypoint consists of a latitude coordinate, longitude coordinate,
	# and index (what IMU datapoint it represents)
	waypoints = []
	waypointStr = segments[1].split(">", 2)
	numWaypoints = int(waypointStr[0])
	waypointLns = waypointStr[1].lstrip().split('\n')

	for i, lineStr in enumerate(waypointLns): 

		line = lineStr.split(' ', WAYPOINTS_ELEMS_PER_LINE)
		line = [float(x) for x in line]
		
		if i == 0:
			waypoints.append({'lat': line[0], 'long': line[1], 'index': line[4]}) 
		
		waypoints.append({'lat': line[2], 'long': line[3], 'index': line[5]})

		seqLen = line[5]

	
	traj = ({'waypoints': np.array(waypoints), 'mag': np.array(magArr), 'gyro': np.array(gyroArr), 
			 'accel': np.array(accelArr), 'orientSensed': np.array(oriArr), 
			 'initOrient': initOrientationMatrix, 'seqLen': seqLen})

	return traj

# loadTrajectoryData()
Code example #27
    def test(self):

        with self.test_session() as sess:

            m = tf.constant(np.array([
                [1.0, 2.0],
                [2.0, 0.0]
            ], dtype=np.float32))

            l = linear(m, 4)

            result = sess.run(l, {
                'SimpleLinear/Matrix:0': np.array([
                    [1.0, 2.0],
                    [1.0, 2.0],
                    [1.0, 2.0],
                    [1.0, 2.0],
                ]),
                'SimpleLinear/Bias:0': np.array([
                    0.0,
                    1.0,
                    2.0,
                    3.0,
                ]),
            })

            self.assertAllClose(result, np.array([
                [5.0, 6.0, 7.0, 8.0],
                [2.0, 3.0, 4.0, 5.0],
            ]))
            print(result)
Code example #28
File: gm121.py Project: geo7/csci480
def init():
    global theMesh,  theLight, theCamera, \
           theScreen,    resolution
    initializeVAO()
    glEnable(GL_CULL_FACE)
    glEnable(GL_DEPTH_TEST)

    # Add our object
    # LIGHT
    theLight = N.array((-0.577, 0.577, 0.577, 0.0),dtype=N.float32)
    # OBJECT
    phongshader = makeShader("phongshader.vert","phongshader.frag")
    verts, elements = readOBJ("suzanne.obj")
    suzanneVerts = getArrayBuffer(verts)
    suzanneElements = getElementBuffer(elements)
    suzanneNum = len(elements)
    theMesh = coloredMesh(N.array((1.0, 0.5, 1.0, 1.0), dtype=N.float32),
                          suzanneVerts,
                          suzanneElements,
                          suzanneNum,
                          phongshader)

    # CAMERA
    width,height = theScreen.get_size()
    aspectRatio = float(width)/float(height)
    near = 0.01
    far = 100.0
    lens = 4.0  # "longer" lenses mean more telephoto
    theCamera = Camera(lens, near, far, aspectRatio)
    theCamera.moveBack(6)
Code example #29
File: test_signaltools.py Project: josef-pkt/scipy
 def test_2d_complex_same(self):
     a = array([[1+2j,3+4j,5+6j],[2+1j,4+3j,6+5j]])
     c = signal.fftconvolve(a,a)
     d = array([[-3+4j,-10+20j,-21+56j,-18+76j,-11+60j],\
                [10j,44j,118j,156j,122j],\
                [3+4j,10+20j,21+56j,18+76j,11+60j]])
     assert_array_almost_equal(c,d)
Code example #30
File: IO_test.py Project: hbuschme/PRIMO
 def test_readXMLBIF(self):
     bn = XMLBIFParser.parse("primo2/tests/slippery.xbif")
     
     nodes = bn.get_all_nodes()        
     self.assertTrue("slippery_road" in nodes)
     self.assertTrue("sprinkler" in nodes)
     self.assertTrue("rain" in nodes)
     self.assertTrue("wet_grass" in nodes)
     self.assertTrue("winter" in nodes)
     self.assertEqual(len(nodes), 5)
     slipperyNode = bn.get_node("slippery_road")
     self.assertTrue("rain" in slipperyNode.parents)
     sprinklerNode = bn.get_node("sprinkler")
     self.assertTrue("winter" in sprinklerNode.parents)
     rainNode = bn.get_node("rain")
     self.assertTrue("winter" in rainNode.parents)
     cpt = np.array([[0.8,0.1],[0.2,0.9]])        
     np.testing.assert_array_almost_equal(rainNode.cpd, cpt)
     
     wetNode = bn.get_node("wet_grass")
     self.assertTrue("sprinkler" in wetNode.parents)
     self.assertTrue("rain" in wetNode.parents)
     self.assertTrue("true" in wetNode.values)
     cpt = np.array([[[0.95, 0.8],[0.1,0.0]], [[0.05, 0.2],[0.9, 1.0]]])
     self.assertEqual(wetNode.get_probability("false", {"rain":["true"], "sprinkler":["false"]}),0.2)
     self.assertEqual(wetNode.get_probability("true", {"rain":["false"], "sprinkler":["true"]}),0.1)
Code example #31
    def __init__(self, opt):
        """Initialize this dataset class.

        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
        """
        BaseDataset.__init__(self, opt)
        assert (opt.image_type == 'exr')
        assert (opt.output_nc == 1)
        assert (opt.input_nc == 1)

        #self.A = os.path.join(opt.dataroot, opt.phase + '_input')
        self.A1 = os.path.join(opt.dataroot, opt.phase + '_input_terraform')
        self.B = os.path.join(opt.dataroot, opt.phase + '_output')

        #self.A_paths = sorted(make_dataset(self.A, opt.max_dataset_size))
        self.A1_paths = sorted(make_dataset(self.A1, opt.max_dataset_size))
        self.B_paths = sorted(make_dataset(self.B, opt.max_dataset_size))
        #self.A_size = len(self.A_paths)  # get the size of dataset A
        self.A1_size = len(self.A1_paths)
        self.B_size = len(self.B_paths)  # get the size of dataset B

        self.A1_test_paths = sorted(
            make_dataset(os.path.join(opt.dataroot, 'test_input_terraform')))
        self.B_test_paths = sorted(
            make_dataset(os.path.join(opt.dataroot, 'test_output')))
        self.A1_test_size = len(self.A1_test_paths)
        self.B_test_size = len(self.B_test_paths)

        self.input_names = np.array([
            "RockDetailMask.RockDetailMask", "SoftDetailMask.SoftDetailMask",
            "cliffs.cliffs", "height.height", "mesa.mesa", "slope.slope",
            "slopex.slopex", "slopez.slopez"
        ])
        self.output_names = np.array([
            "RockDetailMask.RockDetailMask", "SoftDetailMask.SoftDetailMask",
            "bedrock.bedrock", "cliffs.cliffs", "flow.flow", "flowx.flowx",
            "flowz.flowz", "height.height", "mesa.mesa", "sediment.sediment",
            "water.water"
        ])
        self.input_channels = np.array([3])
        self.output_channels = np.array([7])

        if not self.opt.compute_bounds:
            self.i_channels_min = np.array([[[-86]]])  #0
            self.i_channels_max = np.array([[[910]]])  #824
            self.o_channels_min = np.array([[[-86]]])  #-4
            self.o_channels_max = np.array([[[910]]])  #819
            return

        channels_min = np.array([2**16 for _ in self.input_channels])
        channels_max = np.array([0 for _ in self.input_channels])
        examples = 0
        for A1_path in self.A1_paths:
            A1_img = exrlib.read_exr_float32(
                A1_path, list(self.input_names[self.input_channels]), 512,
                512).transpose(2, 0, 1).reshape(len(self.input_channels), -1)
            channels_min = np.min(
                np.concatenate((np.expand_dims(
                    channels_min, 1), np.expand_dims(np.min(A1_img, 1), 1)),
                               1), 1)
            channels_max = np.max(
                np.concatenate((np.expand_dims(
                    channels_max, 1), np.expand_dims(np.max(A1_img, 1), 1)),
                               1), 1)
            examples += 1
            if examples >= 1000:
                break

        print(channels_min)
        self.i_channels_min = np.expand_dims(
            np.expand_dims(np.array(channels_min), 1), 2)
        print(channels_max)
        self.i_channels_max = np.expand_dims(
            np.expand_dims(np.array(channels_max), 1), 2)

        channels_min = np.array([2**16 for _ in self.output_channels])
        channels_max = np.array([0 for _ in self.output_channels])
        examples = 0
        for B_path in self.B_paths:
            B_img = exrlib.read_exr_float32(
                B_path, list(self.output_names[self.output_channels]), 512,
                512).transpose(2, 0, 1).reshape(len(self.output_channels), -1)
            channels_min = np.min(
                np.concatenate((np.expand_dims(
                    channels_min, 1), np.expand_dims(np.min(B_img, 1), 1)), 1),
                1)
            channels_max = np.max(
                np.concatenate((np.expand_dims(
                    channels_max, 1), np.expand_dims(np.max(B_img, 1), 1)), 1),
                1)
            examples += 1
            if examples >= 1000:
                break

        print(channels_min)
        self.o_channels_min = np.expand_dims(np.expand_dims(channels_min, 1),
                                             2)
        print(channels_max)
        self.o_channels_max = np.expand_dims(np.expand_dims(channels_max, 1),
                                             2)
Code example #32
File: motifs.py Project: gialdetti/netsci
        motif = motifs[i]
        for perm, perm_inv in permutations:
            isomorph = permute(motif, perm)
            edges = isomorph + 2 * isomorph.T
            tags[edges[0, 1], edges[0, 2],
                 edges[1, 2], :] = np.hstack([i, perm_inv])

    return tags


# np.testing.assert_equal( triads_classification_tree(), triads_classification_tree_old() )
# %timeit triads_classification_tree_old()
# %timeit triads_classification_tree()

# Compatibility with several conventions
triad_order_bct = 3 + np.array([1, 0, 2, 5, 3, 4, 6, 10, 7, 8, 9, 11, 12
                                ])  # j.neuroimage.2009.10.003
triad_order_egger2014 = 3 + np.array(
    [12, 6, 11, 8, 9, 10, 3, 7, 0, 4, 5, 1, 2])  # fnana.2014.00129
triad_order_nn4576 = 3 + np.arange(13)  # nn.4576


def index_all(elements, array):
    return np.array([np.where(array == x)[0][0] for x in elements])


conv_triad_order_nn4576_to_bct = index_all(triad_order_bct, triad_order_nn4576)
assert np.array_equal(triad_order_nn4576[conv_triad_order_nn4576_to_bct],
                      triad_order_bct)
conv_triad_order_nn4576_to_egger2014 = index_all(triad_order_egger2014,
                                                 triad_order_nn4576)
assert np.array_equal(triad_order_nn4576[conv_triad_order_nn4576_to_egger2014],
                      triad_order_egger2014)
Code example #33
def bessel_i1_spherical_values ( n_data ):

#*****************************************************************************80
#
## BESSEL_I1_SPHERICAL_VALUES returns some values of the Spherical Bessel function i1.
#
#  Discussion:
#
#    In Mathematica, the function can be evaluated by:
#
#      Sqrt[Pi/(2*x)] * BesselI[3/2,x]
#
#  Licensing:
#
#    This code is distributed under the GNU LGPL license.
#
#  Modified:
#
#    31 December 2014
#
#  Author:
#
#    John Burkardt
#
#  Reference:
#
#    Milton Abramowitz, Irene Stegun,
#    Handbook of Mathematical Functions,
#    National Bureau of Standards, 1964,
#    LC: QA47.A34,
#    ISBN: 0-486-61272-4.
#
#    Stephen Wolfram,
#    The Mathematica Book,
#    Fourth Edition,
#    Wolfram Media / Cambridge University Press, 1999.
#
#  Parameters:
#
#    Input/output, integer N_DATA.  The user sets N_DATA to 0 before the
#    first call.  On each call, the routine increments N_DATA by 1, and
#    returns the corresponding data; when there is no more data, the
#    output value of N_DATA will be 0 again.
#
#    Output, real X, the argument of the function.
#
#    Output, real FX, the value of the function.
#
  import numpy as np

  n_max = 21

  fx_vec = np.array ( ( \
    0.03336667857363341E+00, \
    0.06693371456802954E+00, \
    0.1354788933285401E+00, \
    0.2072931911031093E+00, \
    0.2841280857128948E+00, \
    0.3678794411714423E+00, \
    0.4606425870674146E+00, \
    0.5647736480096238E+00, \
    0.6829590627779635E+00, \
    0.8182955028627777E+00, \
    0.9743827435800610E+00, \
    1.155432469636406E+00, \
    1.366396525527973E+00, \
    1.613118767572064E+00, \
    1.902515460838681E+00, \
    2.242790117769266E+00, \
    2.643689828630357E+00, \
    3.116811526884873E+00, \
    3.675968313148932E+00, \
    4.337627987747642E+00, \
    5.121438384183637E+00 ) )

  x_vec = np.array ( ( \
     0.1E+00, \
     0.2E+00, \
     0.4E+00, \
     0.6E+00, \
     0.8E+00, \
     1.0E+00, \
     1.2E+00, \
     1.4E+00, \
     1.6E+00, \
     1.8E+00, \
     2.0E+00, \
     2.2E+00, \
     2.4E+00, \
     2.6E+00, \
     2.8E+00, \
     3.0E+00, \
     3.2E+00, \
     3.4E+00, \
     3.6E+00, \
     3.8E+00, \
     4.0E+00  ) )

  if ( n_data < 0 ):
    n_data = 0

  if ( n_max <= n_data ):
    n_data = 0
    x = 0.0
    fx = 0.0
  else:
    x = x_vec[n_data]
    fx = fx_vec[n_data]
    n_data = n_data + 1

  return n_data, x, fx
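A quick cross-check of the table above: the spherical Bessel function i1 has the closed form i1(x) = (x*cosh(x) - sinh(x)) / x**2, so recomputing the entry at x = 1.0 should reproduce fx_vec[5] (a small hedged sketch, not part of the original routine).

import numpy as np

def i1_spherical(x):
    return (x * np.cosh(x) - np.sinh(x)) / x**2

n_data, x, fx = bessel_i1_spherical_values(5)  # sixth entry: x = 1.0
print(x, fx, i1_spherical(x))                  # both values are ~0.3678794411714423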
Code example #34
File: FitSFPoly.py Project: smoortga/ttcc
}

for hist_name in histos_names:
    print hist_name
    if not("central" in hist_name or "_Down" in hist_name or "_Up" in hist_name): continue
    flav = hist_name.split("_")[0]
    syst = hist_name.split("_")[-1]
    hist_ = central_SF_file.Get(hist_name)
    for binx in range(hist_.GetNbinsX()):
        for biny in range(hist_.GetNbinsY()):
            Z[binx][biny] = hist_.GetBinContent(binx+1,biny+1)
    print Z
    X_ = X.flatten()
    Y_ = Y.flatten()

    A = np.array([X_*0+1, X_, Y_, X_**2, Y_**2, X_*Y_, X_**2*Y_,X_*Y_**2, X_**3, Y_**3]).T
    B = Z.flatten()
    coeff, r, rank, s = np.linalg.lstsq(A, B)
    print coeff
    
    xx = np.arange(0,1,0.1)
    yy = np.arange(0,1,0.1)
    XX, YY = np.meshgrid(xx, yy, copy=False)
    XX_ = XX.flatten()
    YY_ = YY.flatten()
    ZZ = np.dot(np.c_[np.ones(XX_.shape), XX_, YY_, XX_**2, YY_**2, XX_*YY_, XX_**2*YY_,XX_*YY_**2, XX_**3, YY_**3], coeff).reshape(XX.shape)
    
    results_dict[syst][flav]["values"] = Z
    results_dict[syst][flav]["smooth"] = ZZ
    
    # fig = plt.figure()
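A minimal, self-contained sketch of the same least-squares surface-fitting idea with np.linalg.lstsq, here with a plane z ≈ c0 + c1*x + c2*y on synthetic data (the script above fits a cubic in x and y to scale-factor histograms):

import numpy as np

x = np.linspace(0.0, 1.0, 5)
y = np.linspace(0.0, 1.0, 5)
X, Y = np.meshgrid(x, y)
Z = 1.0 + 2.0 * X - 0.5 * Y                        # synthetic surface values

A = np.array([X.ravel() * 0 + 1, X.ravel(), Y.ravel()]).T
coeff, residuals, rank, s = np.linalg.lstsq(A, Z.ravel(), rcond=None)
print(coeff)                                        # approximately [1.0, 2.0, -0.5]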
Code example #35
File: circuit_integral.py Project: cycle13/scripts-1
def calc_circulation(trajectories, forecast, theta_level, dtheta):
    # Select an individual theta level
    trajectories = trajectories.select(
        'air_potential_temperature', '==', theta_level)
    print(len(trajectories))
    levels = ('air_potential_temperature', [theta_level])

    results = iris.cube.CubeList()
    for n, cubes in enumerate(forecast):
        print(n)
        if n == 0:
            # Load grid parameters
            example_cube = convert.calc('upward_air_velocity', cubes,
                                        levels=levels)

            # Create a 1d array of points for determining which gridpoints are
            # contained in the trajectory circuit when performing volume
            # integrals
            glon, glat = grid.get_xy_grids(example_cube)
            gridpoints = np.array([glon.flatten(), glat.flatten()]).transpose()
            cs = example_cube.coord_system()

        # Load trajectory positions -(n+2) because the trajectories are
        # backwards in time. +2 to skip the analysis which is not in the
        # forecast object (i.e. n=0 corresponds to idx=-2 in the trajectories)
        x = trajectories.x[:, -(n + 2)]
        y = trajectories.y[:, -(n + 2)]
        z = trajectories['altitude'][:, -(n + 2)]
        u = trajectories['x_wind'][:, -(n + 2)]
        v = trajectories['y_wind'][:, -(n + 2)]
        w = trajectories['upward_air_velocity'][:, -(n + 2)]

        # Integrals are invalid once trajectories leave the domain but we don't
        # want to stop the script so just print out the number of trajectories
        # that have left the domain
        leftflag = (trajectories['air_pressure'][:, -(n+2)] < 0).astype(int)
        leftcount = np.count_nonzero(leftflag)
        print(leftcount)

        # Calculate enclosed area integrals
        integrals = mass_integrals(cubes, x, y, glat, gridpoints,
                                   theta_level, dtheta)
        for icube in integrals:
            # Set integrals to zero if trajectories have left the domain
            if leftcount > 0:
                icube.data = 0.
            results.append(icube)

        # Convert to global coordinates in radians
        u, v, lon, lat = get_geographic_coordinates(u, v, x, y, cs)

        # Unrotated coordinates in radians
        lon = np.deg2rad(lon)
        lat = np.deg2rad(lat)

        # Calculate the velocity due to Earth's rotation
        u_abs = omega.data * (a+z) * np.cos(lat)
        u += u_abs

        # Integrate around the circuit
        if leftcount > 0:
            circulation = 0
        else:
            circulation = circuit_integral_rotated(u, v, w, lon, lat, z)
        ccube = icube.copy(data=circulation)
        ccube.rename('circulation')
        ccube.units = 's-1'
        results.append(ccube)

    iris.save(results.merge(),
              datadir + 'circulations_' + str(theta_level) + 'K.nc')

    return
Code example #36
File: circuit_integral.py Project: cycle13/scripts-1
def circuit_integrals(u_abs, u, v, w, lon, lat, glon, glat, z, r):
    # Integrate u.dl around the circuit of trajectories
    # 1st and last 2 trajectories are the same so don't double count
    dlambda, dx, dy, dz = [], [], [], []
    for n in range(1, len(u) - 1):
        # dlambda is length along true longitudes to match the direction of
        # the Earth rotation
        dlambda.append(r[n] * np.cos(lat[n]) * 0.5 * (lon[n + 1] - lon[n - 1]))

        # dx and dy are in the direction of the rotated grid which corresponds
        # to the wind fields in the forecast
        dx.append(r[n] * np.cos(glat[n]) * 0.5 * (glon[n + 1] - glon[n - 1]))
        dy.append(r[n] * 0.5 * (glat[n + 1] - glat[n - 1]))

        # dz is independent of grid rotation
        dz.append(0.5 * (z[n + 1] - z[n - 1]))

    dlambda = np.array(dlambda)
    dx = np.array(dx)
    dy = np.array(dy)
    dz = np.array(dz)

    # \int dl: Tracks the errors in each calculation (should be zero)
    dx_tot = np.sum(dx)
    dy_tot = np.sum(dy)
    dz_tot = np.sum(dz)
    dlambda_tot = np.sum(dlambda)

    # \int |dl|
    length = np.sum(np.sqrt(dx ** 2 + dy ** 2 + dz ** 2))

    # u * r cos(phi) dlambda
    circ_u = u[1:-1] * dx

    # v * r dphi
    circ_v = v[1:-1] * dy

    # w * dz
    circ_w = w[1:-1] * dz

    # u_abs * r cos(phi) dlambda
    circ_p = u_abs[1:-1] * dlambda
    """
    r_ave = 0.5 * (r[1:] + r[:-1])

    dlambda = r_ave * np.cos(0.5 * (lat[1:] + lat[:-1])) * (lon[1:] - lon[:-1])
    dx = r_ave * np.cos(0.5 * (glat[1:] + glat[:-1])) * (glon[1:] - glon[:-1])
    dy = r_ave * (glat[1:] - glat[:-1])
    dz = (z[1:] - z[:-1])

    # \int dl
    dx_tot = np.sum(dx)
    dy_tot = np.sum(dy)
    dz_tot = np.sum(dz)
    dlambda_tot = np.sum(dlambda)

    # \int |dl|
    length = np.sum(np.sqrt(dx ** 2 + dy ** 2 + dz ** 2))

    # u * r cos(phi) dlambda
    circ_u = 0.5 * (u[1:] + u[:-1]) * dx

    # v * r dphi
    circ_v = 0.5 * (v[1:] + v[:-1]) * dy

    # w * dz
    circ_w = 0.5 * (w[1:] + w[:-1]) * dz

    # u_abs * r cos(phi) dlambda
    circ_p = 0.5 * (u_abs[1:] + u_abs[:-1]) * dlambda
    """

    rel_circulation = np.sum(circ_u + circ_v + circ_w)
    planetary_circulation = np.sum(circ_p)
    abs_circulation = np.sum(circ_u + circ_v + circ_w + circ_p)

    return (dx_tot, dy_tot, dz_tot, dlambda_tot, length,
            rel_circulation, planetary_circulation, abs_circulation)
Code example #37
import pickle
import numpy as np
import pandas as pd
from sklearn.naive_bayes import MultinomialNB
from training_functions import get_bow_vector


clf = MultinomialNB()
data = pd.read_excel(r'data\intents.xlsx')

X = []
for quest in data['question']:
    X.append(get_bow_vector(quest))


y = np.array(data['class'])
X = np.array(X)
clf.fit(X, y)


with open(r'models\MultinominalNB_model.pkl', 'wb') as fid:
    pickle.dump(clf, fid)
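A hedged usage sketch for the script above: load the pickled classifier back and classify a new question (get_bow_vector comes from the project's training_functions module; the question string is illustrative):

import pickle
import numpy as np
from training_functions import get_bow_vector

with open(r'models\MultinominalNB_model.pkl', 'rb') as fid:
    clf = pickle.load(fid)

vec = np.array([get_bow_vector("What are your opening hours?")])
print(clf.predict(vec))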
Code example #38
File: io.py Project: PomGGMM/PyCINRAD
 def projection(self, reso):
     r = np.arange(reso, self.drange + reso, reso)
     theta = np.array(self.data[self.tilt]['azimuth']) * deg2rad
     lonx, latx = get_coordinate(r, theta, self.elev, self.stationlon, self.stationlat)
     hght = height(r, self.elev, self.radarheight) * np.ones(theta.shape[0])[:, np.newaxis]
     return lonx, latx, hght, r, theta
Code example #39
File: NewDataset.py Project: ldelprete/TagLab
	def findAreas(self, target_classes):
		"""
		Find the validation and test areas with landscape metrics similar to the ones of the entire map.
		"""

		area_info = []

		map_w = self.ortho_image.width()
		map_h = self.ortho_image.height()

		area_w = int(math.sqrt(0.15) * map_w)
		area_h = int(math.sqrt(0.15) * map_h)

		landscape_number, landscape_coverage, landscape_PSCV = self.calculateMetrics([0, 0, map_w, map_h], target_classes)

		# calculate normalization factor
		numbers = []
		coverages = []
		PSCVs = []
		sn = []
		sc = []
		sP = []
		for i in range(5000):

			aspect_ratio_factor = factor = rnd.uniform(0.4, 2.5)
			w = int(area_w / aspect_ratio_factor)
			h = int(area_h * aspect_ratio_factor)
			px = rnd.randint(0, map_w - w - 1)
			py = rnd.randint(0, map_h - h - 1)

			area_bbox = [py, px, w, h]
			area_number, area_coverage, area_PSCV = self.calculateMetrics(area_bbox, target_classes)
			s1, s2, s3 = self.rangeScore(area_number, area_coverage, area_PSCV, landscape_number, landscape_coverage, landscape_PSCV)

			numbers.append(area_number)
			coverages.append(area_coverage)
			PSCVs.append(area_PSCV)

			sn.append(s1)
			sc.append(s2)
			sP.append(s3)

		sn = np.array(sn)
		sc = np.array(sc)
		sP = np.array(sP)
		self.sn_min = np.min(sn, axis=0)
		self.sn_max = np.max(sn, axis=0)
		self.sc_min = np.min(sc, axis=0)
		self.sc_max = np.max(sc, axis=0)
		self.sP_min = np.min(sP, axis=0)
		self.sP_max = np.max(sP, axis=0)

		for i in range(10000):

			aspect_ratio_factor = factor = rnd.uniform(0.4, 2.5)
			w = int(area_w / aspect_ratio_factor)
			h = int(area_h * aspect_ratio_factor)

			px = rnd.randint(0, map_w - w - 1)
			py = rnd.randint(0, map_h - h - 1)

			area_bbox = [py, px, w, h]

			area_number, area_coverage, area_PSCV = self.calculateMetrics(area_bbox, target_classes)
			scores = self.calculateNormalizedScore(area_number, area_coverage, area_PSCV, landscape_number, landscape_coverage, landscape_PSCV)

			for i, score in enumerate(scores):
				if math.isnan(score):
					scores[i] = 0.0

			aggregated_score = sum(scores) / len(scores)

			area_info.append((area_bbox, scores, aggregated_score))


		area_info.sort(key=lambda x:x[2])
		val_area = area_info[0][0]

		print("*** VALIDATION AREA ***")
		area_number, area_coverage, area_PSCV = self.calculateMetrics(val_area, target_classes)
		scoresNorm = self.calculateNormalizedScore(area_number, area_coverage, area_PSCV, landscape_number,
											   landscape_coverage, landscape_PSCV)
		lc = [value * 100.0 for value in landscape_coverage]
		ac = [value * 100.0 for value in area_coverage]

		for i, score in enumerate(scoresNorm):
			if math.isnan(score):
				scoresNorm[i] = 0.0
		print(scoresNorm)
		print("Normalized score:", sum(scoresNorm) / len(scoresNorm))
		print("Number of corals per class (landscape):", landscape_number)
		print("Coverage of corals per class (landscape):", lc)
		print("PSCV per class (landscape): ", landscape_PSCV)
		print("Number of corals per class (selected area):", area_number)
		print("Coverage of corals per class (selected area):", ac)
		print("PSCV of corals per class (selected area):", area_PSCV)

		for i in range(len(area_info)):
			intersection = self.bbox_intersection(val_area, area_info[i][0])
			if intersection < 10.0:
				test_area = area_info[i][0]
				break

		print("*** TEST AREA ***")
		area_number, area_coverage, area_PSCV = self.calculateMetrics(test_area, target_classes)
		scoresNorm = self.calculateNormalizedScore(area_number, area_coverage, area_PSCV, landscape_number,
											   landscape_coverage, landscape_PSCV)
		lc = [value * 100.0 for value in landscape_coverage]
		ac = [value * 100.0 for value in area_coverage]

		for i, score in enumerate(scoresNorm):
			if math.isnan(score):
				scoresNorm[i] = 0.0
		print(scoresNorm)
		print("Normalized score:", sum(scoresNorm) / len(scoresNorm))
		print("Number of corals per class (landscape):", landscape_number)
		print("Coverage of corals per class (landscape):", lc)
		print("PSCV per class (landscape): ", landscape_PSCV)
		print("Number of corals per class (selected area):", area_number)
		print("Coverage of corals per class (selected area):", ac)
		print("PSCV of corals per class (selected area):", area_PSCV)

		return val_area, test_area
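
rangeScore and calculateNormalizedScore are not included in this excerpt; from the way they are used above, the first 5000-sample loop estimates per-class score ranges (sn_min/sn_max and so on) that the second loop then uses for min-max normalization. A minimal sketch of what such a normalization helper could look like, stated purely as an assumption and not TagLab's actual implementation:

def calculateNormalizedScore(self, area_number, area_coverage, area_PSCV,
                             landscape_number, landscape_coverage, landscape_PSCV):
    # sketch: per-class deviation between the area and the whole map,
    # rescaled to [0, 1] using the ranges estimated in the first sampling loop
    scores = []
    for i in range(len(landscape_number)):
        s1 = abs(area_number[i] - landscape_number[i])
        s2 = abs(area_coverage[i] - landscape_coverage[i])
        s3 = abs(area_PSCV[i] - landscape_PSCV[i])
        n = (s1 - self.sn_min[i]) / (self.sn_max[i] - self.sn_min[i])
        c = (s2 - self.sc_min[i]) / (self.sc_max[i] - self.sc_min[i])
        p = (s3 - self.sP_min[i]) / (self.sP_max[i] - self.sP_min[i])
        scores.append((n + c + p) / 3.0)
    return scores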
コード例 #40
0
ファイル: trumck_diam.py プロジェクト: bbw7561135/RadioSNRs
def radiolightcurve(lmcthick, nh, tborn, ejmas, energ, nprof):
    #thick_lim = 5*(lmcthick/pc)*0.5
    tsnap_array = np.linspace(1.8e6, 2.4e6, 50)
    diam_array = np.zeros((tsnap_array.size, 9000))
    nsnrs = np.zeros(tsnap_array.size)

    #Declaring constants for Luminosity Calculation
    epsb = 0.01
    alpha = 1.0
    epse = epsb * alpha
    pp = 2.5
    compf = 4.0
    ff = 0.38
    nei = 1.14
    cl = 6.27e18
    c5 = 9.68e-24
    c6 = 8.10e-41
    dist = 50 * 1000 * 3.086e18
    nu = 1.4e9

    for xx in range(tsnap_array.size):
        tsnap = tsnap_array[xx]  #years
        #---------------------------------------------------------------#
        #---------------------------------------------------------------#
        #---------------------------------------------------------------#

        #random locations of densities and times for snrate = 2.0e3

        rad = np.zeros(nh.size)
        tim = np.zeros(tborn.size)  # 800 is the size of lluni2 for the Sedov-Taylor phases
        for i in range(nh.size):
            if tborn[i] > tsnap:
                break
            nt2 = 800
            lluni2 = 0.0
            mu = 1.4
            n0 = randomnh(nh[i], lmcthick)  #dimensionless, multiplied by 1cm^3
            mp = 1.67e-24
            ismrho = n0 * mu * mp  #in units of cm^-3
            e51 = energ[i]  #in units of 10^51 ergs of energy released per sne
            mej = ejmas[i]  #in units of solar masses
            if n0 == 0.0:
                print(n0, nh[i])
            # Characteristic scales
            tch = 423 * (e51**(-0.5)) * (mej**(5.0 / 6.0)) * (n0**(-1.0 / 3.0))  # years
            rch = 3.07 * (mej**(1.0 / 3.0)) * (n0**(-1.0 / 3.0))  # pc
            vch = 7090 * (e51**0.5) * (mej**(-0.5))  # km/s

            #variables

            t_ed = tsnap - tborn[i]
            #if t_ed<0.0:
            #   continue
            if (nprof[i] == 7.0):
                tstar_st = 0.732
                t_st0 = tstar_st * tch
                tstar_ed = t_ed / tch
                vstar_ed = 0.606 * tstar_ed**(
                    -(3.0 / 7.0)) if t_ed < t_st0 else 0.569 * (
                        (1.42 * tstar_ed - 0.312)**(-3.0 / 5.0))
                rstar_ed = 1.06 * tstar_ed**(4.0 / 7.0) if t_ed < t_st0 else (
                    1.42 * tstar_ed - 0.312)**(2.0 / 5.0)
                v_ed = vstar_ed * vch  #in km/s
                r_ed = rstar_ed * rch  #in par

#--------------------------------------------------------#
            elif (nprof[i] == 12.0):
                tstar_st = 0.424
                t_st0 = tstar_st * tch
                tstar_ed = t_ed / tch
                vstar_ed = 0.545 * tstar_ed**(
                    -(3.0 / 7.0)) if t_ed < t_st0 else 0.569 * (
                        (1.42 * tstar_ed - 0.28)**(-3.0 / 5.0))
                rstar_ed = 0.953 * tstar_ed**(4.0 / 7.0) if t_ed < t_st0 else (
                    1.42 * tstar_ed - 0.28)**(2.0 / 5.0)
                v_ed = vstar_ed * vch  #in km/s
                r_ed = rstar_ed * rch  #in par

            vrad = 200.
            if (v_ed <= vrad):
                tim[i] = t_ed
                rad[i] = 0.0
                continue

            tim[i] = t_ed
            rad[i] = r_ed

        histrad = rad[np.nonzero(rad)]
        diam_array[xx, 0:histrad.size] = 2.0 * histrad
#-------------------------------------------------------------------#

#-------------------------------------------------------------------#
#-------------- LIKELIHOOD TESTING (Badenes2010) -------------------#
#-------------------------------------------------------------------#

#Saving stuff to the IpythonStuff folder for analysis
    userdoc = os.path.join(os.getcwd(), 'DataAnalysis')
    np.savetxt(os.path.join(userdoc, 'paramtst_diamhist.txt'), diam_array)

    #CALCULATE N(OBS) AND N(MODEL) PER BIN

    diam_cutoff = 80.0  #pc

    obs_bins = np.linspace(0, diam_cutoff, 6)
    n, bins = np.histogram(lmcdiams, bins=obs_bins)
    n2 = np.zeros((diam_array.shape[0], obs_bins.size - 1))
    for ind, lums in enumerate(diam_array):
        lums = lums[np.nonzero(lums)]
        n2[ind], bins2 = np.histogram(lums, bins=obs_bins)

#CALCULATING LIKELIHOOD
    avg_n = np.mean(n2, axis=0)
    likhood_temp = np.array(
        [poissonProb(n[ind], avg_n[ind]) for ind in range(n.size)])
    likhood = np.prod(likhood_temp)
    return likhood
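
poissonProb is not part of this excerpt; given how it is called above (probability of the observed count n[ind] under a Poisson model with mean avg_n[ind]), a minimal stand-in could be:

import numpy as np
from scipy.special import gammaln

def poissonProb(k, mu):
    # Poisson probability P(k | mu), evaluated in log space for numerical stability (sketch)
    if mu <= 0.0:
        return 1.0 if k == 0 else 0.0
    return np.exp(k * np.log(mu) - mu - gammaln(k + 1))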
コード例 #41
0
            tmp2[ii] = tmp2[ii] + (fast_A[lastM + jj] * fast_entity[t][jj])
        lastM = lastM + dimension
        ssum = ssum + abs(tmp1[ii] + fast_relation[r][ii] - tmp2[ii])
    fast_res = ssum
    owl_res = (owl_entity[h] + owl_relation[r] - owl_entity[t])
    return (-fast_res), (-np.linalg.norm(owl_res, 1))


file_in = open(files_src + 'entity2vec_OWL.vec', 'r')
for line in file_in:
    x = line.split()
    sub_mat = []
    for val in x:
        sub_mat.append(float(val))
    owl_entity.append(sub_mat)
owl_entity = np.array(owl_entity)

file_in = open(files_src + 'relation2vec_OWL.vec', 'r')
for line in file_in:
    x = line.split()
    sub_mat = []
    for val in x:
        sub_mat.append(float(val))
    owl_relation.append(sub_mat)
owl_relation = np.array(owl_relation)

file_in = open(files_src + 'entity2vec_250epoch.vec', 'r')
for line in file_in:
    x = line.split()
    sub_mat = []
    for val in x:
コード例 #42
0
# data path
ctl_name="CTL" #os.environ["ctl_name"]
exp_name="TSIS" #os.environ["exp_name"]
ctl_pref="solar_CTL_cesm211_ETEST-f19_g17-ens_mean_2010-2019"
exp_pref="solar_TSIS_cesm211_ETEST-f19_g17-ens_mean_2010-2019"

fpath_ctl="/raid00/xianwen/data/cesm211_solar_exp/"+ctl_pref+"/climo/"
fpath_exp="/raid00/xianwen/data/cesm211_solar_exp/"+exp_pref+"/climo/"
 
years=np.arange(2010,2020) 
#months_all=["01","02","03","04","05","06","07","08","09","10","11","12"]

var_group_todo=1
# variable group 1:
varnms=np.array(["U"])
#varnms=np.array(["FSNTOA","FSNS","TS"])
var_long_name="zonal wind"
figure_name="fig6aux_vertical_heat_flux_zonal_JJA"
units=r"m $^-$$^1$"

nlat=np.int64(96)
nlev=np.int64(32)
means_yby_ctl=np.zeros((years.size,varnms.size,nlev,nlat)) #year by year mean for each variable
means_yby_exp=np.zeros((years.size,varnms.size,nlev,nlat)) #year by year mean for each variable
means_ctl=np.zeros((varnms.size,nlev,nlat)) #multi-year mean for each variable
means_exp=np.zeros((varnms.size,nlev,nlat)) #multi-year mean for each variable
diffs=np.zeros((varnms.size,nlev,nlat)) #multi-year exp-ctl diff for each variable
pvals=np.zeros((varnms.size,nlev,nlat)) #pvalues of ttest

means_yby_ps=np.zeros((years.size,nlat))
コード例 #43
0
ファイル: nimrod_single_MB.py プロジェクト: gptune/GPTune
def objectives(point):
    nodes = point['nodes']
    cores = point['cores']
    nstepmax = point['nstepmax']
    nstepmin = point['nstepmin']
    bmin = point['bmin']
    bmax = point['bmax']
    eta = point['eta']
    
    nprocmax = nodes*cores

    def budget_map(b, nmin=10, nmax=100):
        k1 = (nmax-nmin)/(bmax-bmin)
        b1 = nmin - k1
        assert k1 * bmax + b1 == nmax
        return int(k1 * b + b1) 
        # return int(45*(np.log(b)/np.log(eta)) + 10)
        # return int(10*b)
    try:
        budget = point['budget']
        nstep = budget_map(budget,nstepmin,nstepmax)
    except:
        nstep = budget_map(bmax,nstepmin,nstepmax)

    # COLPERM = point['COLPERM']
    # ROWPERM = point['ROWPERM']    
    
    COLPERM = '4'
    ROWPERM = '2'
    mx = point['mx']
    my = point['my']
    lphi = point['lphi']
    # nstep = point['nstep']
    # nprows = 2**point['nprows']
    # nproc = 2**point['nproc']
    # nproc = 32
    NSUP = point['NSUP']
    NREL = point['NREL']
    nbx = point['nbx']
    nby = point['nby']
    # nblock     = int(nprocmax/nproc)
    # npcols     = int(nproc/ nprows)
    params = ['mx',mx,'my',my,'lphi',lphi,'nstep',nstep,'ROWPERM', ROWPERM, 'COLPERM', COLPERM, 'NSUP', NSUP, 'NREL', NREL, 'nbx', nbx, 'nby', nby]

    # # INPUTDIR = os.path.abspath(__file__ + "/../superlu_dist/EXAMPLE/")

    nthreads   = 1


    """ pass some parameters through environment variables """	


    info = MPI.Info.Create()
    envstr= 'OMP_NUM_THREADS=1\n'
    envstr+= 'NREL=%d\n' %(NREL)   
    envstr+= 'NSUP=%d\n' %(NSUP)   
    info.Set('env',envstr)

    #####################################
    ####### npernode is very important, without setting it the application can be much slower
    info.Set('npernode','%d'%(cores)) # flat MPI # YL: npernode is deprecated in openmpi 4.0, but no other parameter (e.g. 'map-by') works
    #####################################

    fin = open("./nimrod_template.in","rt")
    fout = open("./nimrod.in","wt")

    for line in fin:
        #read replace the string and write to output file
        if(line.find("iopts(3)")!=-1):
            fout.write("iopts(3)= %s\n"%(ROWPERM))
        elif(line.find("iopts(4)")!=-1):
            fout.write("iopts(4)= %s\n"%(COLPERM))    
        elif(line.find("lphi")!=-1):
            fout.write("lphi= %s\n"%(lphi))    
        elif(line.find("nlayers")!=-1):
            fout.write("nlayers= %s\n"%(int(np.floor(2**lphi/3.0)+1)))  	
        elif(line.find("mx")!=-1):
            fout.write("mx= %s\n"%(2**mx))
        elif(line.find("nstep")!=-1):
            fout.write("nstep= %s\n"%(nstep))  			  
        elif(line.find("my")!=-1):
            fout.write("my= %s\n"%(2**my))   
        elif(line.find("nxbl")!=-1):
            fout.write("nxbl= %s\n"%(int(2**mx/2**nbx)))  
        elif(line.find("nybl")!=-1):
            fout.write("nybl= %s\n"%(int(2**my/2**nby)))  									  						        
        else:
            fout.write(line)
    #close input and output files
    fin.close()
    fout.close()


    nlayers=int(np.floor(2**lphi/3.0)+1)
    nproc = int(nprocmax/nlayers)*nlayers
    if(nprocmax<nlayers):
        print('nprocmax', nprocmax, 'nlayers', nlayers, 'decrease lphi!')
        raise Exception("nprocmax<nlayers")
    if(nproc>int(2**mx/2**nbx)*int(2**my/2**nby)*int(np.floor(2**lphi/3.0)+1)): # nproc <= nlayers*nxbl*nybl
        nproc = int(2**mx/2**nbx)*int(2**my/2**nby)*int(np.floor(2**lphi/3.0)+1) 

    os.system("./nimset")


    nrep=1 #3
    hist=[]
    for i in range(nrep):
        """ use MPI spawn to call the executable, and pass the other parameters and inputs through command line """
        print('exec', "./nimrod_spawn", 'nproc', nproc, 'env', 'OMP_NUM_THREADS=%d' %(nthreads), 'NSUP=%d' %(NSUP), 'NREL=%d' %(NREL))
        comm = MPI.COMM_SELF.Spawn("./nimrod_spawn", maxprocs=nproc,info=info)
        """ gather the return value using the inter-communicator, also refer to the INPUTDIR/pddrive_spawn.c to see how the return value are communicated """																	
        tmpdata = np.array([0,0,0,0,0],dtype=np.float64)
        comm.Reduce(sendbuf=None, recvbuf=[tmpdata,MPI.DOUBLE],op=MPI.MAX,root=mpi4py.MPI.ROOT) 
        comm.Disconnect()
        time.sleep(5.0)
        hist.append(tmpdata)
        print(params, ' nimrod time (trial) -- loop:', tmpdata[0],'slu: ', tmpdata[1],'factor: ', tmpdata[2], 'iter: ', tmpdata[3], 'total: ', tmpdata[4])
    
    tmpdata = min(hist, key=lambda x: x[0])
    retval = tmpdata[0]
    print(params, ' nimrod time -- loop:', tmpdata[0],'slu: ', tmpdata[1],'factor: ', tmpdata[2], 'iter: ', tmpdata[3], 'total: ', tmpdata[4])

    return retval 
コード例 #44
0
images = []
grayimages = []
filteredimages = []

np.random.shuffle(imagepaths)
for imagepath in imagepaths:
    print(imagepath)
    img = cv2.imread(imagepath).astype(np.float32)
    img = normalize_image255(img)
    gray_img = make_grayscale(img)
    filtered_img = filter_image_sobelx(gray_img)
    
    images.append(img)
    grayimages.append(gray_img)
    filteredimages.append(filtered_img)
    
images = np.array(images, dtype='float32')
grayimages = np.array(grayimages, dtype='float32')
filteredimages = np.array(filteredimages, dtype='float32')

# Expand the image dimension to conform with the shape required by keras and tensorflow, inputshape=(..., h, w, nchannels).
grayimages = np.expand_dims(grayimages, -1)
filteredimages = np.expand_dims(filteredimages, -1)

print("images shape: {}".format(images.shape))
print("grayimages shape: {}".format(grayimages.shape))
print("filteredimages shape: {}".format(filteredimages.shape))

# Visualize an arbitrary image and the filtered version of it
margin_img = np.ones(shape=(256, 10, 3))
combined_image = np.hstack((img, margin_img, np.dstack((gray_img,)*3), margin_img, np.dstack((normalize_image(filtered_img),)*3)))
コード例 #45
0
        fi[i, j] = 2 * math.pi * np.random.rand()

G = np.zeros([len(T), Num])
for i in range(len(T)):
    for j in range(Num):
        G[i, j] = Gt(W, T[i], dW, fi[j, :])
CovList1 = []
TimeInter = []
Base = G[0, :]
for i in range(len(T)):
    Comp = G[i, :]
    CovList1.append(pearsonr(Base, Comp)[0])
    TimeInter.append(T[i] - T[0])

fig1 = plt.figure()
plt.plot(np.array(TimeInter),
         np.array(CovList1),
         label='Simulate',
         linestyle='--')
plt.plot(np.array(TimeInter),
         np.exp(-np.array(TimeInter)),
         label='Target',
         linestyle='-')
plt.xlabel('Time Interval t(s)')
plt.ylabel('Correlation Coefficient R(t)')
plt.xticks(np.arange(0, T1 + 0.1, 0.1))
plt.legend(bbox_to_anchor=(1, 1), loc='upper left', ncol=1, frameon=0)
plt.grid(True)

##----------------------------KL-expansion
dT = 0.01
コード例 #46
0
class AugmentSelection:
    def __init__(self, flip=False, tint=False, degree=0., crop=(0, 0), scale=1.):
        self.flip = flip
        self.tint = tint
        self.degree = degree  # rotate
        self.crop = crop  # shift actually
        self.scale = scale

    @staticmethod  # a staticmethod can be called on either the class or an instance
    def random(transform_params):
        flip = random.uniform(0., 1.) < transform_params.flip_prob
        tint = random.uniform(0., 1.) < transform_params.tint_prob
        degree = random.uniform(-1., 1.) * transform_params.max_rotate_degree

        scale = (transform_params.scale_max - transform_params.scale_min) * random.uniform(0., 1.) + \
                transform_params.scale_min \
            if random.uniform(0., 1.) < transform_params.scale_prob else 1.

        x_offset = int(random.uniform(-1., 1.) * transform_params.center_perterb_max)
        y_offset = int(random.uniform(-1., 1.) * transform_params.center_perterb_max)

        return AugmentSelection(flip, tint, degree, (x_offset, y_offset), scale)

    @staticmethod
    def unrandom():
        flip = False
        tint = False
        degree = 0.
        scale = 1.
        x_offset = 0
        y_offset = 0

        return AugmentSelection(flip, tint, degree, (x_offset, y_offset), scale)

    def affine(self, center, scale_self, config):
        # the main idea: we will do all image transformations with one affine matrix.
        # this saves lot of cpu and make code significantly shorter
        # same affine matrix could be used to transform joint coordinates afterwards
        scale_self *= (config.height / (config.height - 1))

        A = cos(self.degree / 180. * pi)
        B = sin(self.degree / 180. * pi)

        scale_size = config.transform_params.target_dist / scale_self * self.scale
        # Is target_dist the ratio the person should occupy in the whole image?
        # It used in picture augmentation during training. Rough meaning is "height of main person on image should
        # be approximately 0.6 of the original image size". It used in this file in my code:
        # https://github.com/anatolix/keras_Realtime_Multi-Person_Pose_Estimation/blob/master/py_rmpe_server/py_rmpe_transformer.py
        # This mean we will scale picture so height of person always will be 0.6 of picture.
        # After it we apply random scaling (self.scale) from 0.6 to 1.1
        (width, height) = center
        center_x = width
        center_y = height

        # For convenience, first translate the image so it is centered at the origin
        center2zero = np.array([[1., 0., -center_x],
                                [0., 1., -center_y],
                                [0., 0., 1.]])

        rotate = np.array([[A, B, 0],
                           [-B, A, 0],
                           [0, 0, 1.]])

        scale = np.array([[scale_size, 0, 0],
                          [0, scale_size, 0],
                          [0, 0, 1.]])

        flip = np.array([[-1 if self.flip else 1., 0., 0.],
                         [0., 1., 0.],
                         [0., 0., 1.]])

        # Finally, translate from the origin back to the center of the target image size and apply the random shift
        center2center = np.array([[1., 0., config.width / 2 - 0.5 + self.crop[0]],
                                  [0., 1., config.height / 2 - 0.5 + self.crop[1]],
                                  [0., 0., 1.]])

        # order of combination is reversed
        # This depends on whether coordinates are row or column vectors (matrix on the left vs the right); here coordinates are column vectors
        combined = center2center.dot(flip).dot(scale).dot(rotate).dot(center2zero)

        return combined[0:2], scale_size
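
The comments in affine() note that the same combined matrix can be reused to transform joint coordinates afterwards. A minimal sketch of that step, assuming joints is an (N, 2) array of (x, y) keypoints and combined is the 2x3 matrix returned above:

import numpy as np

def transform_joints(joints, combined):
    # append a homogeneous coordinate, apply the 2x3 affine matrix, return (N, 2) points
    ones = np.ones((joints.shape[0], 1))
    homogeneous = np.hstack([joints, ones])   # (N, 3)
    return homogeneous.dot(combined.T)        # (N, 2)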
コード例 #47
0
model.compile(Adam(lr=0.1), 'categorical_crossentropy', metrics=['accuracy'])
history = model.fit(X, y_cat, verbose=1, batch_size=50, epochs=10)


# function for creating contours around classes
def plot_multiclass_decision_boundary(X, y, model):
    x_span = np.linspace(min(X[:, 0]) - 1, max(X[:, 0]) + 1)
    y_span = np.linspace(min(X[:, 1]) - 1, max(X[:, 1]) + 1)
    xx, yy = np.meshgrid(x_span, y_span)
    grid = np.c_[xx.ravel(), yy.ravel()]
    pred_func = model.predict_classes(grid)
    z = pred_func.reshape(xx.shape)
    plt.contourf(xx, yy, z)


# plots data with separating contours
plot_multiclass_decision_boundary(X, y_cat, model)
plt.scatter(X[y == 0, 0], X[y == 0, 1])
plt.scatter(X[y == 1, 0], X[y == 1, 1])
plt.scatter(X[y == 2, 0], X[y == 2, 1])
plt.scatter(X[y == 3, 0], X[y == 3, 1])
plt.scatter(X[y == 4, 0], X[y == 4, 1])

# creates a new point which is then categorized
x = -0.5
y = -0.5
point = np.array([[x, y]])
prediction = model.predict_classes(point)
plt.plot([x], [y], marker='o', markersize=10, color="yellow")
print("Prediction is: ", prediction)
コード例 #48
0
ファイル: nimrod_single_MB.py プロジェクト: gptune/GPTune
def main():

    
    args = parse_args()
    ntask = args.ntask
    Nloop = args.Nloop
    bmin = args.bmin
    bmax = args.bmax
    eta = args.eta

    TUNER_NAME = args.optimization
    (machine, processor, nodes, cores) = GetMachineConfiguration()
    print ("machine: " + machine + " processor: " + processor + " num_nodes: " + str(nodes) + " num_cores: " + str(cores))

    nstepmax = args.nstepmax
    nstepmin = args.nstepmin
    
    os.environ['TUNER_NAME'] = TUNER_NAME



    # Input parameters
    # ROWPERM   = Categoricalnorm (['1', '2'], transform="onehot", name="ROWPERM")
    # COLPERM   = Categoricalnorm (['2', '4'], transform="onehot", name="COLPERM")
    # nprows    = Integer     (0, 5, transform="normalize", name="nprows")
    # nproc    = Integer     (5, 6, transform="normalize", name="nproc")
    NSUP      = Integer     (30, 300, transform="normalize", name="NSUP")
    NREL      = Integer     (10, 40, transform="normalize", name="NREL")
    nbx      = Integer     (1, 3, transform="normalize", name="nbx")	
    nby      = Integer     (1, 3, transform="normalize", name="nby")	

    time   = Real        (float("-Inf") , float("Inf"), transform="normalize", name="time")

    # nstep      = Integer     (3, 15, transform="normalize", name="nstep")
    lphi      = Integer     (2, 3, transform="normalize", name="lphi")
    mx      = Integer     (5, 6, transform="normalize", name="mx")
    my      = Integer     (7, 8, transform="normalize", name="my")

    IS = Space([mx,my,lphi])
    # PS = Space([ROWPERM, COLPERM, nprows, nproc, NSUP, NREL])
    # PS = Space([ROWPERM, COLPERM, NSUP, NREL, nbx, nby])
    PS = Space([NSUP, NREL, nbx, nby])
    OS = Space([time])
    cst1 = "NSUP >= NREL"
    constraints = {"cst1" : cst1}
    models = {}
    constants={"nodes":nodes,"cores":cores,"nstepmin":nstepmin,"nstepmax":nstepmax,"bmin":bmin,"bmax":bmax,"eta":eta}

    """ Print all input and parameter samples """	
    print(IS, PS, OS, constraints, models)

    BINDIR = os.path.abspath("/project/projectdirs/m2957/liuyangz/my_research/nimrod/nimdevel_spawn/build_haswell_gnu_openmpi/bin")
    # BINDIR = os.path.abspath("/project/projectdirs/m2957/liuyangz/my_research/nimrod/nimdevel_spawn/build_knl_gnu_openmpi/bin")
    RUNDIR = os.path.abspath("/project/projectdirs/m2957/liuyangz/my_research/nimrod/nimrod_input")
    os.system("cp %s/nimrod.in ./nimrod_template.in"%(RUNDIR))
    os.system("cp %s/fluxgrid.in ."%(RUNDIR))
    os.system("cp %s/g163518.03130 ."%(RUNDIR))
    os.system("cp %s/p163518.03130 ."%(RUNDIR))
    os.system("cp %s/nimset ."%(RUNDIR))
    os.system("cp %s/nimrod ./nimrod_spawn"%(BINDIR))



    problem = TuningProblem(IS, PS, OS, objectives, constraints, None, constants=constants)
    computer = Computer(nodes = nodes, cores = cores, hosts = None)  

    """ Set and validate options """	
    options = Options()

    options['model_restarts'] = 1
    options['distributed_memory_parallelism'] = False
    options['shared_memory_parallelism'] = False
    options['objective_evaluation_parallelism'] = False
    options['objective_multisample_threads'] = 1
    options['objective_multisample_processes'] = 1
    options['objective_nprocmax'] = 1
    options['model_processes'] = 1
    # options['model_threads'] = 1
    # options['model_restart_processes'] = 1
    # options['search_multitask_processes'] = 1
    # options['search_multitask_threads'] = 1
    # options['search_threads'] = 16
    # options['mpi_comm'] = None
    # options['mpi_comm'] = mpi4py.MPI.COMM_WORLD
    options['model_class'] = 'Model_LCM' if args.LCMmodel == 'LCM' else 'Model_GPy_LCM' # Model_GPy_LCM or Model_LCM
    options['verbose'] = True
    options['sample_class'] = 'SampleLHSMDU'
    options['sample_algo'] = 'LHS-MDU'
    options.validate(computer=computer)

    options['budget_min'] = bmin
    options['budget_max'] = bmax
    options['budget_base'] = eta
    smax = int(np.floor(np.log(options['budget_max']/options['budget_min'])/np.log(options['budget_base'])))
    budgets = [options['budget_max'] /options['budget_base']**x for x in range(smax+1)]
    NSs = [int((smax+1)/(s+1))*options['budget_base']**s for s in range(smax+1)] 
    NSs_all = NSs.copy()
    budget_all = budgets.copy()
    for s in range(smax+1):
        for n in range(s):
            NSs_all.append(int(NSs[s]/options['budget_base']**(n+1)))
            budget_all.append(int(budgets[s]*options['budget_base']**(n+1)))
    Ntotal = int(sum(NSs_all) * Nloop)
    Btotal = int(np.dot(np.array(NSs_all), np.array(budget_all))/options['budget_max']*Nloop) # total number of evaluations at highest budget -- used for single-fidelity tuners
    print(f"bmin = {bmin}, bmax = {bmax}, eta = {eta}, smax = {smax}")
    print("samples in one multi-armed bandit loop, NSs_all = ", NSs_all)
    print("total number of samples: ", Ntotal)
    print("total number of evaluations at highest budget: ", Btotal)
    print(f"Sampler: {options['sample_class']}, {options['sample_algo']}")
    print()
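    # Worked example of the schedule above (an illustration, not output of this run): with
    # bmin=1, bmax=8, eta=2 and Nloop=1, smax = 3, budgets = [8, 4, 2, 1], NSs = [4, 4, 4, 8],
    # NSs_all = [4, 4, 4, 8, 2, 2, 1, 4, 2, 1], budget_all = [8, 4, 2, 1, 8, 4, 8, 2, 4, 8],
    # so Ntotal = 32 samples and Btotal = 120 / 8 = 15 highest-budget evaluations.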
    
    data = Data(problem)
    # giventask = [[1.0], [5.0], [10.0]]
    # giventask = [[1.0], [1.2], [1.3]]
    giventask = [[6,8,2]]
    Pdefault = [128,20,2,2]
    # t_end = args.t_end
    # giventask = [[i] for i in np.arange(1, t_end, (t_end-1)/ntask).tolist()] # 10 tasks
    # giventask = [[i] for i in np.arange(1.0, 6.0, 1.0).tolist()] # 5 tasks
    NI=len(giventask)
    assert NI == ntask # make sure number of tasks match
    
    np.set_printoptions(suppress=False, precision=4)
    if(TUNER_NAME=='GPTuneBand'):
        NS = Nloop
        data = Data(problem)
        gt = GPTune_MB(problem, computer=computer, NS=Nloop, options=options)
        (data, stats, data_hist)=gt.MB_LCM(NS = Nloop, Igiven = giventask, Pdefault=Pdefault)
        print("Tuner: ", TUNER_NAME)
        print("Sampler class: ", options['sample_class'])
        print("Model class: ", options['model_class'])
        print("stats: ", stats)
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print("    mx:%s my:%s lphi:%s"%(data.I[tid][0],data.I[tid][1],data.I[tid][2]))
            print("    Ps ", data.P[tid])
            print("    Os ", data.O[tid].tolist())
            nth = np.argmin(data.O[tid])
            Popt = data.P[tid][nth]
            # find which arm and which sample the optimal param is from
            for arm in range(len(data_hist.P)):
                try:
                    idx = (data_hist.P[arm]).index(Popt)
                    arm_opt = arm
                except ValueError:
                    pass
            print('    Popt ', Popt, 'Oopt ', min(data.O[tid])[0], 'nth ', nth, 'nth-bandit (s, nth) = ', (arm_opt, idx))
         
    if(TUNER_NAME=='GPTune'):
        NS = Btotal
        if args.nrun > 0:
            NS = args.nrun
        NS1 = max(NS//2, 1)
        
        data.I = giventask
        data.P = [[Pdefault]] * NI

        gt = GPTune(problem, computer=computer, data=data, options=options, driverabspath=os.path.abspath(__file__))        
        """ Building MLA with the given list of tasks """
        (data, model, stats) = gt.MLA(NS=NS, NI=NI, Igiven=giventask, NS1=NS1)
        print("stats: ", stats)
        print("Sampler class: ", options['sample_class'], "Sample algo:", options['sample_algo'])
        print("Model class: ", options['model_class'])
        if options['model_class'] == 'Model_LCM' and NI > 1:
            print("Get correlation metric ... ")
            C = model[0].M.kern.get_correlation_metric()
            print("The correlation matrix C is \n", C)
        elif options['model_class'] == 'Model_GPy_LCM' and NI > 1:
            print("Get correlation metric ... ")
            C = model[0].get_correlation_metric(NI)
            print("The correlation matrix C is \n", C)

        
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print("    mx:%s my:%s lphi:%s"%(data.I[tid][0],data.I[tid][1],data.I[tid][2]))
            print("    Ps ", data.P[tid])
            print("    Os ", data.O[tid])
            print('    Popt ', data.P[tid][np.argmin(data.O[tid])], f'Oopt  {min(data.O[tid])[0]:.3f}', 'nth ', np.argmin(data.O[tid]))
            
    if(TUNER_NAME=='opentuner'):
        NS = Btotal
        (data,stats) = OpenTuner(T=giventask, NS=NS, tp=problem, computer=computer, run_id="OpenTuner", niter=1, technique=None)
        print("stats: ", stats)

        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print("    mx:%s my:%s lphi:%s"%(data.I[tid][0],data.I[tid][1],data.I[tid][2]))
            print("    Ps ", data.P[tid])
            print("    Os ", data.O[tid])
            print('    Popt ', data.P[tid][np.argmin(data.O[tid][:NS])], 'Oopt ', min(data.O[tid][:NS])[0], 'nth ', np.argmin(data.O[tid][:NS]))
            
    if(TUNER_NAME=='hpbandster'):
        NS = Btotal
        (data,stats)=HpBandSter(T=giventask, NS=NS, tp=problem, computer=computer, run_id="HpBandSter", niter=1)
        print("stats: ", stats)
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print("    mx:%s my:%s lphi:%s"%(data.I[tid][0],data.I[tid][1],data.I[tid][2]))
            print("    Ps ", data.P[tid])
            print("    Os ", data.O[tid])
            print('    Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))
            
    
    if(TUNER_NAME=='TPE'):
        NS = Ntotal
        (data,stats)=callhpbandster_bandit.HpBandSter(T=giventask, NS=NS, tp=problem, computer=computer, options=options, run_id="hpbandster_bandit", niter=1)
        print("Tuner: ", TUNER_NAME)
        print("stats: ", stats)
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print("    mx:%s my:%s lphi:%s"%(data.I[tid][0],data.I[tid][1],data.I[tid][2]))
            print("    Ps ", data.P[tid])
            print("    Os ", data.O[tid].tolist())
            # print('    Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))
            max_budget = 0.
            Oopt = 99999
            Popt = None
            nth = None
            for idx, (config, out) in enumerate(zip(data.P[tid], data.O[tid].tolist())):
                for subout in out[0]:
                    budget_cur = subout[0]
                    if budget_cur > max_budget:
                        max_budget = budget_cur
                        Oopt = subout[1]
                        Popt = config
                        nth = idx
                    elif budget_cur == max_budget:
                        if subout[1] < Oopt:
                            Oopt = subout[1]
                            Popt = config
                            nth = idx                    
            print('    Popt ', Popt, 'Oopt ', Oopt, 'nth ', nth)
コード例 #49
0
ファイル: conj_basic.py プロジェクト: vene/misc-nlp
    colors = cycle('bgrcmyk')
    for k, col in zip(range(n_clusters_), colors):
        class_members = af.labels_ == k
        cluster_center = X[indices[k]]
        pl.plot(X[class_members,0], X[class_members,1], col+'.')
        pl.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
                                         markeredgecolor='k', markersize=14)
        for x in X[class_members]:
            pl.plot([cluster_center[0], x[0]], [cluster_center[1], x[1]], col) 

    pl.title('Estimated number of clusters: %d' % n_clusters_)
    pl.show()

if __name__ == '__main__':
    with open('infinitives.txt') as f:
        infinitives = np.array([unicode(inf) for inf in f])
    ta = False
    if ta:   
        ends_in_ta = np.array([inf.endswith('ta\n') for inf in infinitives])
        infinitives = infinitives[ends_in_ta]

    affinity(infinitives[:2000])    
#    plot_projection(RandomizedPCA(n_components=2), infinitives, 
#                    "PCA projection of %s$ infinitives" % ("-ta" if ta else ""))
#    plot_projection(NMF(n_components=2, tol=0.01, init="nndsvda"), infinitives, 
#                    "NMF projection of %s$ infinitives" % ("-ta" if ta else ""))
#    k_clusters(10, infinitives)
#    print infinitives[0]
#    data, vect = extract_features(infinitives, 2, True)
#    print vect.vocabulary.keys()
#    for token, idx in vect.vocabulary.items():
コード例 #50
0
image = rgb2gray(imageRGB)
row, col = np.shape(image)

alpha = 15
alpha_rad = np.pi * alpha / 180

cx = col / 2
cy = row / 2

dx = cx - cx * np.cos(alpha_rad) - cy * np.sin(alpha_rad)
dy = cy + cx * np.sin(alpha_rad) - cy * np.cos(alpha_rad)

rot_m = np.matrix([[np.cos(alpha_rad), np.sin(alpha_rad), dx],\
         [-np.sin(alpha_rad), np.cos(alpha_rad), dy]])

p0 = np.round(rot_m * np.array([0, 0, 1]).reshape(3, 1),
              0).astype(int)  # x0,y0
p1 = np.round(rot_m * np.array([col, 0, 1]).reshape(3, 1),
              0).astype(int)  # x1,y0
p2 = np.round(rot_m * np.array([0, row, 1]).reshape(3, 1),
              0).astype(int)  # x0,y1
p3 = np.round(rot_m * np.array([col, row, 1]).reshape(3, 1),
              0).astype(int)  # x1,y1

p = [p0, p1, p2, p3]

i = 0

print("rotation ange...")
print(str(alpha) + "degrees")
コード例 #51
0
ファイル: qrsdet_dnn.py プロジェクト: marianux/pytest
                     default=10, 
                     type=int, 
                     help='Name of the database')

args = parser.parse_args()

# data
train_list_fn = args.train_list
val_list_fn = args.val_list
test_list_fn = args.test_list

# model
drop_out = args.dropout

# Fit configuration
all_lr = np.array(args.learning_rates)
batch_size = args.batch_size
epochs = args.epochs


## Train
print('Build train generator ...')

if train_list_fn == '':
    raise EnvironmentError

train_samples, train_features, train_paths = get_dataset_size(train_list_fn)

train_generator = generator_class(train_paths, batch_size)

コード例 #52
0
import os

import cv2
import numpy as np

# This script detects faces in the images in your data folder and crops them out.

# face detecting network
net = cv2.dnn.readNetFromCaffe("deploy.prototxt.txt",
                               "res10_300x300_ssd_iter_140000.caffemodel")
for filename in os.listdir('/Users/kubab/PycharmProjects/ProjektInd/ZDJ/'):
    filename2 = ('/Users/kubab/PycharmProjects/ProjektInd/it/' + filename)
    img = cv2.imread(filename2)
    image = cv2.resize(img, (300, 300))
    (h, w) = image.shape[:2]
    blob = cv2.dnn.blobFromImage(cv2.resize(image, (300, 300)), 1.0,
                                 (300, 300), (104.0, 177.0, 123.0))
    # face detecting
    net.setInput(blob)
    detections = net.forward()

    for i in range(0, detections.shape[2]):
        confidence = detections[0, 0, i, 2]
        if confidence > 0.5:
            # face excision
            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            (startX, startY, endX, endY) = box.astype("int")
            y = startY - 10 if startY - 10 > 10 else startY + 10
            cropped = image[startY - w:endY, startX - h:endX]

    # saving file with excised face
    cv2.imwrite(filename, cropped)
    cv2.waitKey(0)
コード例 #53
0
# https://www.hackerrank.com/challenges/np-transpose-and-flatten/problem
import numpy
n = raw_input().split()
n = list(map(int, n))

l = []
for x in range(n[0]):
    arrin = raw_input().split()
    arrin = list(map(int, arrin))
    l.append(arrin)

l = numpy.array(l)
print numpy.transpose(l)
print l.flatten()
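
This snippet is written for Python 2 (raw_input and print statements). An equivalent Python 3 version, as a sketch:

import numpy

n, m = map(int, input().split())
arr = numpy.array([list(map(int, input().split())) for _ in range(n)])
print(numpy.transpose(arr))
print(arr.flatten())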
コード例 #54
0
    def Mission_Trigger(self):
        if self.arm_move == True and robot_ctr.get_robot_motion_state() == Arm_status.Isbusy:
            self.arm_move = False
        # if Arm_state_flag == Arm_status.Idle and Sent_data_flag == 1:
        if self.monitor_suc == True:
            robot_inputs_state = robot_ctr.Get_current_robot_inputs()
            if robot_inputs_state[0] == True:
                robot_ctr.Stop_motion()  # suction has grabbed the object, so stop and move on to placing it
                time.sleep(0.2)
                self.monitor_suc = False
                self.state = State.pick_obj
        # dig_inputs = robot_ctr.Get_current_digital_inputs()
        # if dig_inputs[2] == True:
        #     self.stop_flg = True
        #     return
        # elif dig_inputs[2] == False and self.stop_flg == True:
        #     self.stop_flg = False
        #     self.state = State.move2pic

        if robot_ctr.get_robot_motion_state() == Arm_status.Idle and self.arm_move == False:
            if self.state == State.move2pic:
                pos = self.pic_pos[self.pic_pos_indx]
                # self.pic_pos_indx += 1
                position = [pos[0], pos[1]-3, pos[2], pos[3], pos[4], pos[5]]
                # pos[1] -= 3
                robot_ctr.Set_ptp_speed(10)
                robot_ctr.Step_AbsPTPCmd(position)
                self.state = State.get_objinfo
                self.arm_move = True
                # time.sleep(10)

            elif self.state == State.take_pic:
                time.sleep(0.2)
                req = eye2baseRequest()
                req.ini_pose = np.array(np.identity(4)).reshape(-1)
                trans = self.hand_eye_client(req).tar_pose
                req = save_pcdRequest()
                req.curr_trans = np.array(trans)
                req.name = 'mdfk'                               
                if self.pic_pos_indx < len(self.pic_pos):
                    self.state = State.move2pic
                    req.save_mix = False
                else:
                    self.state = State.get_objinfo
                    req.save_mix = True
                self.get_pcd_client(req)

            elif self.state == State.get_objinfo:
                time.sleep(0.2)
                req = snapshotRequest()
                req.call = 0
                res = self.get_obj_client(req)
                # res = snapshotResponse()
                # res.doit = True
                # res.type = 0
                # t = tf.transformations.rotation_matrix(radians(90), [0, 1, 0], point=None)
                # res.trans = np.array([1,0,0,0,0,1,0,0,0,0,1,0.59,0,0,0,1])
                # t = np.array([0.746, -0.665, -0.011, 0.172, -0.663, -0.745, 0.066, -0.089, -0.053, -0.042, -0.997, 0.58, 0,0,0,1])
                # t = [0.108, 0.977, -0.180, -0.089, -0.993, 0.103, -0.040, -0.183, -0.020, 0.183, 0.982,0.58, 0,0,0,1]
                # t = [-0.040, -0.998, 0.033, -0.047, -0.999, 0.040, 0.012, -0.034, -0.013, -0.033, -0.999,  0.58, 0,0,0,1]
                # t = [0.906, -0.388, -0.168, -0.243, 0.334, 0.900, -0.278, -0.029, 0.259, 0.196, 0.945, 0.58, 0, 0, 0, 1]
                # t = [0.660, 0.750, -0.022, -0.282, -0.750, 0.660, 0.006, 0.146, 0.019, 0.012, 0.999, 0.58,0, 0, 0, 1]
                # res.trans = np.array(t).reshape(-1)
                # res.trans[11] = 0.58
                if res.doit == True:
                    trans = np.mat(np.asarray(res.trans)).reshape(4,4)
                    trans[2,3] = 0.59
                    if res.type == 1:
                        trans = np.array(trans).reshape(-1)                        
                    elif res.type == 2: # y 90
                        # pre_trans = np.mat([[1., 0, 0, 0],
                        #                     [0,  1, 0, 0],
                        #                     [0,  0, 1, 0],
                        #                     [0,  0, 0, 1]])
                        pre_trans = tf.transformations.euler_matrix(0, radians(90), 0, axes='sxyz')
                        # trans = trans * pre_trans
                        trans = pre_trans * trans
                        trans = np.array(trans).reshape(-1)
                    elif res.type == 3: # -90
                        # pre_trans = np.mat([[1., 0, 0, 0],
                        #                     [0,  1, 0, 0],
                        #                     [0,  0, 1, 0],
                        #                     [0,  0, 0, 1]])
                        pre_trans = tf.transformations.euler_matrix(0, radians(-90), 0, axes='sxyz')
                        # trans = trans * pre_trans
                        trans = pre_trans * trans
                        print('fucktrans', trans)
                        trans = np.array(trans).reshape(-1)

                    req = eye2baseRequest()
                    req.ini_pose = trans ##
                    self.target_obj = self.hand_eye_client(req).tar_pose
                    self.target_obj = np.mat(self.target_obj).reshape(4,4)
                    self.right_side = self.check_side(self.target_obj)
                     
                    x, y, z = np.array(np.multiply(self.target_obj[0:3, 3:], 100)).reshape(-1)
                    a, b, c = [degrees(abc) for abc in tf.transformations.euler_from_matrix(self.target_obj, axes='sxyz')]
                    self.target_obj = [x, y, z, a, b, c]
                    print('self.target_obj\n ', self.target_obj)
                    self.state = State.move2objup
                    self.pic_pos_indx = 0
                else:
                    # self.pic_pos_indx += 1
                    self.pic_pos_indx = self.pic_pos_indx  if self.pic_pos_indx < len(self.pic_pos) else 0
                    self.state = State.move2pic

            elif self.state == State.move2objup:
                req = collision_avoidRequest()
                req.ini_pose = np.array(self.target_obj).reshape(-1) ####
                req.limit = 1.5
                req.dis = 6
                res = self.CA_client(req)
                pose = np.array(res.tar_pose)
                self.dis_trans = np.mat(res.dis_trans).reshape(4,4)
                self.suc_angle = res.suc_angle
                robot_ctr.Set_ptp_speed(10)
                robot_ctr.Step_AbsPTPCmd(pose)
                req = tool_angleRequest()
                req.angle = self.suc_angle
                res = self.tool_client(req)
                self.state = State.move2obj
                self.arm_move = True


            elif self.state == State.move2obj:
                robot_ctr.Set_digital_output(1,True)
                req = collision_avoidRequest()
                req.ini_pose = np.array(self.target_obj).reshape(-1) ####
                req.limit = 2
                req.dis = -1
                res = self.CA_client(req)
                pose = np.array(res.tar_pose)
                robot_ctr.Set_ptp_speed(10)
                robot_ctr.Step_AbsPTPCmd(pose)
                self.state = State.pick_obj
                self.monitor_suc = True
                self.arm_move = True

            elif self.state == State.pick_obj:
                req = collision_avoidRequest()
                req.ini_pose = np.array(self.target_obj).reshape(-1) ####
                req.limit = 2
                req.dis = 6
                res = self.CA_client(req)
                pose = np.array(res.tar_pose)
                robot_ctr.Set_ptp_speed(10)
                robot_ctr.Step_AbsPTPCmd(pose)
                self.state = State.move2binup
                self.arm_move = True

            elif self.state == State.move2binup:
                if self.monitor_suc == True:
                    time.sleep(0.3)
                    self.monitor_suc = False
                pose = [15,15,10,180,0,0]
                robot_ctr.Set_ptp_speed(10)
                robot_ctr.Step_AbsPTPCmd(pose)
                self.state = State.move2placeup
                self.arm_move = True

            

            elif self.state == State.move2placeup:
                req = tool_angleRequest()
                req.angle = 0
                res = self.tool_client(req)
                robot_inputs_state = robot_ctr.Get_current_robot_inputs()
                if robot_inputs_state[0] == False:
                    robot_ctr.Set_digital_output(1,False)
                    self.state = State.move2pic
                    return
                pose = [7.7,-18,5.5714,180,0,0]
                robot_ctr.Set_ptp_speed(10)
                robot_ctr.Step_AbsPTPCmd(pose)
                self.state = State.move2placeup1
                self.arm_move = True

            elif self.state == State.move2placeup1:
                pose = [7.7,-18,-22,180,0,0]
                trans = tf.transformations.euler_matrix(radians(pose[3]), radians(pose[4]), radians(pose[5]), axes='sxyz')
                trans = np.mat(trans) * self.dis_trans
                pose[3:] = [degrees(abc) for abc in tf.transformations.euler_from_matrix(trans, axes='sxyz')]
                print('pose:\n', pose)
                # robot_ctr.Set_ptp_speed(10)
                robot_ctr.Step_AbsPTPCmd(pose)
                self.state = State.place
                self.arm_move = True

            elif self.state == State.place:
                if self.right_side:
                    self.place_pos_right[0] += 6
                    if self.place_pos_right[0] > 18:
                        self.place_pos_right[0] = 18
                        self.place_pos_right[2] += 1.5
                    pose = copy.deepcopy(self.place_pos_right)  ##
                else:
                    self.place_pos_left[0] += 6
                    if self.place_pos_left[0] > 18:
                        self.place_pos_left[0] = 18
                        self.place_pos_left[2] += 1.5
                    pose = copy.deepcopy(self.place_pos_left) ##
                print('pose:\n', pose)
                trans = tf.transformations.euler_matrix(radians(pose[3]), radians(pose[4]), radians(pose[5]), axes='sxyz')
                print(trans)
                print(self.dis_trans)
                trans = np.mat(trans) * self.dis_trans
                pose[3:] = [degrees(abc) for abc in tf.transformations.euler_from_matrix(trans, axes='sxyz')]
                print('pose:\n', pose)
                robot_ctr.Set_ptp_speed(10)
                robot_ctr.Step_AbsPTPCmd(pose)
                self.state = State.placeup
                self.arm_move = True
                ##
            
            elif self.state == State.placeup:
                robot_ctr.Set_digital_output(1,False)
                pose = [7.7,-20,5.5714,180,0,0]
                robot_ctr.Set_ptp_speed(10)
                robot_ctr.Step_AbsPTPCmd(pose)
                self.state = State.move2pic
                self.arm_move = True
コード例 #55
0
def main(city):
    print os.getcwd()
    filenames = os.listdir('flaskexample/Data/')
    del filenames[0]

    for id_, filename in enumerate(filenames):
        if city not in filename:
            continue
    #     if id_<5:
    #         continue

        with open('flaskexample/Data/' + filename, 'r') as f:
            #rank by percent increase
            pred, cur = rank_by_increase(f)
        pc = np.column_stack((pred, cur))
        # print pc
        percGrowth = np.delete(np.divide(pc[:, 0], pc[:, 1]), 0)
        index = np.argsort(percGrowth)[::-1]
        thresholds = np.array([2e5, 5e5, 1e6])
        buys2 = np.empty((1, 1))

        try:
            #loop through thresholds, looking for good buys near those values
            for id_, value in enumerate(thresholds):
                print value

                buy = index[np.absolute(cur[index] - value) / value <
                            .2]  #20%a

                #if exists
                if buy.any():
                    #buys2 append
                    buys2 = np.append(
                        buys2, buy[np.where(~np.isnan(percGrowth[buy]))[0][0]])
            buys2 = np.delete(buys2, 0)

            print buys2
            with open('flaskexample/Data/' + filename, 'r') as f:
                ax2, city, state, lat, lng, name = plot_my_buys(
                    f, buys2.astype(int))

            print city + state

            ax2.hist(pc[~np.isnan(pc).any(axis=1)][:, 1] / 1000, 20)
            ax2.set_title(city + state + ', ' +
                          ' - Current Price Distribution')
            ax2.set_ylabel('Number')
            ax2.set_xlabel('Price Range [$1000s]')
            plt.tight_layout()

        except:
            continue
    plotstr = 'plot.png'
    plt.savefig('flaskexample/static/' + plotstr)
    myret = []
    for la, ln, na in zip(lat, lng, name):
        print na
        temp = {
            'plot_loc': plotstr,
            'lat': la,
            'lng': ln,
            'city': city,
            'state': state,
            'name': na
        }
        myret.append(temp)
    return json.dumps(myret)
コード例 #56
0
# -*- coding: utf-8 -*-
"""
Spyder Editor

This is a temporary script file.
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from imblearn.over_sampling import SMOTE

diabetes = pd.read_csv('diabetes_cleaned.csv')  # 4893 rows, 54 columns
# 765 positives, 4128 negatives

X = np.array(diabetes.loc[:, diabetes.columns != 'HbA1c_category'])  # 5267x52
y = np.array(diabetes.loc[:, diabetes.columns == 'HbA1c_category'])  # 5267x1


sm = SMOTE(random_state=2)
X_new, y_new = sm.fit_sample(X, y.ravel())
# 8866 rows, 54 columns

c = diabetes.columns.tolist()

c.remove('HbA1c_category')
diabetes = pd.DataFrame(data=X_new, columns=c)
diabetes['HbA1c_category'] = y_new

diabetes.to_csv(r'diabetes_cleaned_balanced.csv', index=False)

# New dataset: 8866x54
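
A quick sanity check of the class balance before and after SMOTE, as a sketch reusing the variables defined above:

print(dict(zip(*np.unique(y, return_counts=True))))       # original counts per class
print(dict(zip(*np.unique(y_new, return_counts=True))))   # counts after SMOTE (balanced)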
コード例 #57
0
rospy.init_node('robot_vs_webcam')
listener = tf.TransformListener()
for i in xrange(15):
    pause()
    apriltag_pose = lookupTransformList('/map', '/apriltag0_vicon', listener)
    pts_vicon.append(apriltag_pose[0:3])

    apriltag_detections = ROS_Wait_For_Msg('/apriltags/detections',
                                           AprilTagDetections).getmsg()
    for det in apriltag_detections.detections:
        if det.id == 0:
            pts_apriltag.append(pose2list(det.pose)[0:3])
            break

(R, t, rmse) = rigid_transform_3D(np.array(pts_apriltag), np.array(pts_vicon))

data = {}
data["pts_apriltag"] = pts_apriltag
data["pts_vicon"] = pts_vicon
with open('data.txt', 'w') as outfile:
    json.dump(data, outfile)

Rh = tfm.identity_matrix()
Rh[np.ix_([0, 1, 2], [0, 1, 2])] = R
quat = tfm.quaternion_from_matrix(Rh)

print 'webcam_T_robot:', " ".join('%.8e' % x
                                  for x in (t.tolist() + quat.tolist()))
print 'rmse:', rmse
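
rigid_transform_3D, which fits the rotation R, translation t, and RMSE between the AprilTag and Vicon point sets, is not shown in this excerpt; a minimal SVD-based (Kabsch) sketch consistent with how it is called:

import numpy as np

def rigid_transform_3D(A, B):
    # best-fit R, t such that R.dot(a) + t ~= b for corresponding rows of A and B (sketch)
    A, B = np.asarray(A, dtype=float), np.asarray(B, dtype=float)
    cA, cB = A.mean(axis=0), B.mean(axis=0)
    H = np.dot((A - cA).T, (B - cB))          # 3x3 cross-covariance
    U, S, Vt = np.linalg.svd(H)
    R = np.dot(Vt.T, U.T)
    if np.linalg.det(R) < 0:                  # correct an improper rotation (reflection)
        Vt[2, :] *= -1
        R = np.dot(Vt.T, U.T)
    t = cB - np.dot(R, cA)
    pred = np.dot(A, R.T) + t
    rmse = np.sqrt(np.mean(np.sum((pred - B) ** 2, axis=1)))
    return R, t, rmse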
コード例 #58
0
ファイル: exp1.py プロジェクト: rAm1n/social-relation
def validate(val_loader, model, criterion):
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to evaluate mode
    model.eval()
    end = time.time()

    correct = list()
    count = list()

    for i, (img, img_1, img_2, target) in enumerate(val_loader):
        # measure data loading time

        target = target.cuda(async=True)
        img_var = torch.autograd.Variable(img, volatile=True).cuda()
        img_1_var = torch.autograd.Variable(img_1, volatile=True).cuda()
        img_2_var = torch.autograd.Variable(img_2, volatile=True).cuda()
        target_var = torch.autograd.Variable(target, volatile=True)

        # compute output
        output = model(img_var, img_1_var, img_2_var)
        loss = criterion(output, target_var)

        # measure accuracy and record loss
        prec1, prec5, correct_batch, count_batch  = accuracy(output.data, target, \
         topk=(1, 5), num_cls=val_loader.dataset.config['num_class'])

        correct.append(correct_batch)
        count.append(count_batch)

        losses.update(loss.data[0], img.size(0))
        top1.update(prec1[0], img.size(0))
        top5.update(prec5[0], img.size(0))

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % args.print_freq == 0:
            logging.info('Test: [{0}/{1}]\t'
                         'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                         'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                         'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                         'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                             i,
                             len(val_loader),
                             batch_time=batch_time,
                             loss=losses,
                             top1=top1,
                             top5=top5))

    logging.info(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'.format(
        top1=top1, top5=top5))

    correct = np.array(correct).sum(0)
    count = np.array(count).sum(0).astype(np.int32)
    pres = correct / count

    for idx, item in enumerate(pres):
        msg = str(idx) + ' ' + str(count[idx]) + ' ' + str(item)
        logging.info(msg)

    return top1.avg
コード例 #59
0
import numpy as np
import json
import pyemblib
import random
from sklearn.manifold import MDS

embedding_file = "top_10000_emb.txt"

include = set(['red', 'black', 'green', 'orange', 'apple', 'king', 'queen', 'man', 'woman', 'moscow', 'russia', 'tokyo', 'japan'])

embeddings = pyemblib.read(embedding_file, mode=pyemblib.Mode.Text)
keys = list(embeddings.keys())
values = list(embeddings.values())

new_vals, new_keys = [], []

for i in range(len(keys)):
  if keys[i] in include or random.random() < 0.05: 
    new_keys.append(keys[i])
    new_vals.append(values[i])

values = np.array(new_vals)
keys = new_keys

mds = MDS(n_components=3)
realspace = mds.fit_transform(values)

f = open('3dembeddings.csv', 'w')
for i in range(len(keys)):
  f.write(keys[i] + ',' + str(realspace[i][0]) + ',' + str(realspace[i][1]) + ',' + str(realspace[i][2]) + '\n')
f.close()
コード例 #60
0
import json
import numpy as np
import scipy
import setuptools
import matplotlib
import sklearn.datasets as datasets
import sklearn.svm as svm
import pickle
from library import trainingSet

testSet = trainingSet('Data/test1.bin')

clf = pickle.load(model1)

print(clf.predict(np.array([[73, 20, 14, 13, 7, 100, 0, 0, 0]])))  # A
print(clf.predict(np.array([[17, 72, 96, 96, 97, 0, 0, 0, 0]])))  # B

#for data in testSet.data: