Example #1
File: base.py  Project: szhorizon/orange3
    def backmap_value(self, value, mapped_probs, n_values, backmappers):
        if backmappers is None:
            return value

        if value.ndim == 2:  # For multitarget, recursive call by columns
            new_value = np.zeros(value.shape)
            for i, n_value, backmapper in zip(
                    itertools.count(), n_values, backmappers):
                new_value[:, i] = self.backmap_value(
                    value[:, i], mapped_probs[:, i, :], [n_value], [backmapper])
            return new_value

        backmapper = backmappers[0]
        if backmapper is None:
            return value

        value = backmapper(value)
        nans = np.isnan(value)
        if not np.any(nans):
            return value
        if mapped_probs is not None:
            value[nans] = np.argmax(mapped_probs[nans], axis=1)
        else:
            value[nans] = np.random.RandomState(0).choice(
                backmapper(np.arange(0, n_values[0] - 1)),
                (np.sum(nans), ))
        return value
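
The interesting part of backmap_value is the NaN-filling step: unknown values are replaced by the class with the highest mapped probability in the same row. A small standalone illustration of just that step, using toy arrays rather than Orange's own data structures:

import numpy as np

value = np.array([0.0, np.nan, 1.0, np.nan])
mapped_probs = np.array([[0.9, 0.1],
                         [0.2, 0.8],
                         [0.4, 0.6],
                         [0.7, 0.3]])
nans = np.isnan(value)
# Rows with unknown values take the index of their largest probability.
value[nans] = np.argmax(mapped_probs[nans], axis=1)
print(value)  # [0. 1. 1. 0.]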
Example #2
    def GenerateSpins(self, NSpins):
        rng = np.random.RandomState()

        self.fSpinFreq = np.zeros(NSpins)
        self.fSpinSignal = np.zeros(NSpins)

        rand_r = np.sqrt(rng.uniform(0,1, size=NSpins))*self.fSampleR
        rand_phi = 2*np.pi*rng.uniform(0,1, size=NSpins)
        rand_z = self.fSampleL*rng.uniform(0,1, size=NSpins)-self.fSampleL/2.
        X = rand_r*np.cos(rand_phi)
        Y = rand_r*np.sin(rand_phi)
        ZRel = rand_z - self.fProbeCenter[2]
        self.fSpinFreq = (self.fBFieldShape[0] + self.fBFieldShape[1]*X + self.fBFieldShape[2]*Y
                          + self.fBFieldShape[3]*ZRel + self.fBFieldShape[4]*X*X
                          + self.fBFieldShape[5]*Y*Y + self.fBFieldShape[6]*ZRel*ZRel)
        idx = (np.floor(rand_r/self.fGridSize)*self.fSampleDimL
               + np.floor((self.fSampleL/2 + rand_z)/self.fGridSize)).astype(int)
        B_Field = np.sqrt((self.fB_coil_L[idx])**2 + (self.fB_coil_T[idx]*np.cos(rand_phi))**2)
        # Signal strength should be proportional to B*sin(B): the pi/2 pulse efficiency times the
        # induced signal amplitude. B is normalized to the center B field, and we assume the pi/2
        # pulse length is perfect for the center B field.
        self.fSpinSignal = B_Field*np.sin(self.fPulseEff*B_Field*np.pi/2.0)

        FreqMin = np.min(self.fSpinFreq)
        FreqMax = np.max(self.fSpinFreq)
        df = (FreqMax-FreqMin)/float(self.fNFreq)

        self.fWeightFunction = np.zeros(self.fNFreq)

        index = np.floor((self.fSpinFreq-FreqMin)/df).astype(int)
        index[index>=self.fNFreq] = self.fNFreq-1
        for i, idx in enumerate(index):
          self.fWeightFunction[idx] += self.fSpinSignal[i]

        self.fFreqBins = FreqMin+df/2.0+df*np.arange(0, self.fNFreq)
        self.fAverageFrequency = np.sum(self.fFreqBins*self.fWeightFunction) / np.sum(self.fWeightFunction)
        # Normalize
        self.fWeightFunction /= np.sum(self.fWeightFunction)
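
The per-spin loop that fills fWeightFunction can also be written without an explicit Python loop. A minimal sketch of the equivalent accumulation with np.add.at, using standalone toy arrays in place of the class attributes:

import numpy as np

# index: frequency bin of each spin; spin_signal: its signal amplitude.
index = np.array([0, 2, 2, 1, 0])
spin_signal = np.array([0.5, 1.0, 0.25, 0.75, 0.1])
n_freq_bins = 3

weight = np.zeros(n_freq_bins)           # plays the role of self.fWeightFunction
np.add.at(weight, index, spin_signal)    # unbuffered add: repeated bins accumulate correctly
print(weight)  # [0.6  0.75 1.25]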
Example #3
def _make_n_folds(full_data,
                  data_splitter,
                  nfold,
                  params,
                  seed,
                  fpreproc=None,
                  stratified=False,
                  shuffle=True):
    """
    Make an n-fold list of Booster from random indices.
    """
    num_data = full_data.construct().num_data()
    if data_splitter is not None:
        if not hasattr(data_splitter, 'split'):
            raise AttributeError("data_splitter has no method 'split'")
        folds = data_splitter.split(np.arange(num_data))
    elif stratified:
        if not SKLEARN_INSTALLED:
            raise LightGBMError('Scikit-learn is required for stratified cv')
        sfk = LGBMStratifiedKFold(n_splits=nfold,
                                  shuffle=shuffle,
                                  random_state=seed)
        folds = sfk.split(X=np.zeros(num_data), y=full_data.get_label())
    else:
        if shuffle:
            randidx = np.random.RandomState(seed).permutation(num_data)
        else:
            randidx = np.arange(num_data)
        kstep = int(num_data / nfold)
        test_id = [randidx[i:i + kstep] for i in range_(0, num_data, kstep)]
        train_id = [
            np.concatenate([test_id[i] for i in range_(nfold) if k != i])
            for k in range_(nfold)
        ]
        folds = zip(train_id, test_id)

    ret = CVBooster()
    for train_idx, test_idx in folds:
        train_set = full_data.subset(train_idx)
        valid_set = full_data.subset(test_idx)
        # run preprocessing on the data set if needed
        if fpreproc is not None:
            train_set, valid_set, tparam = fpreproc(train_set, valid_set,
                                                    params.copy())
        else:
            tparam = params
        cvbooster = Booster(tparam, train_set)
        cvbooster.add_valid(valid_set, 'valid')
        ret.append(cvbooster)
    return ret
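
The shuffle branch above builds folds by permuting the row indices and slicing them into nfold contiguous chunks. A small standalone sketch of that index logic with toy sizes (plain range instead of the range_ alias used above):

import numpy as np

num_data, nfold, seed = 9, 3, 0
randidx = np.random.RandomState(seed).permutation(num_data)
kstep = int(num_data / nfold)
test_id = [randidx[i:i + kstep] for i in range(0, num_data, kstep)]
train_id = [np.concatenate([test_id[i] for i in range(nfold) if k != i])
            for k in range(nfold)]
for train_idx, test_idx in zip(train_id, test_id):
    print(len(train_idx), len(test_idx))  # 6 3, for each of the three folds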
Example #4
    def UpdateProbeCenter(self):
        # Use Monte Carlo Method
        NSpins = 80000000

        rng = np.random.RandomState(0)
        R = np.sqrt(rng.uniform(0,1,size=NSpins))*self.fSampleR
        Phi = rng.uniform(0,2*np.pi,size=NSpins)
        Z = self.fSampleL*rng.uniform(0,1,size=NSpins)-self.fSampleL/2.

        IndexZ = np.floor((self.fSampleL/2. + Z)/self.fGridSize).astype(int)
        IndexR = np.floor(R/self.fGridSize).astype(int)

        avg_z = 0.0
        for r_idx, z_idx, phi, z in zip(IndexR, IndexZ, Phi, Z):
          B_Field = np.sqrt((self.fB_coil_L[r_idx, z_idx])**2 + (self.fB_coil_T[r_idx, z_idx]*np.cos(phi))**2)
          Signal = B_Field*np.sin(self.fPulseEff*B_Field*np.pi/2.0)
          avg_z += z*Signal
        avg_z /= float(NSpins)
        self.fProbeCenter[2] = avg_z
Example #5
def init_rand(model, data, random_state=None, rescale=True):
    """
    Random initialization with appropriate scaling.
    """

    if isinstance(random_state, npr.RandomState):
        rs = random_state
    else:
        rs = npr.RandomState(random_state)

    n_features, n_time = data.shape

    W = rs.rand(model.maxlag, n_features, model.n_components)
    H = rs.rand(model.n_components, n_time)

    if rescale:
        # TODO add brief note/reference here.
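        # alpha is the least-squares scale factor: the value minimizing
        # ||data - alpha * est||_F^2, i.e. <data, est> / ||est||^2.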
        est = cmf_predict(W, H)
        alpha = np.dot(data.ravel(), est.ravel()) / np.linalg.norm(est)**2
        W *= alpha
        H *= alpha

    return W, H
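
A minimal usage sketch for init_rand, assuming it is importable and that the model argument only needs maxlag and n_components attributes (a SimpleNamespace stands in for the real model here); rescale=False keeps the sketch independent of cmf_predict:

import numpy as np
from types import SimpleNamespace

model = SimpleNamespace(maxlag=5, n_components=3)   # hypothetical stand-in for the model object
data = np.random.rand(20, 100)                      # (n_features, n_time)

W, H = init_rand(model, data, random_state=0, rescale=False)
print(W.shape, H.shape)  # (5, 20, 3) (3, 100)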
Example #6
    def __init__(self,
                 f,
                 xbounds,
                 chunk_size=1000,
                 xtf=None,
                 store_pts=False,
                 prng=None,
                 args=tuple(),
                 kwargs=dict()):
        super(MCSimpleInt, self).__init__()

        # Collect user-provided information.
        self.f = f
        self.xtf = xtf
        self.store_pts = store_pts
        self.args = args
        self.kwargs = kwargs

        # Make xbounds into an array.
        xbounds_list = []
        for i in range(len(xbounds)):
            xbounds_list.append([xbounds[i][0], xbounds[i][1]])
        self.xbounds = np.array(xbounds_list)

        # Calculate transformed bounds if needed.
        if self.xtf is not None:
            xbounds_tf = self.xtf(self.xbounds)
        else:
            xbounds_tf = self.xbounds

        # TODO: Check to make sure that every max is greater than every min.
        for i in range(len(self.xbounds)):
            if xbounds_tf[i, 1] > xbounds_tf[i, 0]:
                continue
            else:
                raise ValueError('Upper bounds must be strictly greater' +
                                 ' than lower bounds.')

        # Calculate volume of integration region.
        dim_lens = []
        for i in range(len(xbounds_tf)):
            length = xbounds_tf[i, 1] - xbounds_tf[i, 0]
            dim_lens.append(length)
        self.volume = np.cumprod(dim_lens)[-1]

        # Initialize some properties of the integrator.
        self.npts = 0  # Total number of points tested
        self.f_sum = 0  # Sum of f values
        self.fsq_sum = 0  # Sum of squares of f values

        if store_pts:
            self.eval_list = []  # List of all evaluations
        else:
            self.eval_list = None

        # Handle prng.
        if isinstance(prng, int):
            self.prng = np.random.RandomState(prng)
        elif isinstance(prng, np.random.RandomState):
            self.prng = prng
        else:
            self.prng = np.random.RandomState()
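
A minimal construction sketch, assuming MCSimpleInt is importable from the surrounding module; the integrand signature (a single point array) and the bounds below are assumptions for illustration only:

import numpy as np

f = lambda x: x[0] * x[1]              # placeholder integrand over two dimensions
xbounds = [(0.0, 1.0), (0.0, 1.0)]     # the unit square

mc = MCSimpleInt(f, xbounds, chunk_size=1000, prng=0)  # prng=0 seeds the sampler reproducibly
print(mc.volume)  # 1.0 for the unit square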
Example #7
def evaluate(lr=0.05, n_epochs=200, dataset='olivettifaces.gif', nkerns=[5,10], batch_size=40):
    # Random number generator, used to initialize the parameters
    rng = np.random.RandomState(23455)
Example #8
y_train_odd = (y_train % 2 == 1)  # whether each label is odd
y_multilabel = np.c_[y_train_large, y_train_odd]

knn_clf = KNeighborsClassifier()  # default: n_neighbors = 5
knn_clf.fit(X_train, y_multilabel)
# print(knn_clf.predict([some_digit]))  # [[False False]]: 2 is neither >= 7 nor odd
# Compute the f1_score
y_train_knn_pred = cvp(knn_clf, X_train, y_train, cv=3)
print(f1_score(y_train, y_train_knn_pred, average="macro"))

# Multioutput classification
# A generalization of multilabel classification in which each label can take more than two values
# Illustrated with the following example: build a system that removes noise from images
# Note: the classifier outputs many labels (one label per pixel, with pixel intensities from 0 to 255)
# First, add noise to the clean images and build the training and test sets
rnd = np.random.RandomState(42)
noise_train = rnd.randint(0, 100, (len(X_train), 784))
noise_test = rnd.randint(0, 100, (len(X_test), 784))
X_train_mod = X_train + noise_train
X_test_mod = X_test + noise_test
y_train_mod = X_train
y_test_mod = X_test

# Pick one image at random and visualize it
fig, ax = plt.subplots(1, 2, figsize=(16, 6))
ax[0].imshow(X_train_mod[36001].reshape(28, 28))
ax[1].imshow(y_train_mod[36001].reshape(28, 28))
ax[0].set_title("Noise")
ax[1].set_title("Original")
plt.show()
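
The comments above describe building a denoising system, but the snippet stops before training it. A minimal sketch of that final step, using the classifier and modified sets defined above (the test index 0 is an arbitrary choice):

knn_clf.fit(X_train_mod, y_train_mod)            # learn clean pixel intensities from noisy inputs
clean_digit = knn_clf.predict([X_test_mod[0]])   # predicted clean 784-pixel image

plt.imshow(clean_digit.reshape(28, 28))
plt.title("Denoised")
plt.show()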