Example #1
def prog10(x: pd.DataFrame, sigma: float, alpha1: float, beta1: float, alpha2: float, beta2: float, cutoff: float, low=0.01, high=1.00, step=0.01):
    k1 = np.arange(low, high, step)
    # k1 = np.arange(0.01, 1.00, 0.01)
    g1 = ( beta1**alpha1 ) / gamma(alpha1) * (k1**(alpha1 - 1))*np.exp(-beta1*k1)
    g1 = g1 / np.sum(g1)

    k2 = np.arange(1.1, 40.1, 0.1)
    g2 = (beta2 ** alpha2) / gamma(alpha2) * (k2 ** (alpha2 - 1)) * np.exp(-beta2 * k2)
    g2 = g2 / np.sum(g2)

    k = np.c_[k1, k2]      # np.c_ is an indexer: square brackets, not a call (k1, k2 must be equal length)
    g = np.c_[g1, g2] / 2
    _, m = k.shape

    z = ( x - x[0] ) / sigma
    _, length = z.shape

    w = 10**-10 * np.cumsum(np.ones(length))
    t1 = np.arange(length) + 1
    t2 = np.ones(length)

    N1 = np.outer(t1, t2)
    N2 = N1.T
    N = np.tril( N1 - N2 + 1 )

    # r1 = prog06.calculate_r(z, t2, length, eta1, N)
    # r2 = prog06.calculate_r(z, t2, length, eta2, N)
    r = g1 * calculate_r(z, t2, length, alpha1, beta1, N, N1, N2, m)
    c = np.cumsum(np.maximum(r, cutoff) - cutoff) - w
    d = c.min()
    I = np.argmin(c)
    N = I + 1
    nummtLENGTH = np.maximum(N-length, 0)

    return N, nummtLENGTH
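A note that applies to this and most of the examples below: np.c_ and np.r_ are indexer objects, so they take square brackets, not parentheses. A minimal sketch of the two forms:

import numpy as np

a = np.array([1, 2, 3])
b = np.array([4, 5, 6])

print(np.c_[a, b])  # column-stack along a new second axis -> shape (3, 2)
print(np.r_[a, b])  # concatenate along the first axis -> shape (6,)
# np.c_(a, b) raises TypeError: np.c_ is not callable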
Example #2
def PyWavelet(img, level=1, wavlet="db1", mode="sym"):
    # Convert the image to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    res_pywt = pywt.wavedec2(gray, wavlet, level=level, mode=mode)
    print(res_pywt)
    res_kinji, (res_x, res_y, res_xy) = res_pywt
    # Merge the approximation and detail coefficients, assuming the intended
    # result is the usual quadrant layout. np.c_/np.r_ are indexers (square
    # brackets), and their results must be assigned back to be kept.
    tmp = np.r_[np.c_[res_kinji, res_x],
                np.c_[res_y, res_xy]]
    print("after merge")
    print(tmp)
    return tmp
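For reference, at level 1 pywt.wavedec2 returns the approximation array plus one tuple of detail arrays, each about half the input size per dimension, which is what makes the quadrant merge above line up. A small shape check (assuming an even-sized input and the 'symmetric' mode):

import numpy as np
import pywt

img = np.zeros((128, 128))
cA, (cH, cV, cD) = pywt.wavedec2(img, "db1", level=1, mode="symmetric")
print(cA.shape, cH.shape, cV.shape, cD.shape)  # (64, 64) each
merged = np.r_[np.c_[cA, cH], np.c_[cV, cD]]   # quadrant layout, back to (128, 128)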
Example #3
def square_transform(X):
    """
    Implement the square transform, including the bias variable 1 in the
    first column, i.e. phi(x_1, x_2) = (1, x_1^2, x_2^2).
    We will prepend a column of ones for the bias variable
    
    np.c_, np.transpose may be useful
    To raise a number to second power use operator ** i.e. 3**2 is 9
    
    As an example of what the function should do:
    >>> X = np.array([[1,2],[3,4]])
    >>> square_transform(X)
    array([[  1.,   1.,   4.],
           [  1.,   9.,  16.]])

    Args:
      X: np.array of shape (n, 2)

    Returns
      Xt: np.array of shape(n, 3) 
    """
    # Insert code here to transform the data - aim to make a vectorized solution!
    Xt = X
    ### YOUR CODE HERE 2-4 lines
    Xt = np.c_[np.ones(X.shape[0]), X**2]
    ### END CODE

    return Xt
Example #4
    def predict(self, X):
        if self.fit_intercept:
            # np.c_ is an indexer: use brackets to prepend the bias column
            X = np.c_[np.ones(X.shape[0]), X]
        y_pred = self._sigmoid(np.dot(X, self.w))
        np.putmask(y_pred, y_pred >= 0.5, 1.0)
        np.putmask(y_pred, y_pred < 0.5, 0.0)
        return y_pred
Example #5
    def execute(self):
        if self.x_values is None or self.y_pred is None:
            return

        # Create color maps for 3-class classification problem, as with iris
        cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
        cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])

        # We only take the first two features; we could avoid this ugly
        # slicing by using a two-dimensional dataset.
        X = [[_a, _b] for _a, _b, _, _ in self.x_values]
        X = np.array(X)
        x_min, x_max = X[:, 0].min() - .1, X[:, 0].max() + .1
        y_min, y_max = X[:, 1].min() - .1, X[:, 1].max() + .1
        xx, yy = np.meshgrid(np.linspace(x_min, x_max, 100),
                             np.linspace(y_min, y_max, 100))

        Z = np.c_[self.y_pred]

        Z = Z.reshape(xx.shape)
        plt.figure()
        plt.pcolormesh(xx, yy, Z, cmap=cmap_light)

        # One class index per sample (assumes the label is the tuple's last field)
        y = [self.class_no[l] for *_, l in self.x_values]
        # Plot also the training points
        plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
        plt.xlabel('sepal length (cm)')
        plt.ylabel('sepal width (cm)')
        plt.axis('tight')
Example #6
    def NoiseCovMat(self, X_ins, T):
        # Compute the process-noise covariance matrix.
        # The expansion of each component follows the JAXA report
        # 『搬送波位相DGPS/INS 複合航法アルゴリズムの開発』 ("Development of a
        # carrier-phase DGPS/INS integrated navigation algorithm").
        # URL: https://repository.exst.jaxa.jp/dspace/handle/a-is/32573

        qbi = np.array([[X_ins[6, 0]], [X_ins[7, 0]], [X_ins[8, 0]],
                        [X_ins[9, 0]]])

        q1 = qbi[0, 0]
        q2 = qbi[1, 0]
        q3 = qbi[2, 0]
        q4 = qbi[3, 0]

        Q = 0.5 * np.array([[q4, -q3, q2], [q3, q4, -q1], [-q2, q1, q4],
                            [-q1, -q2, -q3]])

        Cbi = self.Quate_ToDCM(qbi)

        # Approximations for the time integration (an exponential form would
        # most likely be fine too; the reference paper is from the 1990s)
        T1g = T * (1 - T / self.Tau_g + 2 / 3 * (T / self.Tau_g)**2)
        T1a = T * (1 - T / self.Tau_a + 2 / 3 * (T / self.Tau_a)**2)
        T2g = T**2 * (1 / 2 - 1 / 3 * T / self.Tau_g + 1 / 6 *
                      (T / self.Tau_g)**2)
        T2a = T**2 * (1 / 2 - 1 / 3 * T / self.Tau_a + 1 / 6 *
                      (T / self.Tau_a)**2)
        T3 = 1 / 3 * T**3

        I_33 = np.eye(3)
        Ze33 = np.zeros((3, 3))
        Ze34 = np.zeros((3, 4))
        Ze43 = np.zeros((4, 3))

        Q_22 = T3 * self.Wno_a * Cbi.T
        Q_25 = T2a * self.Wno_a * Cbi
        Q_33 = T3 * self.Wno_g * np.dot(Q, Q.T)
        Q_34 = T2g * self.Wno_g * Q
        Q_43 = T2g * self.Wno_g * Q.T
        Q_44 = T1g * self.Wno_g * I_33
        Q_52 = T2a * self.Wno_a * Cbi.T
        Q_55 = T1a * self.Wno_a * I_33

        Q_noise = np.r_[np.c_[Ze33, Ze33, Ze34, Ze33, Ze33],
                        np.c_[Ze33, Q_22, Ze34, Ze33, Q_25],
                        np.c_[Ze43, Ze43, Q_33, Q_34, Ze43],
                        np.c_[Ze33, Ze33, Q_43, Q_44, Ze33],
                        np.c_[Ze33, Q_52, Ze34, Ze33, Q_55]]

        return Q_noise
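The five-row block assembly above can also be written with np.block, which mirrors the matrix notation more directly; a tiny sketch with stand-in blocks:

import numpy as np

A = np.eye(2)
B = np.zeros((2, 2))
M1 = np.r_[np.c_[A, B], np.c_[B, A]]  # indexer-based block assembly
M2 = np.block([[A, B], [B, A]])       # same result, closer to the math
print(np.array_equal(M1, M2))         # True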
Example #7
    def gen_x(self, d, n, F=False):
        """
        Generates x data. If F is true, the data is randomly distributed
        along the x direction; otherwise it is evenly spaced.
        """
        if F:
            self.x = np.sort(np.random.rand(n) * d).reshape(-1, 1)
        else:
            self.x = np.linspace(0.1, d, n).reshape(-1, 1)
        # np.c_ uses bracket indexing; prepend a column of ones for the bias
        self.xr = np.c_[np.ones(self.x.shape), self.x]
Example #8
def animate(frame, population=population, toolbox=toolbox):
    offspring = algorithms.varAnd(population, toolbox, cxpb=0.1, mutpb=0)
    for ind in offspring:
        ind.fitshare.values = toolbox.evalfitsh(ind, offspring)
    population = toolbox.select(offspring, k=len(population))
    xdata=np.asarray([NormBinSeqToNum(i) for i in population])
    ydata=np.asarray([FitnessFunction(i)[0] for i in population])
    # Update the scatter point positions; set_array expects 1-D color data,
    # while set_offsets takes the (N, 2) coordinates built by np.c_
    scatt.set_offsets(np.c_[xdata, ydata])
Example #9
def load_datamulti(filename):
    # numpy.str/numpy.float were removed in NumPy 1.24; use the builtins
    matrixfile = numpy.loadtxt(filename, dtype=str, delimiter=',')
    X1 = matrixfile[:, 0].astype(float)
    X2 = matrixfile[:, 1].astype(float)
    #X = numpy.c_[X1[:, numpy.newaxis], X2[:, numpy.newaxis]]
    X = numpy.c_[X1, X2]
    #onesmatrix = numpy.ones([X.shape[0], 1])
    #tmp = X.astype(numpy.float)
    #X = numpy.c_[onesmatrix, tmp]
    y = matrixfile[:, 2].astype(float)
    y = y[:, numpy.newaxis]
    return X, y
Example #10
    def fit(self, X, y, lr, tol=1e-7, itermax=1e5):

        if self.fit_bias:
            X = np.c_[np.ones(X.shape[0]), X]

        self.W = np.random.rand(X.shape[1])

        prev_loss = np.inf

        for _ in range(int(itermax)):  # itermax arrives as a float (1e5)
            y_pred = sigmoid(np.dot(X, self.W))  # (n_samples,) predictions
            loss = self._lossfun(X, y, y_pred)
            if prev_loss - loss < tol:
                return
            prev_loss = loss
            self.W -= lr * self._grad(X, y, y_pred)
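self._lossfun and self._grad are not part of the snippet; under the usual log-loss for logistic regression they would plausibly look like the sketch below (written as free functions here, with names assumed, not taken from the original):

import numpy as np

def lossfun(X, y, y_pred):
    # mean binary cross-entropy; eps guards against log(0)
    eps = 1e-12
    return -np.mean(y * np.log(y_pred + eps) + (1 - y) * np.log(1 - y_pred + eps))

def grad(X, y, y_pred):
    # gradient of the mean log-loss with respect to the weight vector
    return np.dot(X.T, y_pred - y) / X.shape[0]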
Example #11
    def fit(self, X, y):
        '''fit model

         Parameters
        ----------
        X:  numpy.ndarray
            train input feature
        y:  numpy.ndarray {0,1}
            train output
        '''
        y[y == 0] = -1
        z, self.Vm, self.shift = self.encode(X, y)
        X_bias = np.c_[np.ones(X.shape[0]), X]

        # Solve for the coefficients via the normal equation, regressing onto
        # the SVD-encoded target z (predict() decodes it again). Note `*` is
        # elementwise on ndarrays; matrix products need `@`.
        self.w = (np.linalg.inv(X_bias.T @ X_bias + self.alpha *
                                np.eye(X_bias.shape[1])) @ X_bias.T) @ z
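As a sanity check (an added illustration, not from the original), the closed-form ridge solution used above agrees with scikit-learn's Ridge when the intercept is disabled and the bias column is supplied explicitly:

import numpy as np
from sklearn.linear_model import Ridge

rng = np.random.default_rng(0)
X = rng.normal(size=(50, 3))
y = rng.normal(size=50)
alpha = 0.5

Xb = np.c_[np.ones(X.shape[0]), X]
w_closed = np.linalg.inv(Xb.T @ Xb + alpha * np.eye(Xb.shape[1])) @ Xb.T @ y
w_ridge = Ridge(alpha=alpha, fit_intercept=False).fit(Xb, y).coef_
print(np.allclose(w_closed, w_ridge))  # True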
Example #12
def mapFeature(X1, X2):
    # MAPFEATURE Feature mapping function to polynomial features
    #
    # MAPFEATURE(X1, X2) maps the two input features
    # to quadratic features used in the regularization exercise.
    #
    # Returns a new feature array with more features, comprising of
    # X1, X2, X1.^2, X2.^2, X1*X2, X1*X2.^2, etc..
    #
    # Inputs X1, X2 must be the same size
    #

    degree = 6
    out = np.ones(np.size(X1)).reshape(-1, 1)
    for i in range(1, degree + 1):
        for j in range(i + 1):
            out = np.c_[out, np.power(X1, i - j) * np.power(X2, j)]

    return out
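For degree 6 the loop emits one column per monomial of total degree 1 through 6, i.e. 2 + 3 + ... + 7 = 27 columns plus the bias, 28 in all; a quick check:

X1 = np.array([0.5, -1.0])
X2 = np.array([2.0, 0.25])
print(mapFeature(X1, X2).shape)  # (2, 28)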
Example #13
    def predict(self, X):
        '''Predict, decoding the SVD-encoded targets.

        Parameters
        ----------
        X:   numpy.ndarray
             input features, shape :code:`(n_samples, n_features)`

        Returns
        -------
        y_pred:      numpy.ndarray {0, 1}
                     predictions for y, shape :code:`(n_samples, n_target)`
        y_pred_prob: numpy.ndarray [0, 1]
                     predicted probabilities for y, shape :code:`(n_samples, n_target)`
        '''
        # X.shape is a tuple (bracket indexing), and np.c_ is an indexer too
        z_pred = np.c_[np.ones(X.shape[0]), X] @ self.w
        y_real = z_pred @ self.Vm.T + self.shift
        y_pred = np.zeros(y_real.shape)
        y_pred[y_real > 0] = 1
        y_pred[y_real <= 0] = 0
        y_pred_prob = minmax_scale(y_real, axis=1)
        return y_pred, y_pred_prob
Example #14
def func1(t, r):
    # Lorenz system right-hand side (the def line is assumed from the calls
    # below); dz/dt is x*y - beta*z in the standard Lorenz equations
    sigma = 10
    beta = 8 / 3
    rho = 28
    x, y, z = r
    return np.array([sigma * (y - x),
                     x * (rho - z) - y,
                     x * y - beta * z])


n = 4000
t = 0
dt = 0.01
f = np.zeros((n, 3))  # RK4 trajectory (n steps, 3 state variables)
f[0, :] = 1.0
for i in range(n - 1):
    f1 = dt * func1(t, f[i, :])
    f2 = dt * func1(t + (dt / 2), f[i, :] + (f1 / 2))
    f3 = dt * func1(t + (dt / 2), f[i, :] + (f2 / 2))
    f4 = dt * func1(t + dt, f[i, :] + f3)
    f[i + 1, :] = f[i, :] + (f1 / 6) + (f2 / 3) + (f3 / 3) + (f4 / 6)

# Reference trajectory from scipy's odeint; tfirst=True matches func1's (t, r) signature
ts = np.arange(0, 40, 0.01)
rs = odeint(func1, [1, 1, 1], ts, tfirst=True)

fig = plt.figure()
ax = fig.add_subplot(projection='3d')  # fig.gca(projection=...) was removed in Matplotlib 3.6
ax.plot(rs[:, 0], rs[:, 1], rs[:, 2], c='r')
ax.plot(f[:, 0], f[:, 1], f[:, 2], c='b')
plt.show()

np.savetxt("lorenz.dat", np.c_[f[:, 0], f[:, 1], f[:, 2]])  # np.savefig does not exist; savetxt writes the columns
Example #15
print(clf.score(X_test, Y_test))
y_hat = clf.predict(X_test)
show_accuracy(y_hat, Y_test, 'train data accuracy')
print(clf.score(X_cv, Y_cv))
y_hat = clf.predict(X_cv)
show_accuracy(y_hat, Y_cv, 'test data accuracy')
print(grid.best_params_)
print('Start for confusionmat: ')
confusionmat = trainmodelSVC(X_test, Y_test, X_cv, Y_cv)
print(confusionmat)

teamwin1314 = kmeanofwin(seasonArray, seasonArray_PCA)

print('The result of code')
seasonArray_num = np.c_[seasonArray[:, 6], seasonArray_num]
floatsea = seasonArray_num.astype(float)
floatsea = sklearn.preprocessing.normalize(floatsea, axis = 0)
pca = PCA(n_components = 1)
PCA2 = pca.fit_transform(floatsea[:, :])
floatper = seasonArray[:, 7].astype(float)
plt.figure(6)
plt.clf()
z = np.polyfit(floatper, PCA2.ravel(), 1)  # fit_transform returns (n, 1); polyfit needs 1-D y
plt.plot(floatper, PCA2, 'bo')
plt.title('The result of every players for PER vs PCA ', fontsize = 18)
plt.xlabel("PER")
plt.ylabel('PCA')


Example #16
    G_null = np.identity(G.shape[1]) - np.linalg.pinv(G).dot(G)
    return G_null


def check_linear_dependent(X):
    for i in range(X.shape[0]):
        print(np.ravel(np.array(X[i] / X[0])))


# m = 1.0
# p_g = np.array([0.0, 0.4])

# G_base = np.linalg.qr(np.linalg.pinv(G))[0].transpose()
# G_null_base = np.linalg.qr(G_null)[0].transpose()[0]

np.c_[np.cross(np.array([1, 2, 3]), np.identity(3)), np.identity(3)]

p1 = np.array([-1.0, 0.0, 3.0])
p2 = np.array([2.0, 0.0, 3.0])
G = np.r_[np.c_[np.identity(3), np.identity(3)],
          np.c_[np.cross(p1, np.identity(3)),
                np.cross(p2, np.identity(3))]]

G = np.r_[np.c_[np.identity(3),
                np.zeros((3, 3)),
                np.identity(3),
                np.zeros((3, 3))], np.c_[np.cross(p1, np.identity(3)),
                                         np.identity(3),
                                         np.cross(p2, np.identity(3)),
                                         np.identity(3)]]
Example #17
        cost_iter.append([iter, J])
        # print "iter %s | J: %.3f" % (iter, J)
        gradient = np.dot(x.T, loss) / m
        theta -= alpha * gradient
    return theta, cost_iter


# generate sample data using scikit-learn
x, y = make_regression(n_samples=100,
                       n_features=1,
                       n_informative=1,
                       random_state=0,
                       noise=35)
m, n = np.shape(x)
# add column as x0
x = np.c_[np.ones(m), x]
print(np.shape(x))

alpha = 0.01
theta, cost_iter = gradient_descent(alpha, x, y, 1000)

# plot the result
y_pred = x.dot(theta)  # fitted values: theta[0] * x0 + theta[1] * x1
plt.plot(x[:, 1], y, 'o')
plt.plot(x[:, 1], y_pred, 'k-')

# plot cost trend
cost_iter = np.array(cost_iter)  # list of [iter, J] pairs -> array for slicing
plt.plot(cost_iter[:, 0], cost_iter[:, 1])
plt.xlabel("Iteration Number")
plt.ylabel("Cost")
Example #18
len(Z)
len(Z[0])
Z
plt.contourf(xx,yy,Z,cmap=plt.cm.Paired)
Z=Z.reshape(xx.shape)
xx.shape
Z
xx,yy=np.meshgrid(np.arange(x_min,x_max,.02),np.arange(y_min,y_max,.02))
xx
Z=model.predict(np.c_[xx.ravel(),yy.ravel()])
Z=model.predict([xx.ravel(),yy.ravel()])
Z=model.predict(np.c_[xx.ravel(),yy.ravel()])
Z=model.predict(np.c_[xx.ravel(),yy.ravel()])
xx.ravel()
xx.ravel(),yy.ravel()
np.c_(xx.ravel(),yy.ravel())
np.c_[xx.ravel(),yy.ravel()]
[xx.ravel(),yy.ravel()]
Z=model.predict([xx.ravel(),yy.ravel()])
xx.ravel()
len(xx.ravel())
Z=model.predict([xx.ravel(),yy.ravel()])
clf=DecisionTreeClassifier()
clf.fit(df,y)
output=clf.predict(test)
accuracy_score(y_true,output)
clf
X
clf=DecisionTreeClassifier()
clf.fit(X,y)
output=clf.predict(test)
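The session above eventually converges on the standard decision-boundary recipe: np.c_[xx.ravel(), yy.ravel()] turns the two meshgrid arrays into an (n, 2) sample matrix that predict accepts, and the predictions are reshaped back to the grid. Condensed (assuming a fitted classifier model and plot bounds x_min, x_max, y_min, y_max):

xx, yy = np.meshgrid(np.arange(x_min, x_max, .02),
                     np.arange(y_min, y_max, .02))
Z = model.predict(np.c_[xx.ravel(), yy.ravel()])  # one prediction per grid point
Z = Z.reshape(xx.shape)                           # back onto the grid
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)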
Example #19
# In[39]:

q

# In[40]:

p

# In[41]:

c_(p, q)

# In[42]:

np.c_(p, q)

# In[43]:

get_ipython().run_line_magic('pinfo', 'np.c_')

# In[45]:

np.c_[p, q].shape

# In[1]:

from sklearn.ensemble import RandomForestClassifier

p = RandomForestClassifier()
Example #20
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
                     np.arange(y_min, y_max, plot_step))
plot_step=.02
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
                     np.arange(y_min, y_max, plot_step))
xx
yy
clf
clf.predict(np.c_[xx.ravel() yy.ravel()])
clf.predict(np.c_[xx.ravel(), yy.ravel()])
clf.predict(np.c_[xx.ravel(), yy.ravel()])
np.c_
np.c_([xx.ravel() yy.ravel()],2)
np.c_([xx.ravel(), yy.ravel()],2)
np.c_[[xx.ravel(), yy.ravel()],2]
get_ipython().magic(u'pinfo2 c_')
get_ipython().magic(u'pinfo2 np.c_')
np.r_[[xx.ravel(), yy.ravel()],2]
np.r_[xx.ravel(), yy.ravel()]
len(np.r_[xx.ravel(), yy.ravel()])
len(np.c_[xx.ravel(), yy.ravel()])
get_ipython().magic(u'pinfo2 np.ravel')
xx
len(xx)
len(xx[0])
X
X
df=pd.read_csv('train.csv')
label=df.Class
Example #21
    def nonzip(self):
        print(np.r_[self.x_array, self.y_array, self.cl_array])
        print(np.c_[self.x_array, self.y_array, self.cl_array])
Example #22
    def KF_System(self, a, w, X_ins):
        # Build the Kalman filter system matrix.
        # F is the system matrix of the INS error model.

        r_i = np.array([[X_ins[0, 0]], [X_ins[1, 0]], [X_ins[2, 0]]])
        v_i = np.array([[X_ins[3, 0]], [X_ins[4, 0]], [X_ins[5, 0]]])
        qbi = np.array([[X_ins[6, 0]], [X_ins[7, 0]], [X_ins[8, 0]],
                        [X_ins[9, 0]]])
        wBias = np.array([[X_ins[10, 0]], [X_ins[11, 0]], [X_ins[12, 0]]])
        aBias = np.array([[X_ins[13, 0]], [X_ins[14, 0]], [X_ins[15, 0]]])

        w_b = w - wBias
        a_b = a - aBias

        r = np.linalg.norm(r_i)
        x = r_i[0, 0]
        y = r_i[1, 0]
        z = r_i[2, 0]

        q1 = qbi[0, 0]
        q2 = qbi[1, 0]
        q3 = qbi[2, 0]
        q4 = qbi[3, 0]

        wx = w_b[0, 0]
        wy = w_b[1, 0]
        wz = w_b[2, 0]

        ax = a_b[0, 0]
        ay = a_b[1, 0]
        az = a_b[2, 0]

        I_33 = np.eye(3)
        Ze33 = np.zeros((3, 3))
        Ze34 = np.zeros((3, 4))
        Ze43 = np.zeros((4, 3))

        G = 3 * self.Mu / (r**5) * np.array(
            [[x * x, x * y, x * z], [x * y, y * y, y * z],
             [x * z, y * z, z * z]]) - self.Mu / (r**3) * I_33
        Ome = 0.5 * np.array([[0, wz, -wy, wx], [-wz, 0, wx, wy],
                              [wy, -wx, 0, wz], [-wx, -wy, -wz, 0]])
        Q = 0.5 * np.array([[q4, -q3, q2], [q3, q4, -q1], [-q2, q1, q4],
                            [-q1, -q2, -q3]])
        Rbar = np.c_[2 * Q, -qbi]
        Abar = np.array([[0, -az, ay], [az, 0, -ax], [-ay, ax, 0],
                         [-ax, -ay, -az]])
        Cbi = self.Quate_ToDCM(qbi)

        D = np.dot(2 * Cbi, np.dot(Abar.T, Rbar.T))

        Fbg = -1 / self.Tau_g * I_33
        Fba = -1 / self.Tau_a * I_33

        F = np.r_[np.c_[Ze33, I_33, Ze34, Ze33, Ze33],
                  np.c_[G, Ze33, D, Ze33, Cbi],
                  np.c_[Ze43, Ze43, Ome, Q, Ze43],
                  np.c_[Ze33, Ze33, Ze34, Fbg, Ze33],
                  np.c_[Ze33, Ze33, Ze34, Ze33, Fba]]

        return F
Example #23
def linear_val_func(theta, x):
    # prepend a bias column, then evaluate the linear model
    return np.dot(np.c_[np.ones(x.shape[0]), x], theta)
Example #24
    def predict(self, X):
        if self.fit_bias:
            X = np.c_[np.ones(X.shape[0]), X]
        return sigmoid(np.dot(X, self.W))