Example #1
import numpy as np
import matplotlib.pyplot as plt


def exampleB2_5(G=40, T=1.174, gamma=42.58, alpha=np.arange(180, 59, -1)):
    # Effect of crushers around a refocusing pulse.
    #
    # G     = 40 mT/m       crusher gradient amplitude
    # T     = 1.174 ms      crusher gradient duration
    # gamma = 42.58 kHz/mT  gyromagnetic ratio
    # alpha = 180, 179, ..., 60 deg refocusing flip angles
    #
    # G*T*gamma is about 2 cycles/mm of crusher-induced twist.

    x = np.arange(0, 1.01, 0.01) / 1000        # position across the voxel (m)
    Grot = 360 * x * G * T * gamma             # rotation (deg) due to one crusher at each position

    Mse = np.zeros(len(alpha))                 # spin-echo signal vs. refocusing angle
    for n, alpha_n in enumerate(alpha):
        Mend = np.zeros((3, len(x)))
        for k, Grot_k in enumerate(Grot):
            M = np.array([[1.0], [0.0], [0.0]])   # Mstart
            M = zrot(Grot_k) @ M                  # Crusher 1
            M = xrot(alpha_n) @ M                 # Refocusing pulse
            M = zrot(Grot_k) @ M                  # Crusher 2
            Mend[:, k] = M.ravel()

        Mxy = Mend[0, :] + 1j * Mend[1, :]
        plt.figure(1)
        plt.clf()
        plt.plot(x * 1000, np.real(Mxy), 'k--')
        plt.plot([np.min(x) * 1000, np.max(x) * 1000],
                 [np.abs(np.mean(Mxy))] * 2, 'b-')
        plt.axis([np.min(x) * 1000, np.max(x) * 1000, -1.2, 1.2])
        plt.xlabel('Position (mm)')
        plt.ylabel('Signal')
        plt.legend(['M_{xy}', 'Avg M_{xy}'])
        plt.title('%d Degree Refocusing Angle' % alpha_n)
        plt.draw()
        Mse[n] = np.abs(np.mean(Mxy))

    plt.figure(2)
    plt.plot(alpha, Mse)
    plt.xlabel('Refocusing Angle (deg)')
    plt.ylabel('Spin Echo Signal')
    plt.title('Spin Echo vs Refoc. Angle')
    plt.ylim(0, 1)
    plt.show()
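# exampleB2_5() relies on zrot/xrot rotation helpers that are not part of this
# snippet. A minimal sketch, assuming they return 3x3 rotation matrices about the
# z- and x-axes for an angle given in degrees (these definitions are assumptions,
# not the original library code):

def zrot(phi_deg):
    """3x3 rotation matrix about the z-axis by phi_deg degrees."""
    phi = np.deg2rad(phi_deg)
    return np.array([[np.cos(phi), -np.sin(phi), 0.0],
                     [np.sin(phi),  np.cos(phi), 0.0],
                     [0.0,          0.0,         1.0]])


def xrot(phi_deg):
    """3x3 rotation matrix about the x-axis by phi_deg degrees."""
    phi = np.deg2rad(phi_deg)
    return np.array([[1.0, 0.0,          0.0],
                     [0.0, np.cos(phi), -np.sin(phi)],
                     [0.0, np.sin(phi),  np.cos(phi)]])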
Example #2
import numpy as np
import matplotlib.pyplot as plt


def plot_perm_sample(data1, data2):
    """Plot the ECDFs of one permutation sample on top of the original samples."""
    perm_sample_1, perm_sample_2 = permutation_sample(data1, data2)

    def ecdf(data):
        """compute ECDF for a one-dimensional array of measurements"""
        n = len(data)
        x = np.sort(data)
        y = np.arange(1, n + 1) / n
        return x, y

    x_1, y_1 = ecdf(perm_sample_1)
    x_2, y_2 = ecdf(perm_sample_2)
    _ = plt.plot(x_1,
                 y_1,
                 marker='.',
                 linestyle='none',
                 color='red',
                 alpha=0.2)
    _ = plt.plot(x_2,
                 y_2,
                 marker='.',
                 linestyle='none',
                 color='blue',
                 alpha=0.2)

    x_1, y_1 = ecdf(data1)
    x_2, y_2 = ecdf(data2)
    _ = plt.plot(x_1, y_1, marker='.', linestyle='none', color='red')
    _ = plt.plot(x_2, y_2, marker='.', linestyle='none', color='blue')

    plt.margins(0.02)
    _ = plt.xlabel('x')
    _ = plt.ylabel('ECDF')
    plt.show()
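# plot_perm_sample() calls permutation_sample(), which is not defined in this
# snippet. A minimal sketch of what such a helper typically does (this exact
# implementation is an assumption): pool the two samples, shuffle the pooled
# values, and split them back into two arrays of the original sizes.

def permutation_sample(data1, data2):
    """Return one permutation sample for each of the two input arrays."""
    pooled = np.concatenate((data1, data2))        # pool both samples
    permuted = np.random.permutation(pooled)       # shuffle the pooled values
    perm_sample_1 = permuted[:len(data1)]          # first chunk, same size as data1
    perm_sample_2 = permuted[len(data1):]          # remainder, same size as data2
    return perm_sample_1, perm_sample_2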
# load the dataset and view the top five records
import pandas as pd
import matplotlib.pyplot as plt

dataset = pd.read_csv('Mall_Customer.csv')
x = dataset.iloc[:, [3, 4]].values    # Annual Income and Spending Score columns
dataset.head()

# using the elbow method to find the optimal number of clusters
from sklearn.cluster import KMeans
wcss = []
for i in range(1, 11):
    kmeans = KMeans(n_clusters=i, init='k-means++', random_state=42)
    kmeans.fit(x)
    wcss.append(kmeans.inertia_)
plt.plot(range(1, 11), wcss)
plt.title('The Elbow Method')
plt.xlabel('Number of clusters')
plt.ylabel('WCSS')
plt.show()

# train the K-Mean model using dataset
kmeans = KMeans(n_clusters=5, init='k-means++', random_state=42)
y_kmeans = kmeans.fit_predict(x)

# visualising the clusters
plt.scatter(x[y_hc == 0, 0],
            x[y_hc == 0, 1],
            s=100,
            c='red',
            label='Cluster 1')
plt.scatter(x[y_hc == 1, 0],
            x[y_hc == 1, 1],
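# Optional check (an addition, not in the original snippet): the fitted KMeans
# estimator exposes the learned centroids through kmeans.cluster_centers_, an
# array of shape (n_clusters, n_features) in the same Income/Score feature space.
print(kmeans.cluster_centers_)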
Example #4
# By default, LogisticRegression applies L2 regularization (regularization strength C = 1)

import mglearn
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import load_breast_cancer

import matplotlib.pyplot as plt

cancer = load_breast_cancer()

x_tr, x_te, y_tr, y_te = train_test_split(cancer.data,
                                          cancer.target,
                                          random_state=42)

logreg = LogisticRegression().fit(x_tr, y_tr)

print("score {}".format(logreg.score(x_te, y_te)))

# plt shows and then clears the figure from a single command line -- I wonder whether it uses a thread for this?

plt.plot(logreg.coef_.T, 'o', label="c = 1")
plt.xticks(range(cancer.data.shape[1]), cancer.feature_names, rotation=90)
plt.hlines(0, 0, cancer.data.shape[1])
plt.ylim(-5, 5)
plt.xlabel("features")
plt.ylabel("coef magnitude")
plt.legend()
plt.show()
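# The coefficient plot above is labelled "c = 1" because the model uses sklearn's
# default C=1.0. A hedged sketch (an addition, not part of the original) of how one
# might compare regularization strengths by fitting a few values of C on the same data:
for C, marker in [(0.01, 'v'), (1.0, 'o'), (100.0, '^')]:
    model = LogisticRegression(C=C, max_iter=10000).fit(x_tr, y_tr)
    plt.plot(model.coef_.T, marker, label="C = {}".format(C))
plt.xticks(range(cancer.data.shape[1]), cancer.feature_names, rotation=90)
plt.hlines(0, 0, cancer.data.shape[1])
plt.ylim(-5, 5)
plt.xlabel("features")
plt.ylabel("coef magnitude")
plt.legend()
plt.show()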
import pandas as pd

dataset = pd.read_csv('Mall_Customer.csv')
x = dataset.iloc[:, [3, 4]].values

# train the Hierarchical Clustering model using dataset
from sklearn.cluster import AgglomerativeClustering
hc = AgglomerativeClustering(n_clusters = 5, affinity = 'euclidean', linkage = 'ward')
y_hc = hc.fit_predict(x)

# visualising the clusters
plt.scatter(x[y_hc == 0, 0], x[y_hc == 0, 1], s = 100, c = 'red', label = 'Cluster 1')
plt.scatter(x[y_hc == 1, 0], x[y_hc == 1, 1], s = 100, c = 'blue', label = 'Cluster 2')
plt.scatter(x[y_hc == 2, 0], x[y_hc == 2, 1], s = 100, c = 'green', label = 'Cluster 3')
plt.scatter(x[y_hc == 3, 0], x[y_hc == 3, 1], s = 100, c = 'cyan', label = 'Cluster 4')
plt.scatter(x[y_hc == 4, 0], x[y_hc == 4, 1], s = 100, c = 'magenta', label = 'Cluster 5')
plt.title('Clusters of customers')
plt.xlabel('Annual Income (k$)')
plt.ylabel('Spending Score (1-100)')
plt.legend()
plt.show()

# https://www.kaggle.com/shwetabh123/mall-customers
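# The choice of n_clusters=5 above is usually justified with a dendrogram. A brief
# sketch (an addition, not part of the original snippet) using scipy's hierarchical
# clustering tools on the same two features:
import scipy.cluster.hierarchy as sch

sch.dendrogram(sch.linkage(x, method='ward'))
plt.title('Dendrogram')
plt.xlabel('Customers')
plt.ylabel('Euclidean distance')
plt.show()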



# 10 Models for Clustering (see the instantiation sketch after this list)
    # K-Means
    # Affinity Propagation
    # BIRCH
    # DBSCAN
    # Mini Batch K-Means
    # Mean Shift
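# A minimal sketch (an addition, not part of the original notes) showing how the
# models listed above are instantiated in scikit-learn; the parameter values here
# are illustrative placeholders, not tuned choices.
from sklearn.cluster import (KMeans, AffinityPropagation, Birch, DBSCAN,
                             MiniBatchKMeans, MeanShift)

models = {
    'K-Means': KMeans(n_clusters=5),
    'Affinity Propagation': AffinityPropagation(),
    'BIRCH': Birch(n_clusters=5),
    'DBSCAN': DBSCAN(eps=0.5, min_samples=5),
    'Mini Batch K-Means': MiniBatchKMeans(n_clusters=5),
    'Mean Shift': MeanShift(),
}
for name, model in models.items():
    print(name, model.fit_predict(x)[:10])   # first few cluster labels per model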
# This fragment assumes a preprocessed feature matrix X, its source DataFrame df,
# and a cluster count clusterNum defined in earlier steps that are not included here.
k_means = KMeans(init="k-means++", n_clusters=clusterNum, n_init=12)
k_means.fit(X)
labels = k_means.labels_
print(labels)

#Insights
df["Clus_km"] = labels
df.head(5)

#Centroid values are checked by averaging the features in each cluster
df.groupby('Clus_km').mean()

#Looking at the distribution of customers based on their age and income
area = np.pi * (X[:,1])**2
plt.scatter(X[:, 0], X[:, 3], s=area, c=labels.astype(float), alpha=0.5)
plt.xlabel('Age', fontsize = 18)
plt.ylabel('Income', fontsize = 16)

plt.show()

from mpl_toolkits.mplot3d import Axes3D  # registers the 3d projection
fig = plt.figure(1, figsize=(8, 6))
plt.clf()
ax = fig.add_axes([0, 0, 0.95, 1], projection='3d')
ax.view_init(elev=48, azim=134)

plt.cla()
# plt.ylabel('Age', fontsize=18)
# plt.xlabel('Income', fontsize=16)
# plt.zlabel('Education', fontsize=16)
ax.set_xlabel('Education')
ax.set_ylabel('Age')