def plot_component_variance(x, y):
    from pymks import MKSHomogenizationModel, PrimitiveBasis
    from pymks.tools import draw_component_variance

    prim_basis = PrimitiveBasis(n_states=3, domain=[0, 2])
    model = MKSHomogenizationModel(basis=prim_basis)
    model.n_components = 20
    model.fit(x, y, periodic_axes=[0, 1])
    # Draw the plot containing the PCA variance accumulation
    draw_component_variance(model.dimension_reducer.explained_variance_ratio_)
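# Minimal usage sketch for plot_component_variance. The make_microstructure
# call and the random stand-in for y are assumptions for illustration; in
# practice y would hold one measured or simulated property per sample.
import numpy as np
from pymks.datasets import make_microstructure

x_demo = make_microstructure(n_samples=40, size=(21, 21), n_phases=3,
                             grain_size=(7, 7), seed=0)
y_demo = np.random.random(40)  # placeholder property values, one per sample
plot_component_variance(x_demo, y_demo)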
def test_stress():
    import numpy as np
    from pymks.datasets import make_elastic_stress_random
    from pymks import MKSHomogenizationModel, DiscreteIndicatorBasis

    sample_size = 200
    grain_size = [(5, 5), (6, 4), (4, 6), (2, 2)]
    n_samples = [sample_size] * len(grain_size)
    elastic_modulus = (410, 200)
    poissons_ratio = (0.28, 0.3)
    macro_strain = 0.001
    size = (21, 21)
    X, y = make_elastic_stress_random(
        n_samples=n_samples, size=size, grain_size=grain_size,
        elastic_modulus=elastic_modulus, poissons_ratio=poissons_ratio,
        macro_strain=macro_strain, seed=0)
    dbasis = DiscreteIndicatorBasis(n_states=2, domain=[0, 1])
    model = MKSHomogenizationModel(basis=dbasis, n_components=3, degree=3)
    model.fit(X, y)
    test_sample_size = 1
    n_samples = [test_sample_size] * len(grain_size)
    X_new, y_new = make_elastic_stress_random(
        n_samples=n_samples, size=size, grain_size=grain_size,
        elastic_modulus=elastic_modulus, poissons_ratio=poissons_ratio,
        macro_strain=macro_strain, seed=3)
    y_result = model.predict(X_new)
    assert np.allclose(np.round(y_new, decimals=2),
                       np.round(y_result, decimals=2))
def test_intercept_setter():
    import numpy as np
    from pymks import MKSHomogenizationModel, PrimitiveBasis

    p_basis = PrimitiveBasis(2)
    model = MKSHomogenizationModel(basis=p_basis)
    X = np.random.randint(2, size=(50, 10, 10))
    y = np.random.randint(2, size=(50,))
    model.fit(X, y)
    intercept = model.intercept_
    model.intercept_ = intercept * 2
    assert np.allclose(model.intercept_, intercept * 2)
def test_coef_setter():
    import numpy as np
    from pymks import MKSHomogenizationModel, PrimitiveBasis

    p_basis = PrimitiveBasis(2)
    model = MKSHomogenizationModel(basis=p_basis)
    X = np.random.randint(2, size=(50, 10, 10))
    y = np.random.randint(2, size=(50,))
    model.fit(X, y)
    coefs = model.coef_
    model.coef_ = coefs * 2
    assert np.allclose(model.coef_, coefs * 2)
def test_n_components_from_reducer():
    from pymks import MKSHomogenizationModel, DiscreteIndicatorBasis
    from sklearn.manifold import LocallyLinearEmbedding

    reducer = LocallyLinearEmbedding(n_components=7)
    dbasis = DiscreteIndicatorBasis(n_states=3, domain=[0, 2])
    model = MKSHomogenizationModel(dimension_reducer=reducer, basis=dbasis)
    assert model.n_components == 7
def test_default_correlations():
    from pymks import MKSHomogenizationModel, PrimitiveBasis

    prim_basis = PrimitiveBasis(6)
    model_prim = MKSHomogenizationModel(basis=prim_basis)
    assert model_prim.correlations == [(0, 0), (0, 1), (0, 2),
                                       (0, 3), (0, 4), (0, 5)]
def test_set_correlations():
    from pymks import MKSHomogenizationModel, PrimitiveBasis

    test_correlations = [(0, 0), (0, 2), (0, 4)]
    prim_basis = PrimitiveBasis(6)
    model_prim = MKSHomogenizationModel(basis=prim_basis,
                                        correlations=test_correlations)
    assert model_prim.correlations == test_correlations
def test_n_components_with_reducer():
    from pymks import MKSHomogenizationModel, DiscreteIndicatorBasis
    from sklearn.manifold import Isomap

    # An explicit n_components on the model overrides the reducer's setting
    reducer = Isomap(n_components=7)
    dbasis = DiscreteIndicatorBasis(n_states=3, domain=[0, 2])
    model = MKSHomogenizationModel(dimension_reducer=reducer, basis=dbasis,
                                   n_components=9)
    assert model.n_components == 9
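# Sketch suggested by the two reducer tests above: any scikit-learn-style
# transformer exposing an n_components attribute appears usable as the
# dimension_reducer. KernelPCA is an assumption for illustration and is not
# part of the test suite.
from pymks import MKSHomogenizationModel, DiscreteIndicatorBasis
from sklearn.decomposition import KernelPCA

kpca = KernelPCA(n_components=4, kernel='rbf')
dbasis = DiscreteIndicatorBasis(n_states=2, domain=[0, 1])
kpca_model = MKSHomogenizationModel(dimension_reducer=kpca, basis=dbasis)
assert kpca_model.n_components == 4  # picked up from the reducer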
def plot_components(x, y, n_comps, linker_model, verbose=2):
    from pymks import MKSHomogenizationModel, PrimitiveBasis
    from pymks.tools import draw_components

    prim_basis = PrimitiveBasis(n_states=3, domain=[0, 2])
    model = MKSHomogenizationModel(basis=prim_basis,
                                   property_linker=linker_model)
    model.n_components = n_comps
    model.fit(x, y, periodic_axes=[0, 1])
    print(model.property_linker.coef_)
    draw_components([model.reduced_fit_data[0:3, :2],
                     model.reduced_fit_data[3:6, :2],
                     model.reduced_fit_data[6:9, :2],
                     model.reduced_fit_data[9:11, :2],
                     model.reduced_fit_data[11:14, :2],
                     model.reduced_fit_data[14:16, :2],
                     model.reduced_fit_data[16:17, :2],
                     model.reduced_fit_data[17:18, :2]],
                    ['Ag:0.237 Cu:0.141 v:0.0525',
                     'Ag:0.237 Cu:0.141 v:0.0593',
                     'Ag:0.237 Cu:0.141 v:0.0773',
                     'Ag:0.237 Cu:0.141 v:0.0844',
                     'Ag:0.239 Cu:0.138 v:0.0791',
                     'Ag:0.239 Cu:0.138 v:0.0525',
                     'Ag:0.237 Cu:0.141 v:0.0914',
                     'Ag:0.237 Cu:0.141 v:0.0512'])
def test_n_components_change():
    from pymks import MKSHomogenizationModel, DiscreteIndicatorBasis

    dbasis = DiscreteIndicatorBasis(n_states=2)
    model = MKSHomogenizationModel(basis=dbasis)
    model.n_components = 27
    assert model.n_components == 27
def test_default_dimension_reducer():
    # RandomizedPCA was the default reducer with older scikit-learn releases;
    # see the PCA-based variant of this test further below.
    from sklearn.decomposition import RandomizedPCA
    from pymks import MKSHomogenizationModel

    model = MKSHomogenizationModel()
    assert isinstance(model.dimension_reducer, RandomizedPCA)
    # (tail of a loop that flattens each sample's 2-pt statistics)
    x_corr_flat[row_ctr] = row.flatten()

print(x.shape)
flat_len = (x.shape[0],) + (np.prod(x.shape[1:]),)
X_train, X_test, y_train, y_test = train_test_split(
    x.reshape(flat_len), y, test_size=0.2, random_state=3)
print(x_corr.shape)
print(X_test.shape)
# Uncomment to view one container's correlations
# draw_correlations(x_corr[0].real)

# Reduce all 2-pt stats via PCA and try linear regression on inputs/outputs
reducer = PCA(n_components=3)
linker = LinearRegression()
model = MKSHomogenizationModel(basis=prim_basis, compute_correlations=False)
# model.fit(x_corr, y, periodic_axes=[0, 1])

# Set up parameters to optimize with a leave-one-out grid search
params_to_tune = {'degree': np.arange(1, 4), 'n_components': np.arange(1, 8)}
fit_params = {'size': x_corr_flat.shape, 'periodic_axes': [0, 1]}
loo_cv = LeaveOneOut(samples)  # older scikit-learn API: takes the sample count
gs = GridSearchCV(model, params_to_tune, cv=loo_cv, n_jobs=6,
                  fit_params=fit_params).fit(x_corr_flat, y)

# Manual fit
# model.fit(x_corr, y, periodic_axes=[0, 1])
# print(model.reduced_fit_data)
# Draw the plot containing the PCA variance accumulation
# draw_component_variance(model.dimension_reducer.explained_variance_ratio_)
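# Follow-up sketch (not in the original script): the standard GridSearchCV
# attributes expose the winning hyperparameters and the refit estimator.
print(gs.best_params_)  # best {'degree': ..., 'n_components': ...} combination
print(gs.best_score_)   # mean cross-validated score for that combination
best_model = gs.best_estimator_  # refit on all data when refit=True (default)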
    # Get a representative slice from the block (or an average; the selection
    # strategy is still to be decided)
    best_slice = get_best_slice(metadatum['data'])
    # Get 2-pt stats for the best slice
    print("---> Getting 2-pt stats")
    metadatum['stats'] = get_correlations_for_slice(best_slice)

print(metadata[0]['stats'].shape)

# Construct X and Y for PCA and linkage
print("--> Creating X and Y")
i = 0
for metadatum in metadata:
    x[i, 0:6 * metadatum['x'] ** 2] = metadatum['stats'].flatten()
    i += 1

prim_basis = PrimitiveBasis(n_states=3, domain=[0, 2])
x_ = prim_basis.discretize(metadata[0]['data'])
x_corr = correlate(x_)
draw_correlations(x_corr.real)
quit()  # early exit: stop here to inspect the correlation plots

# Reduce all 2-pt stats via PCA and try linear regression on inputs/outputs
reducer = PCA(n_components=3)
linker = LinearRegression()
model = MKSHomogenizationModel(dimension_reducer=reducer,
                               property_linker=linker,
                               compute_correlations=False)
model.n_components = 40
model.fit(metadatum['stats'], y, periodic_axes=[0, 1])
print(model.reduced_fit_data)
def test_default_property_linker():
    from sklearn.linear_model import LinearRegression
    from pymks import MKSHomogenizationModel, PrimitiveBasis

    prim_basis = PrimitiveBasis(n_states=2)
    model = MKSHomogenizationModel(basis=prim_basis)
    assert isinstance(model.property_linker, LinearRegression)
def test_degree_change():
    from pymks import MKSHomogenizationModel, DiscreteIndicatorBasis

    dbasis = DiscreteIndicatorBasis(n_states=2)
    model = MKSHomogenizationModel(basis=dbasis)
    model.degree = 4
    assert model.degree == 4
def test_default_n_components():
    from pymks import MKSHomogenizationModel, DiscreteIndicatorBasis

    dbasis = DiscreteIndicatorBasis(n_states=2)
    model = MKSHomogenizationModel(basis=dbasis)
    assert model.n_components == 5
def test_default_dimension_reducer():
    from sklearn.decomposition import PCA
    from pymks import MKSHomogenizationModel

    model = MKSHomogenizationModel(compute_correlations=False)
    assert isinstance(model.dimension_reducer, PCA)
# generateAbaqusInp('Abaqus File', dataset,
#                   elastic_modulus=(120, 80),
#                   poissons_ratio=(0.3, 0.3))
# dataset = Long_fiber_x
# print(dataset.shape)
im = dataset[1, :, :]
s = np.sum(im)
print(s)
# examples = dataset[::sample_size]
# print(examples.shape)
# draw_microstructures(examples)

# Define the model
P_basis = PrimitiveBasis(n_states=2, domain=[0, 1])
model = MKSHomogenizationModel(basis=P_basis,
                               correlations=[(0, 0), (1, 1), (0, 1)])

# Draw 2-point statistics
'''
data_ = P_basis.discretize(dataset)
data_auto = autocorrelate(data_, periodic_axes=(0, 1))
labs = [('Fiber', 'Fiber'), ('Matrix', 'Matrix')]
draw_autocorrelations(data_auto[0], autocorrelations=labs)
'''

# Split testing and training segments
flat_shape = (dataset.shape[0],) + (np.prod(dataset.shape[1:]),)
data_train, data_test, stress_train, stress_test = train_test_split(
    dataset.reshape(flat_shape), stresses, test_size=0.2, random_state=3)
# print(data_test.shape)
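# Sketch of the natural next step after the split above: fit on the training
# portion and compare predictions on the held-out portion. Reshaping back to
# image dimensions and passing periodic_axes mirror how the other snippets in
# this file call fit(); treat both as assumptions here.
img_shape = (-1,) + dataset.shape[1:]
model.fit(data_train.reshape(img_shape), stress_train, periodic_axes=[0, 1])
stress_predict = model.predict(data_test.reshape(img_shape),
                               periodic_axes=[0, 1])
# Rough agreement check between predicted and held-out stresses
print(np.allclose(stress_predict, stress_test, rtol=0.1))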