yhat = model3.transform(tmpX[n_train:], tmpY[n_train:])
model3.zscore(yhat, tmpY[n_train:])
print(f"{c*10} End Model3 <-- new, hyperparams {c*10}\n")

# ## TODO: classifier/regressor/clusterer/etc Mixin requirements
# piper = Pipeline([('model', model2)])
# print(piper)
# piper.fit_transform(tmpX, tmpY)

print(f"\n{c*10} Starting TrainingManager with Grid Search {c*10}\n")

import preprocess, extract
from sklearn.preprocessing import StandardScaler, PowerTransformer
from sklearn.linear_model import LogisticRegression
from sklearn import svm

# Candidate data-preprocessing pipelines
dpipez = [
    Pipeline([('scaler', StandardScaler()), ]),
    Pipeline([('power', PowerTransformer()), ]),
]

# Candidate model pipelines, each paired with its hyperparameter grid
mpipez = [
    (Pipeline([('flatten', preprocess.Flattenor()), ('svm', svm.SVC())]),
     {'kernel': ('linear', 'rbf'), 'C': [1, 10]}),
    (Pipeline([('flatten', preprocess.Flattenor()), ('logit', LogisticRegression())]),
     {'C': [1, 10]}),
    (Pipeline([('reshaper', preprocess.Reshapeor((1, -1))),
               ('tensorfy', preprocess.ToTensor()),
               ('zmodel', model2)]),
     {}),
]  # *tmpX[0].shape
print(mpipez)

mgr = ZTrainingManager()
mgr.build_permutationz(data_pipez=dpipez, model_pipez=mpipez)
mgr.run([x.cpu().numpy().ravel() for x in tmpX],
        [y.cpu().numpy().ravel() for y in tmpY],
        train_test_split=1.)
print(f"{c*10} End ZTrainingManager {c*10}\n")
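# --- Illustrative sketch (not part of the original script) -------------------
# ZTrainingManager's internals are not shown here; the sketch below is one
# plausible reading of build_permutationz()/run(): cross every data pipeline
# with every (model pipeline, param grid) pair, then grid-search each combo.
# The names (build_permutationz_sketch, run_sketch) and the param-key
# prefixing are assumptions, not the manager's actual API.
from itertools import product
from sklearn.model_selection import GridSearchCV

def build_permutationz_sketch(data_pipez, model_pipez):
    """Cartesian product: one (data_pipe, model_pipe, param_grid) per combination."""
    return [(d, m, grid) for d, (m, grid) in product(data_pipez, model_pipez)]

def run_sketch(permutationz, X, y):
    """Fit every combination; grid-search only when a param grid is supplied."""
    for data_pipe, model_pipe, param_grid in permutationz:
        Xt = data_pipe.fit_transform(X)
        if param_grid:
            # sklearn expects 'step__param' keys when searching over a Pipeline,
            # e.g. {'svm__C': [1, 10]}; the bare keys from mpipez are assumed to
            # target the last step and are prefixed here accordingly.
            last_step = model_pipe.steps[-1][0]
            grid = {f"{last_step}__{k}": v for k, v in param_grid.items()}
            GridSearchCV(model_pipe, grid).fit(Xt, y)
        else:
            model_pipe.fit(Xt, y)
# -----------------------------------------------------------------------------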
print(f"\n{c*10} Starting TrainingManager with Grid Search {c*10}\n") import preprocess, extract from sklearn.preprocessing import StandardScaler, PowerTransformer from sklearn.linear_model import LogisticRegression from sklearn import svm dpipez = [ Pipeline([ ('scaler', StandardScaler()), ]), Pipeline([ ('power', PowerTransformer()), ]) ] mpipez = [ (Pipeline([('flatten', preprocess.Flattenor()), ('svm', svm.SVC())]), { 'kernel': ('linear', 'rbf'), 'C': [1, 10] }), ## (Pipeline([('flatten', preprocess.Flattenor()), ('logit', LogisticRegression())]), { 'C': [1, 10] }), ## (Pipeline([('reshaper', preprocess.Reshapeor((1, -1))), ('tensorfy', preprocess.ToTensor()), ('zmodel', model2)]), {}) ] #*tmpX[0].shape print(mpipez) mgr = ZTrainingManager()
                             ftype=zdata.PdDataStats.TYPE_TXT_LINES_FILE)
dframe = pdstats.dframe.sample(n=130)
X_data = dframe
y_data = dframe['Normal'].values.astype(np.float32)  ## TODO: 'dcodez_short'
print("Loaded into PdFrame data of size: ", len(dframe), " and into y_data of size ", len(y_data))
print(dframe.columns)

### Setup y_label : n-ary classification

## 2. PIPELINEZ
loader_p = [('fetch_img', preprocess.LoadImageFileTransform('fpath', crop_ratio=0.75)), ]
reshapeor_1 = [('flatten', preprocess.Flattenor()), ]
funduzor_1 = [('funduzor', extract.FundusColorChannelz()), ]
scaler_p = [('scaler', StandardScaler()), ]

# Compose the image loader with the fundus color-channel extractor and preview the output
tmpz = Pipeline(loader_p + funduzor_1).transform(X_data)
print(len(tmpz), tmpz[0].shape)
# _ = [print(f"{t.shape}") for t in tmpz]
utilz.Image.plot_images_list([t[:, :, 1:] for t in tmpz], nc=5, cmap=None)
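# --- Illustrative sketch (not part of the original script) -------------------
# The custom steps used above (Flattenor, Reshapeor, ToTensor,
# FundusColorChannelz, ...) are assumed to follow the standard sklearn
# transformer pattern: BaseEstimator + TransformerMixin with a no-op fit().
# Being stateless, such steps can be chained in a Pipeline and invoked via
# .transform() without a prior .fit(), as done with loader_p + funduzor_1.
# FlattenorSketch is a hypothetical stand-in, not the actual
# preprocess.Flattenor implementation.
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin

class FlattenorSketch(BaseEstimator, TransformerMixin):
    """Flatten each sample of a list of images/arrays into a 1-D vector."""

    def fit(self, X, y=None):
        # Stateless: nothing to learn from the data.
        return self

    def transform(self, X, y=None):
        return [np.asarray(x).ravel() for x in X]
# -----------------------------------------------------------------------------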