def my_supervised_mnist(X, y=None, l1=.1, **kwargs):
    """Rank MNIST features (pixels) with an RRFS network trained against
    supervised codes.

    Parameters
    ----------
    X : array of shape (n_samples, 784)
        Flattened 28x28 MNIST images.
    y : unused; kept for a uniform selector interface.
    l1 : float
        L1 sparsity coefficient passed to ``train_fs_network``.

    Returns
    -------
    Feature indices sorted by descending score, or ``list(range(X.shape[1]))``
    when X does not have 784 columns (after printing an error).
    """
    if X.shape[1] != 28 * 28:
        print("Error Dataset")
        # Bug fix: original did `range(len(X.shape[1]))`, which calls len()
        # on an int and raises TypeError. Return the identity ordering instead.
        return list(range(X.shape[1]))
    rrfs = RRFS(28 * 28, hidden=2)
    # getCodes expects image-shaped input: (n, 28, 28, 1).
    codes = getCodes(X.reshape(X.shape[0], 28, 28, 1))
    score = rrfs.train_fs_network(X, rep=codes, l1=l1, epochs=300, loss='mse')
    # Descending sort: highest-scoring features first.
    idx = np.argsort(score)[::-1]
    return idx
def my_se(X, y=None, l1=.1, n_components=2, **kwargs):
    """Rank features with an RRFS network trained to reproduce a spectral
    embedding of X.

    Parameters
    ----------
    X : array of shape (n_samples, n_features)
    y : unused; kept for a uniform selector interface.
    l1 : float
        L1 sparsity coefficient for ``train_fs_network``.
    n_components : int
        Dimensionality of the spectral embedding (and RRFS hidden size).

    Returns
    -------
    Feature indices sorted by descending score (``np.argsort`` reversed).
    """
    rrfs = RRFS(X.shape[1], hidden=n_components)
    embedder = SpectralEmbedding(n_components=n_components)
    embedding = embedder.fit_transform(X)
    # Min-max rescale the embedding into [0, 1] before using it as the target.
    lo = np.min(embedding)
    hi = np.max(embedding)
    embedding = (embedding - lo) / (hi - lo)
    score = rrfs.train_fs_network(X, rep=embedding, l1=l1, epochs=300, loss='mse')
    # Highest-scoring features come first.
    return np.argsort(score)[::-1]
def my_autoencoder(X, y=None, l1=.1, n_components=2, **kwargs):
    """Rank features with an RRFS network trained to reproduce autoencoder
    bottleneck codes of X.

    Parameters
    ----------
    X : array of shape (n_samples, n_features)
    y : unused; kept for a uniform selector interface.
    l1 : float
        L1 sparsity coefficient for ``train_fs_network``.
    n_components : int
        Bottleneck width of the autoencoder (and RRFS hidden size).

    Returns
    -------
    Feature indices sorted by descending score.
    """
    rrfs = RRFS(X.shape[1], hidden=n_components)
    input_tensor = Input(shape=(X.shape[1], ))
    # Bug fix: the original bound the hidden layers to names l1/l2/l3, which
    # shadowed the `l1` regularization argument — a Keras tensor was then
    # passed as l1= to train_fs_network. Layers renamed to avoid the shadow.
    h1 = Dense(10, activation='relu')(input_tensor)
    bottleneck = Dense(n_components, activation='relu')(h1)
    h2 = Dense(10, activation='relu')(bottleneck)
    output_tensor = Dense(X.shape[1])(h2)
    model = Model(input_tensor, output_tensor)
    encoder = Model(input_tensor, bottleneck)
    model.compile(optimizer='adam', loss='mse')
    # Train as an autoencoder: reconstruct X from X.
    model.fit(X, X, epochs=300)
    codes = encoder.predict(X)
    # Min-max rescale the codes into [0, 1] before using them as the target.
    codes = (codes - np.min(codes)) / (np.max(codes) - np.min(codes))
    score = rrfs.train_fs_network(X, rep=codes, l1=l1, epochs=300, loss='mse')
    # Highest-scoring features come first.
    idx = np.argsort(score)[::-1]
    return idx
# Continuation of an outer (cross-validation?) loop: extend the test-index list
# and materialize the train/test splits from precomputed index arrays.
# NOTE(review): test_indexes/train_indexes/X/Y/dim/dataset are defined outside
# this view — verify against the surrounding file.
test_indexes += list(c_test_indexes)
x_test = X[test_indexes,:]
y_test = Y[test_indexes]
x_train = X[train_indexes,:]
y_train = Y[train_indexes]
# Min-max scale both splits using train-set statistics only (no test leakage).
t_max = np.max(x_train)
t_min = np.min(x_train)
#scaler = preprocessing.StandardScaler()
x_train = (x_train.astype(float)-t_min)/(t_max-t_min)
x_test = (x_test.astype(float)-t_min)/(t_max-t_min)
#x_test = scaler.transform(x_test)
# Train the RRFS representation network once; the per-(p, l1) feature-selection
# sweep below reuses it.
rrfs = RRFS(dim, loss='mse')
rrfs.train_representation_network(x_train, name=dataset+'_rep.hd5', epochs=1000)
# Sweep grid: percentages of features to keep, and L1 penalty strengths.
ps = [2, 4, 6, 8, 10, 20 , 30, 40 , 50, 60, 70, 80, 100]
l1s = [0.0001,.001,.005,.01,.1,1]
accs_ps = np.zeros(len(ps))  # best accuracy found per feature percentage
max_features = []
all_features ={}
for i, p in enumerate(ps):
    # Convert the percentage p into an absolute feature count out of dim.
    num_features = int(p*dim/100)
    print('NUM === %d'%num_features)
    accs_l1 = np.zeros(len(l1s))
    acc_max = 0
    # Inner sweep over L1 strengths; loop body continues past this chunk.
    for index,l1 in enumerate(l1s):
# Variant of the sweep that uses t-SNE codes as the RRFS target representation.
# Continuation of an outer loop: split indices (test_indexes/train_indexes),
# X, Y, and dim are defined outside this view — verify against the file.
x_test = X[test_indexes, :]
y_test = Y[test_indexes]
x_train = X[train_indexes, :]
y_train = Y[train_indexes]
#scaler = preprocessing.MinMaxScaler()
#x_train = scaler.fit_transform(x_train)
#x_test = scaler.transform(x_test)
# Min-max scale both splits using train-set statistics only (no test leakage).
mmax = np.max(x_train)
mmin = np.min(x_train)
x_train = (x_train - mmin) / (mmax - mmin)
x_test = (x_test - mmin) / (mmax - mmin)
rrfs = RRFS(dim, hidden=2)
# 2-D t-SNE embedding of the training data, rescaled into [0, 1], to be used
# as the regression target for the RRFS feature-scoring network.
tsne = TSNE()
tsne_codes = tsne.fit_transform(x_train)
tsne_codes = (tsne_codes - np.min(tsne_codes)) / (np.max(tsne_codes) - np.min(tsne_codes))
#rrfs.train_representation_network(x_train, name=dataset+'_rep.hd5', epochs=1000)
# Sweep grid: percentages of features to keep, and L1 penalty strengths.
ps = [2, 4, 6, 8, 10, 20, 30, 40, 50, 60, 70, 80, 100]
l1s = [1e-5, 0.0001, .001, .005, .01, .05, .1]
accs_ps = np.zeros(len(ps))  # best accuracy found per feature percentage
# NOTE(review): "fatures" is a typo for "features"; left unchanged because the
# name is presumably referenced later in the file — confirm before renaming.
fatures_ps_l1s = {}
accs_ps_l1s = {}
for i, p in enumerate(ps):
    # Convert the percentage p into an absolute feature count out of dim.
    num_features = int(p * dim / 100)
    # Inner sweep over L1 strengths; loop body continues past this chunk.
    accs_l1 = np.zeros(len(l1s))