Example #1
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from deepautoencoder import StackedAutoEncoder


def sdae_syn(X_s, P, h_layer, activations, noise, epoch, loss, batch_size):
    """Generate synthetic samples using stacked De-noising Encoders
	Parameters
	----------
	X_s: positive class sample (Numpy Array) (Input Must be in within range of 0 to 1)
	P: Over Sampling Percentage
	h_layer: hidden layer (list)
	activation: activation functions list (same length as hidden layer)
	noise : [None,Gaussian,mask]
	epoch: epoch for each layer (list with same size as hidden layer)
	loss: 'rmse' or 'cross-entropy'
	batch_size = mini_batch size

	For more detaisl on input parameters https://github.com/rajarsheem/libsdae 
	"""
    n_samples = int(X_s.shape[0] * P / 100)
    print "generating %d samples" % (n_samples)
    X_init = np.random.standard_normal(size=(n_samples, X_s.shape[1]))
    scaler = MinMaxScaler()
    X_init = scaler.fit_transform(X_init)
    model = StackedAutoEncoder(dims=h_layer,
                               activations=activations,
                               noise=noise,
                               epoch=epoch,
                               loss=loss,
                               batch_size=batch_size,
                               lr=0.007,
                               print_step=2000)
    model.fit(X_s)
    syn_Z = model.transform(X_init)
    return syn_Z
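
A minimal usage sketch for sdae_syn; the data, layer sizes, and parameter values below are illustrative stand-ins, not from the original source:

X_pos = np.random.rand(50, 10)  # stand-in minority-class data, already in [0, 1]
syn = sdae_syn(X_pos, P=200, h_layer=[8, 6],
               activations=['tanh', 'tanh'], noise='gaussian',
               epoch=[2000, 2000], loss='rmse', batch_size=10)
print(syn.shape)  # (100, 6): 200% of 50 samples, in the 6-dim latent space
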
scaler = MinMaxScaler()
trX = scaler.fit_transform(trX)
teX = scaler.transform(teX)  # transform only: the scaler is fitted on training data
from mlxtend.tf_classifier import TfSoftmaxRegression
import ast
import warnings
trY = trY.astype(int)

print(trX.shape[1], "Input Feature Space")

print("Enter Layers")
layer = ast.literal_eval(input())  # e.g. [64, 32, 16]
print("Enter the layer number after which SMOTE is to be performed")
l_s = int(input())
l_encoder = layer[:l_s]

model_bs = StackedAutoEncoder(dims=l_encoder,
                              activations=['tanh' for i in range(len(l_encoder))],
                              noise='gaussian',
                              epoch=[10000 for i in range(len(l_encoder))],
                              loss='rmse',
                              lr=0.007,
                              batch_size=20,
                              print_step=2000)

S1=model_bs.fit_transform(trX)

X0,X1=_class_split(S1,trY)
print(X0.shape[1], "Feature Space Before SMOTE")
warnings.filterwarnings("ignore", category=DeprecationWarning)
print("Enter oversampling percent")
P = int(input())
syn_X=SMOTE(X1, P, 5)
X1=np.vstack((X1,syn_X))
X1=np.column_stack((X1,np.ones(X1.shape[0])))
X0=np.column_stack((X0,np.zeros(X0.shape[0])))
Xy=np.vstack((X0,X1))
np.random.shuffle(Xy)
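
The helper _class_split used above is not defined in these snippets; a plausible minimal reconstruction, assuming binary 0/1 labels (this is an assumption, not the original helper):

def _class_split(X, y):
    # hypothetical reconstruction: separate rows by binary class label
    y = np.asarray(y).ravel()
    return X[y == 0], X[y == 1]
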
Example #3
from mlxtend.tf_classifier import TfSoftmaxRegression
import ast
import warnings

trY = trY.astype(int)

print(trX.shape[1], "Input Feature Space")

print("Enter Layers")
layer = ast.literal_eval(input())  # e.g. [64, 32, 16]
print("Enter the layer number after which SMOTE is to be performed")
l_s = int(input())
l_encoder = layer[:l_s]

model_bs = StackedAutoEncoder(
    dims=l_encoder,
    activations=['tanh' for i in range(len(l_encoder))],
    noise='gaussian',
    epoch=[10000 for i in range(len(l_encoder))],
    loss='rmse',
    lr=0.007,
    batch_size=20,
    print_step=2000)

S1 = model_bs.fit_transform(trX)

X0, X1 = _class_split(S1, trY)
print(X0.shape[1], "Feature Space Before SMOTE")
warnings.filterwarnings("ignore", category=DeprecationWarning)
print("Enter oversampling percent")
P = int(input())
syn_X = SMOTE(X1, P, 5)
X1 = np.vstack((X1, syn_X))
X1 = np.column_stack((X1, np.ones(X1.shape[0])))
Example #4
import numpy as np
import joblib
from sklearn.preprocessing import MinMaxScaler
from deepautoencoder import StackedAutoEncoder

val = dataset.values

# ensure all data is float
val = val.astype('float32')

# normalize only the input features
scaler = MinMaxScaler(feature_range=(0, 1))
val[:, :-1] = scaler.fit_transform(val[:, :-1])

# split into train and test data
train_to_test_ratio = 0.7
spl = np.random.rand(val.shape[0]) < train_to_test_ratio
train_X, train_y = val[spl, :-1], val[spl, -1]
test_X, test_y = val[~spl, :-1], val[~spl, -1]

model = StackedAutoEncoder(dims=sda_dims,
                           activations=acti_func_arr,
                           epoch=epochs,
                           loss='rmse',
                           lr=0.007,
                           batch_size=100,
                           print_step=print_step,
                           noise='gaussian')
model.fit(train_X, test_X)

test_X_latent = model.transform(test_X)

joblib.dump(model, 'SDAmodel.sav')

print('End')
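
As a brief follow-up to the persistence step, the saved model could be reloaded and reused without retraining (a minimal sketch using the joblib file written above):

model = joblib.load('SDAmodel.sav')
new_latent = model.transform(test_X)  # reuse the trained encoder on new data
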
Example #5
from deepautoencoder import StackedAutoEncoder
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np

mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
data, target = mnist.train.images, mnist.train.labels

# train / test  split
idx = np.random.rand(data.shape[0]) < 0.8
train_X, train_Y = data[idx], target[idx]
test_X, test_Y = data[~idx], target[~idx]

model = StackedAutoEncoder(dims=[200, 200],
                           activations=['linear', 'linear'],
                           epoch=[500, 500],
                           loss='rmse',
                           lr=0.007,
                           batch_size=100,
                           print_step=100)
model.fit(train_X, finetune=True)
test_X_ = model.transform(test_X)
from deepautoencoder import StackedAutoEncoder
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA

mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
data, target = mnist.train.images, mnist.train.labels
test_x, test_s = mnist.test.images, mnist.test.labels
# train / test  split
#idx = np.random.rand(data.shape[0]) < 0.8
train_X, train_Y = data, target
#test_X, test_Y = data[~idx], target[~idx]

model = StackedAutoEncoder(dims=[784, 784, 784],
                           activations=['relu', 'linear', 'softmax'],
                           epoch=[3000, 3000, 3000],
                           loss='rmse',
                           lr=0.007,
                           batch_size=100,
                           print_step=200)
model.fit(train_X)
test_X_trans = model.transform(test_x)
residual_error = abs(test_X_trans - test_x)

# PCA with 5 components
pca = PCA(n_components=5)
pca_error = pca.fit_transform(residual_error.T)

# mean per-pixel error across the test set
mean_error = np.mean(residual_error, axis=0)
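
The matplotlib import in this snippet is otherwise unused; a minimal sketch of how the per-pixel mean error could be visualized, assuming the usual 28x28 MNIST layout:

plt.imshow(mean_error.reshape(28, 28), cmap='hot')
plt.title('Mean absolute reconstruction error per pixel')
plt.colorbar()
plt.show()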

Example #7
from sklearn import datasets

from deepautoencoder import StackedAutoEncoder

iris = datasets.load_iris().data

model2 = StackedAutoEncoder(dims=[5, 4],
                            activations=['relu', 'relu'],
                            epoch=[1000, 500],
                            loss='rmse',
                            lr=0.007,
                            batch_size=50,
                            print_step=200)

model2.fit(iris)
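
As a hedged follow-up, the fitted model could then be used to obtain the latent encoding of the iris data:

iris_latent = model2.transform(iris)
print(iris_latent.shape)  # (150, 4): the final hidden layer has 4 units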
from deepautoencoder import StackedAutoEncoder
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np

mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
data, target = mnist.train.images, mnist.train.labels

# train / validation split
idx = np.random.rand(data.shape[0]) < 0.8
train_X, train_Y = data[idx], target[idx]
val_X, val_Y = data[~idx], target[~idx]

model = StackedAutoEncoder(dims=[200, 200],
                           activations=['linear', 'linear'],
                           epoch=[3000, 3000],
                           loss='rmse',
                           lr=0.007,
                           batch_size=100,
                           print_step=200)
model.fit(train_X, val_X)

# Alternative: without global validation loss during training
#model.fit(train_X)

val_X_latent = model.transform(val_X)

# Fit and train model with one method
#train_X_latent = model.fit_transform(train_X)
from deepautoencoder import StackedAutoEncoder
import numpy as np

x = np.random.rand(100, 5)
x2 = np.random.rand(100, 5)

model = StackedAutoEncoder(dims=[5, 6],
                           activations=['relu', 'relu'],
                           noise='gaussian',
                           epoch=[10000, 500],
                           loss='rmse',
                           lr=0.007,
                           batch_size=50,
                           print_step=200)

# usage 1: fit and encode the same data in one call
result = model.fit_transform(x)
print(result.shape)

# usage 2: fit on one dataset, then transform another
model.fit(x)
result2 = model.transform(x2)
print(result2)
Example #11
from deepautoencoder import StackedAutoEncoder
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np

mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
data, target = mnist.train.images, mnist.train.labels

# train / test  split
idx = np.random.rand(data.shape[0]) < 0.8
train_X, train_Y = data[idx], target[idx]
test_X, test_Y = data[~idx], target[~idx]

model = StackedAutoEncoder(dims=[200, 200],
                           activations=['linear', 'linear'],
                           epoch=[3000, 3000],
                           loss='rmse',
                           lr=0.007,
                           batch_size=100,
                           print_step=200)
model.fit(train_X)
test_X_ = model.transform(test_X)