Example no. 1
0
File: RCAE.py  Project: kiminh/AMAD
	def __init__(self,instance_dim,hidden_dim):
		"""Build the robust autoencoder graph for flat feature instances.

		Args:
			instance_dim: number of features in one flat input instance.
			hidden_dim: size of the hidden/bottleneck representation.
		"""
		self.instance_dim = instance_dim
		self.hidden_dim = hidden_dim

		hidden_layer = None
		decode_layer = None
		# Building the autoencoder model
		net = tflearn.input_data(shape=[None,self.instance_dim], name="data")
		# Reshape the flat (batch, features) input to 4-D
		# (batch, 1, 1, features) so the conv encoder/decoder can consume it.
		net = tflearn.reshape(net,[-1,1,1,net.shape[1]])
		# encoder/decoder return the transformed net plus the layer handle
		# that the robust-autoencoder regression needs below.
		[net,hidden_layer] = self.encoder(net,hidden_layer)
		[net,decode_layer] = self.decoder(net,decode_layer)
		mue = 0.1  # weight of the robust (rPCA) term in the loss
		net = tflearn.regression_RobustAutoencoder(net,mue,hidden_layer,decode_layer, optimizer='adam', learning_rate=0.001,
						loss='rPCA_autoencoderLoss', metric=None,name="vanilla_autoencoder")
		# Alternative losses kept for reference:
		#rPCA_autoencoderLoss_FobsquareLoss
		#rPCA_autoencoderLoss
		#net = tflearn.regression(net, optimizer='adam', loss='mean_square', metric=None)
		# FIX: the original bound the DNN to a local `model` and discarded it,
		# leaving the instance without a usable model after construction.
		self.model = tflearn.DNN(net, tensorboard_verbose=0, tensorboard_dir='tensorboard/')
                                        strides=2, padding='same', activation='sigmoid',
                                        name='DecConvT2')
    decode_layer = net
    print "========================"
    print "output layer",net.get_shape()
    print "========================"
    return [net,decode_layer]

# Flat-script variant of the robust convolutional autoencoder pipeline:
# build the graph, attach the robust (rPCA) regression, wrap in a DNN.
hidden_layer = None
decode_layer = None
# Building the autoencoder model
# NOTE(review): `side` and `channel` (input image height/width and channel
# count) and the `encoder`/`decoder` helpers must be defined earlier in the
# file -- not visible in this fragment.
net = tflearn.input_data(shape=[None, side, side, channel])
# encoder/decoder return the transformed net plus the layer handle that the
# robust-autoencoder regression needs below.
[net,hidden_layer] = encoder(net,hidden_layer)
[net,decode_layer] = decoder(net,decode_layer)
mue = 0.1  # weight of the robust (rPCA) term in the loss
net = tflearn.regression_RobustAutoencoder(net,mue,hidden_layer,decode_layer, optimizer='adam', learning_rate=0.001,
                         loss='rPCA_autoencoderLoss', metric=None,name="vanilla_autoencoder")
# Alternative loss choices kept for reference:
#rPCA_autoencoderLoss_FobsquareLoss
#rPCA_autoencoderLoss
#net = tflearn.regression(net, optimizer='adam', loss='mean_square', metric=None)
model = tflearn.DNN(net, tensorboard_verbose=0, tensorboard_dir='tensorboard/')



def addNoise(original, noise_factor):
    """Corrupt *original* with zero-mean Gaussian noise, clamped to [0, 1].

    noise_factor is the standard deviation of the additive noise.
    """
    gaussian = np.random.normal(0.0, noise_factor, original.shape)
    corrupted = original + gaussian
    return np.clip(corrupted, 0., 1.)
def add_Salt_Pepper_Noise(original, noise_factor):
    """Corrupt *original* with salt-and-pepper noise, clamped to [0, 1].

    Args:
        original: input array (image batch or single image).
        noise_factor: fraction of pixels to flip to salt/pepper.

    FIX: the original hard-coded amount=0.1 and silently ignored the
    noise_factor parameter; pass the caller's value through instead
    (callers that kept the previous behavior should pass 0.1).
    """
    #noisy = original + np.random.normal(loc=0.0, scale=noise_factor, size=original.shape)
    noisy = skimage.util.random_noise(original, mode='s&p',clip=False,amount=noise_factor)
    return np.clip(noisy, 0., 1.)
def prepare_cifar_data_with_anamolies(original,original_labels,image_and_anamolies):
d = 3072  # flattened CIFAR image size: 32 * 32 * 3
# NOTE(review): lamda_in_cost and N_to_costfunc are defined but unused in
# this fragment -- presumably consumed by the robust cost elsewhere; verify.
lamda_in_cost = 0.01
N_to_costfunc = np.zeros((200,d ))
# Define the convoluted ae architecture
net = tflearn.input_data(shape=[None, 32, 32, 3])
net = tflearn.fully_connected(net, 256)
# Bottleneck layer. NOTE(review): `nb_feature` must be defined earlier in
# the file -- not visible in this fragment.
hidden_layer = tflearn.fully_connected(net, nb_feature)
net = tflearn.fully_connected(hidden_layer, 256)
# Sigmoid keeps reconstructed pixel values in [0, 1]; reshape back to images.
decoder = tflearn.fully_connected(net, 32*32*3,activation='sigmoid')
net = tflearn.reshape(decoder, (-1, 32, 32, 3))

# Plain mean-square regression kept for reference:
# net = tflearn.regression(net, optimizer='adam', learning_rate=0.001,
#                          loss='mean_square', metric=None)

mue = 0.1  # weight of the robust (rPCA) term in the loss
net = tflearn.regression_RobustAutoencoder(net,mue,hidden_layer,decoder, optimizer='adam', learning_rate=0.001,
                         loss='rPCA_autoencoderLoss_FobsquareLoss', metric=None,name="vanilla_autoencoder")

model = tflearn.DNN(net, tensorboard_verbose=0)



def addNoise(original, noise_factor):
    """Return *original* plus zero-mean Gaussian noise, clipped into [0, 1].

    noise_factor is the standard deviation of the noise distribution.
    """
    perturbation = np.random.normal(loc=0.0, scale=noise_factor, size=original.shape)
    return np.clip(original + perturbation, a_min=0., a_max=1.)
def add_Salt_Pepper_Noise(original, noise_factor):
    """Corrupt *original* with salt-and-pepper noise, clamped to [0, 1].

    Args:
        original: input array (image batch or single image).
        noise_factor: fraction of pixels to flip to salt/pepper.

    FIX: the original hard-coded amount=0.1 and silently ignored the
    noise_factor parameter; pass the caller's value through instead
    (callers that kept the previous behavior should pass 0.1).
    """
    #noisy = original + np.random.normal(loc=0.0, scale=noise_factor, size=original.shape)
    noisy = skimage.util.random_noise(original, mode='s&p',clip=False,amount=noise_factor)
    return np.clip(noisy, 0., 1.)
def prepare_cifar_data_with_anamolies(original,original_labels,image_and_anamolies):

    imagelabel = image_and_anamolies['image']