# reg_tSNE.py
'''
Regularized Parametric t-SNE
'''
from time import time  # used by train(); added here in case it is not re-exported by utils
from matplotlib import pyplot as plt
import numpy as np
import keras
from tqdm import tqdm
from keras import backend as K
from keras.layers import Input, LeakyReLU
from keras.models import Sequential, load_model, Model
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D, Conv2D
from keras.optimizers import Adam, SGD
from keras.datasets import mnist
from math import ceil
from RBM import RBM
from keras.losses import mse, binary_crossentropy
import keras.losses
from utils import *
from pretrain_autoencoder import Autoencoder


class neuralREG_tSNE:
    '''
    Class to build a model for parametric t-SNE, regularized parametric (RP) t-SNE,
    and plain autoencoders, depending on the value of theta
    '''

    def __init__(self, data_name='', d_components=2, perplexity=40., epochs=100, lr=0.001, random_state=0,
                 batch_size=100, encoder=None, decoder=None, model=None, theta=0.99):
        self.d_components = d_components
        self.perplexity = perplexity
        self.data_name = data_name
        self.epochs = epochs
        self.lr = lr
        self.random_state = random_state
        np.random.seed(self.random_state)
        self.batch_size = batch_size
        self.model = model  # will hold the autoencoder
        self.encoder = encoder
        self.decoder = decoder
        self.theta = theta

    def build(self, n_input, layer_sizes=np.array([500, 500, 2000]),
              activations=np.array(['sigmoid', 'sigmoid', 'sigmoid'])):
        '''
        Builds the structure of the regularized parametric t-SNE network: an autoencoder
        '''
        if self.model is not None:
            self.model = None
            print('Deleting current model for new model...')

        input = Input(shape=(n_input,), name='input')  # input layer
        # hidden layers for the encoder
        encoded = Dense(layer_sizes[0], activation=activations[0])(input)
        for size, activation in zip(layer_sizes[1:], activations[1:]):
            encoded = Dense(size, activation=activation)(encoded)
        encoded = Dense(self.d_components, activation='linear', name='encoded')(encoded)  # low-dimensional representation

        # hidden layers for the decoder (mirror of the encoder)
        decoded = Dense(layer_sizes[-1], activation=activations[-1])(encoded)  # start of decoder
        for size, activation in zip(np.flip(layer_sizes)[1:], np.flip(activations)[1:]):
            decoded = Dense(size, activation=activation)(decoded)
        decoded = Dense(n_input, activation=activations[0], name='decoded')(decoded)

        # define the autoencoder, encoder and decoder models
        autoencoder = Model(input, outputs=[encoded, decoded])
        self.encoder = Model(input, encoded)
        self.model = autoencoder

        # build a standalone decoder by reapplying the decoder layers to a new latent input
        decoder_input = Input(shape=(self.d_components,))
        decoded = decoder_input
        n_encoder_layers = int(len(self.model._layers) / 2) + 1
        for i in range(n_encoder_layers - 1):
            decoded = self.model._layers[n_encoder_layers + i](decoded)
        self.decoder = Model(decoder_input, decoded)

        self.set_compiler()
        autoencoder.summary()
        self.encoder.summary()
        self.decoder.summary()

    def train(self, X_train, noisy=False):
        """
        Train the regularized parametric t-SNE network
        """
        print('Start training the neural network...')
        X_train = X_train.copy()
        y_train = X_train  # reconstruction targets stay the clean inputs
        if noisy:
            noise = np.random.normal(0, 0.01 ** 0.5, X_train.shape)
            X_train = X_train + noise

        begin = time()
        losses = []
        n_sample, n_feature = X_train.shape
        nBatches = int(n_sample / self.batch_size)
        for epoch in range(self.epochs):
            new_indices = np.random.permutation(n_sample)  # shuffle data for new random batches
            X = X_train[new_indices]
            Y = y_train[new_indices]
            loss = 0
            for i in range(nBatches):
                batch_y = Y[i * self.batch_size:(i + 1) * self.batch_size]
                batch = X[i * self.batch_size:(i + 1) * self.batch_size]
                if self.theta > 0:  # runs faster this way
                    blockPrint()
                    cond_p, _ = cond_probs(batch.copy(), perplexity=self.perplexity)
                    P = joint_average_P(cond_p)
                    enablePrint()
                    if self.theta == 1:  # parametric t-SNE
                        all_losses = self.encoder.train_on_batch(x=batch, y={'encoded': P})
                    else:  # regularized parametric t-SNE
                        all_losses = self.model.train_on_batch(x=batch, y={'encoded': P, 'decoded': batch_y})
                else:  # autoencoder with mse loss
                    all_losses = self.model.train_on_batch(x=batch, y={'decoded': batch_y})
                loss += np.array(all_losses)
            losses.append(loss / nBatches)
            print('Epoch: %d elapsed time: %.2f losses: %s' % (epoch + 1, time() - begin, losses[epoch]))
        return losses

    def predict(self, X):
        """
        Returns the encoded (low-dimensional) prediction for a data set X (n x D) from the autoencoder
        """
        if self.model is None:
            print("Train the model first!")
            return
        Y = self.model.predict(X)
        return Y[0]

    def predict_encoder(self, X):
        """
        Returns the encoded (low-dimensional) prediction for a data set X (n x D) from the encoder
        """
        if self.encoder is None:
            print("Load the encoder first!")
            return
        return self.encoder.predict(X)

    def predict_decoder(self, X):
        '''
        Predicts the reconstructed output from input data (NOT projections)
        '''
        if self.model is None:
            print("Train the model first!")
            return
        Y = self.model.predict(X)
        return Y[1]

    # loading functions:
    def load_model(self, file_path):
        '''
        Load the fine-tuned autoencoder network
        '''
        # set up the autoencoder
        self.model = load_model(file_path, custom_objects={'kl_loss': self.kl_loss, 'mse_loss': self.mse_loss})
        self.set_compiler()

        # access the encoder and decoder of the autoencoder:
        n_encoder_layers = int(len(self.model._layers) / 2) + 1
        self.encoder = Model(self.model.input, self.model.layers[n_encoder_layers - 1].output)
        self.encoder.compile(loss={'encoded': self.kl_loss}, optimizer=Adam(self.lr))

        decoder_input = Input(shape=(self.d_components,))
        decoded = decoder_input
        for i in range(n_encoder_layers - 1):
            decoded = self.model._layers[n_encoder_layers + i](decoded)
        self.decoder = Model(decoder_input, decoded)

    def load_RBM(self, file_path, layer_sizes):
        '''
        Load the autoencoder via pretrained RBMs
        '''
        pretrained = Autoencoder(layer_sizes)
        pretrained = pretrained.pretrained_from_file(file_path)
        self.model, self.encoder, self.decoder = pretrained.unroll()
        self.set_compiler()
        self.encoder.compile(loss={'encoded': self.kl_loss}, optimizer=Adam(self.lr))

    # losses:
    def mse_loss(self, X, Y):
        '''
        MSE loss
        '''
        return mse(X, Y)

    def kl_loss(self, P, Y):
        '''
        KL divergence of t-SNE between the joint probabilities P (high-dimensional space)
        and Q (Student-t distribution over the low-dimensional embedding Y)
        '''
        # calculate the neighbour distribution Q (Student-t) from the embedding Y
        d = self.d_components
        dof = d - 1.  # degrees of freedom for the Student-t distribution
        n = self.batch_size
        eps = K.variable(10e-15)  # needs to be at least 10e-8 to get anything after Q /= K.sum(Q)
        sum_act = K.sum(K.square(Y), axis=1)
        Q = K.reshape(sum_act, [-1, 1]) + -2 * K.dot(Y, K.transpose(Y))
        Q = (sum_act + Q) / dof  # squared pairwise distances scaled by the degrees of freedom
        Q = K.pow(1 + Q, -(dof + 1) / 2)
        # delete the diagonal: only pairwise similarities are considered
        Q *= K.variable(1 - np.eye(n))
        # normalize
        Q /= K.sum(Q)
        Q = K.maximum(Q, eps)
        # KL(P || Q) = sum_ij p_ij * log(p_ij / q_ij)
        C = K.log((P + eps) / (Q + eps))
        C = K.sum(P * C)
        return C

    def set_compiler(self):
        '''
        Compiles the model with the appropriate losses, depending on theta
        '''
        if self.theta == 1:
            self.model.compile(loss={'encoded': self.kl_loss}, optimizer=Adam(self.lr))
        elif self.theta == 0:
            self.model.compile(loss={'decoded': 'mse'}, optimizer=Adam(self.lr))
        else:
            self.model.compile(loss={'encoded': self.kl_loss, 'decoded': 'binary_crossentropy'},
                               optimizer=Adam(self.lr), loss_weights=[self.theta, (1 - self.theta)])

    def save(self, file_path=None):
        '''
        Simple function to save the model
        '''
        make_dir(file_path)
        self.model.save(file_path)
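

# Minimal usage sketch (not part of the original module): illustrates how the class above
# might be driven end to end. It assumes the helpers imported from utils (cond_probs,
# joint_average_P, blockPrint, enablePrint, make_dir) are available on the path, uses the
# MNIST data set already imported above flattened to 784-dimensional vectors in [0, 1],
# and picks arbitrary example hyperparameters and file paths.
if __name__ == '__main__':
    (x_train, _), (x_test, _) = mnist.load_data()
    x_train = x_train.reshape(-1, 784).astype('float32') / 255.
    x_test = x_test.reshape(-1, 784).astype('float32') / 255.

    # theta = 0.99 weights the t-SNE KL loss against the reconstruction loss
    net = neuralREG_tSNE(data_name='mnist', d_components=2, perplexity=40.,
                         epochs=10, batch_size=100, theta=0.99)
    net.build(n_input=784)
    losses = net.train(x_train)

    # 2D embedding of unseen data and its reconstruction
    embedding = net.predict(x_test)
    reconstruction = net.predict_decoder(x_test)
    net.save('models/reg_tsne_mnist.h5')  # hypothetical output path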