def __init__(self, D, hidden_layer_sizes, loss_fn='sigmoid_cross_entropy'):
    self.hidden_layer_sizes = hidden_layer_sizes

    # input batch of training data (batch_size x D):
    self.X = tf.placeholder(tf.float32, shape=(None, D))

    # create hidden layers:
    self.hidden_layers = []
    M1 = D
    for i, M2 in enumerate(self.hidden_layer_sizes):
        h = HiddenLayer(M1, M2, i)
        self.hidden_layers.append(h)
        M1 = M2

    # hidden --> output layer parameters:
    Wo, bo = init_weights_and_biases(M2, D)
    self.Wo = tf.Variable(Wo, name='Wo')  # (M2 x D)
    self.bo = tf.Variable(bo, name='bo')  # D

    # collect all network parameters:
    self.params = []
    for h in self.hidden_layers:
        self.params += h.params
    self.params += [self.Wo, self.bo]

    # get output - our reconstruction:
    logits = self.forward(self.X)

    if loss_fn == 'sigmoid_cross_entropy':
        # inputs and outputs are assumed to be Bernoulli probabilities
        self.output = tf.nn.sigmoid(logits)
        # define the cost function:
        self.cost = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                labels=self.X,
                logits=logits
            )
        )
    elif loss_fn == 'mse':
        # the difference (error) between inputs and outputs is assumed Gaussian
        self.output = tf.nn.sigmoid(logits)  # assuming output is in range [0, 1]
        # self.output = logits  # alternative: linear (unbounded) reconstruction
        self.cost = tf.reduce_mean(
            tf.losses.mean_squared_error(
                labels=self.X,
                predictions=self.output
            )
        )

    # define session:
    self.sess = tf.InteractiveSession()
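This constructor leans on two pieces defined elsewhere in the codebase: the init_weights_and_biases helper and the network's forward method. Neither appears in this section; below is a minimal sketch consistent with the shapes used above, assuming a simple scaled-Gaussian initialization (the exact scheme is an assumption):

import numpy as np
import tensorflow as tf  # TensorFlow 1.x API

def init_weights_and_biases(M1, M2):
    # scaled-Gaussian initialization is an assumption; any float32 init fits
    W = np.random.randn(M1, M2) / np.sqrt(M1)
    b = np.zeros(M2)
    return W.astype(np.float32), b.astype(np.float32)

def forward(self, X):
    # encode: pass the batch through each hidden layer in turn
    Z = X
    for h in self.hidden_layers:
        Z = h.forward(Z)
    # decode: map back to the input dimension D; logits are returned
    # so the constructor can choose between sigmoid and linear output
    return tf.matmul(Z, self.Wo) + self.bo

Returning logits rather than probabilities matters here: tf.nn.sigmoid_cross_entropy_with_logits expects raw logits and applies the sigmoid internally for numerical stability.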
def __init__(self, D, hidden_layer_sizes, loss_fn='sigmoid_cross_entropy'):
    self.hidden_layer_sizes = hidden_layer_sizes

    # input batch of training data (batch_size x D):
    self.X = T.matrix('X')

    # create hidden layers:
    self.hidden_layers = []
    M1 = D
    for i, M2 in enumerate(self.hidden_layer_sizes):
        h = HiddenLayer(M1, M2, i)
        self.hidden_layers.append(h)
        M1 = M2

    # hidden --> output layer parameters:
    Wo, bo = init_weights_and_biases(M2, D)
    self.Wo = theano.shared(Wo, name='Wo')  # (M2 x D)
    self.bo = theano.shared(bo, name='bo')  # D

    # collect all network parameters:
    self.params = []
    for h in self.hidden_layers:
        self.params += h.params
    self.params += [self.Wo, self.bo]

    # get output - our reconstruction:
    self.output = self.forward(self.X)

    if loss_fn == 'sigmoid_cross_entropy':
        # inputs and outputs are assumed to be Bernoulli probabilities
        self.output = T.nnet.sigmoid(self.output)
        # define the cost function; equivalent to the explicit form:
        # -T.mean(self.X*T.log(self.output) + (1 - self.X)*T.log(1 - self.output))
        self.cost = T.mean(
            T.nnet.binary_crossentropy(output=self.output, target=self.X)
        )
    elif loss_fn == 'mse':
        # the difference (error) between inputs and outputs is assumed Gaussian
        self.output = T.nnet.sigmoid(self.output)  # assuming output is in range [0, 1]
        self.cost = T.mean((self.X - self.output)**2)

    self.predict = theano.function(inputs=[self.X], outputs=self.output)
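The Theano version presumably pairs with an analogous forward method; a sketch under the same assumptions (each hidden layer exposes a forward, and the output activation is applied by the constructor above, not here):

import theano
import theano.tensor as T

def forward(self, X):
    # encode through the hidden layers, then decode back to D dimensions;
    # the reconstruction is returned pre-activation, matching the TF version
    Z = X
    for h in self.hidden_layers:
        Z = h.forward(Z)
    return Z.dot(self.Wo) + self.bo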
def __init__(self, M1, M2, an_id, f=T.nnet.relu):
    W0, b0 = init_weights_and_biases(M1, M2)
    self.W = theano.shared(W0, name='W%s' % an_id)
    self.b = theano.shared(b0, name='b%s' % an_id)
    self.f = f
    self.params = [self.W, self.b]
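The layer stores its activation f, but the forward pass itself is not shown in this section; the conventional one-liner would be:

def forward(self, X):
    # affine transform followed by the layer's nonlinearity (ReLU by default)
    return self.f(X.dot(self.W) + self.b)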
def __init__(self, M1, M2, an_id):
    W0, b0 = init_weights_and_biases(M1, M2)
    self.W = tf.Variable(W0, name='W%s' % an_id)
    self.b = tf.Variable(b0, name='b%s' % an_id)
    self.params = [self.W, self.b]
def __init__(self, M1, M2, an_id, f=tf.nn.relu):
    W0, b0 = init_weights_and_biases(M1, M2)
    self.W = tf.Variable(W0, name='W%s' % an_id)
    self.b = tf.Variable(b0, name='b%s' % an_id)
    self.f = f
    self.params = [self.W, self.b]
def __init__(self, M1, M2, an_id, f=tf.nn.relu):
    W0, b0 = init_weights_and_biases(M1, M2)
    self.W = tf.Variable(W0, name='W%s' % an_id)
    self.b = tf.Variable(b0, name='b%s' % an_id)
    self.f = f
    self.params = [self.W, self.b]  # needed so the network can collect h.params
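The TensorFlow layer variants would pair with a forward like the sketch below (an assumption, as it is not shown in this section); for the activation-free variant above, the network applies the nonlinearity itself instead of the layer:

def forward(self, X):
    # affine transform followed by the nonlinearity (tf.nn.relu by default)
    return self.f(tf.matmul(X, self.W) + self.b)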