def get_hidden_values(self, input):
    """Compute the values of the hidden layer.

    Arguments:
        input: the data to feed through the input layer to the hidden
            layer; either a dense Theano ``TensorVariable`` (converted to
            sparse CSC form) or an already-sparse variable.

    Returns:
        The encoded (lower dimensional) feature vector for a given asin
        in the form of a 1 hot encoding vector.
    """
    # isinstance, not type() ==, so subclasses of TensorVariable are
    # also converted to sparse form before the sparse dot product.
    if isinstance(input, T.TensorVariable):
        sparse_input = basic.csc_from_dense(input)
    else:
        sparse_input = input
    # Sparse-aware matrix product with the weight matrix.
    lin_output = basic.dot(sparse_input, self.W)
    # Bias and activation are both optional; apply only what is configured.
    if self.activation is None and self.b is None:
        return lin_output
    elif self.activation is None:
        return lin_output + self.b
    elif self.b is None:
        return self.activation(lin_output)
    else:
        return self.activation(lin_output + self.b)
def reconstruct_input(samples, labels, c, count):
    """Encode one mini-batch through the network and append the result,
    one JSON object per batch, to the reconstruction output file.

    Arguments:
        samples: sparse mini-batch of inputs (anything with ``.toarray()``).
        labels: unused here; kept for interface compatibility with callers.
        c: passed through unchanged as the return value.
        count: running sample counter; the batch's first record id is
            ``count - MINI_BATCH_SIZE + 1``.

    Returns:
        ``c``, unchanged.
    """
    # Densify then re-sparsify into CSC form expected by the network.
    batch = basic.csc_from_dense(samples.toarray())
    hidden = da.get_hidden_values(batch)
    # NOTE(review): this writes the *encodings*, not the reconstruction —
    # the reconstruction call is kept here for reference:
    # da.get_reconstructed_input(hidden).eval()
    encoded = hidden.eval()

    # Build {record_id: {"x": vector, "y": vector}} for this batch.
    base = count - MINI_BATCH_SIZE + 1
    records = dict()
    for offset, row in enumerate(encoded):
        records[base + offset] = {"x": row.tolist(), "y": row.tolist()}

    # Append the whole batch as a single JSON line.
    with open(get_reconstruction_output_path(dataset), "a+") as handle:
        handle.write(json.dumps(records) + "\n")
    return c
def get_hidden_values(self, input):
    """Compute the values of the hidden layer.

    Arguments:
        input: either a dense Theano ``TensorVariable`` (converted to
            sparse CSC form) or an already-sparse variable.

    Returns:
        The hidden-layer output: the sparse dot of the input with
        ``self.W``, plus optional bias ``self.b`` and optional
        ``self.activation``.
    """
    # isinstance, not type() ==, so subclasses of TensorVariable are
    # also converted to sparse form before the sparse dot product.
    # (Dead commented-out dispatch code removed.)
    if isinstance(input, T.TensorVariable):
        sparse_input = basic.csc_from_dense(input)
    else:
        sparse_input = input
    # Sparse-aware matrix product with the weight matrix.
    lin_output = basic.dot(sparse_input, self.W)
    # Bias and activation are both optional; apply only what is configured.
    if self.activation is None and self.b is None:
        return lin_output
    elif self.activation is None:
        return lin_output + self.b
    elif self.b is None:
        return self.activation(lin_output)
    else:
        return self.activation(lin_output + self.b)