def main(*args):
    """Train a VAE experiment on MNIST, then encode the test set and sample generations.

    Args:
        *args: Unused; kept so app runners that forward argv can call this.
    """
    dataset_dir = "../data/mnist-tf"
    mnist = input_data.read_data_sets(dataset_dir)

    # BUG FIX: read_data_sets() with its default dtype already returns float32
    # images rescaled into [0, 1], so the previous extra `/ 255.` squashed all
    # pixel values into [0, 1/255]. Cast only; do not rescale a second time.
    X_train = mnist.train.images.astype('float32')
    X_eval = mnist.validation.images.astype('float32')
    X_test = mnist.test.images.astype('float32')

    # Flatten each split to 2-D [num_samples, num_features] for the dense encoder.
    X_train = X_train.reshape((len(X_train), total_tensor_depth(X_train)))
    X_eval = X_eval.reshape((len(X_eval), total_tensor_depth(X_eval)))
    X_test = X_test.reshape((len(X_test), total_tensor_depth(X_test)))

    xp = experiment_fn("/tmp/polyaxon_logs/vae",
                       {'images': X_train}, mnist.train.labels,
                       {'images': X_eval}, mnist.validation.labels)
    xp.continuous_train_and_evaluate()
    encode(xp.estimator, X_test, mnist.test.labels)
    generate(xp.estimator)
def _build(self, incoming, *args, **kwargs):
    """Apply a fully connected transformation to `incoming`.

    Args:
        incoming: (2+)-D Tensor [samples, input dim]. Inputs with more than
            two dimensions are flattened to 2-D before the matmul.

    Returns:
        2-D Tensor [samples, num_units].
    """
    self._declare_dependencies()
    shape = get_shape(incoming)
    incoming = validate_dtype(incoming)
    assert len(shape) > 1, 'Incoming Tensor shape must be at least 2-D'
    num_inputs = total_tensor_depth(tensor_shape=shape)

    weights_regularizer = getters.get_regularizer(
        self.regularizer, scale=self.scale, collect=True)
    self._w = variable(
        name='w',
        shape=[num_inputs, self.num_units],
        dtype=incoming.dtype,
        regularizer=weights_regularizer,
        initializer=getters.get_initializer(self.weights_init),
        trainable=self.trainable,
        restore=self.restore)
    track(self._w, tf.GraphKeys.LAYER_VARIABLES, self.module_name)

    x = incoming
    if len(shape) > 2:
        # Collapse all trailing dimensions so the matmul sees a 2-D input.
        x = tf.reshape(tensor=x, shape=[-1, num_inputs])
    x = tf.matmul(a=x, b=self._w)

    self._b = None
    if self.bias:
        self._b = variable(
            name='b',
            shape=[self.num_units],
            dtype=incoming.dtype,
            initializer=getters.get_initializer(self.bias_init),
            trainable=self.trainable,
            restore=self.restore)
        track(self._b, tf.GraphKeys.LAYER_VARIABLES, self.module_name)
        x = tf.nn.bias_add(value=x, bias=self._b)

    if self.activation:
        x = getters.get_activation(self.activation, collect=True)(x)
    if self._dropout:
        x = self._dropout(x)

    track(x, tf.GraphKeys.LAYER_TENSOR, self.module_name)
    return x
def _build(self, incoming, *args, **kwargs):
    """Apply a highway transformation: `T * H(x) + (1 - T) * x`.

    Args:
        incoming: (2+)-D Tensor [samples, input dim]. Inputs with more than
            two dimensions are flattened to 2-D first.

    Returns:
        2-D Tensor [samples, num_units].
    """
    self._declare_dependencies()
    shape = get_shape(incoming)
    assert len(shape) > 1, 'Incoming Tensor shape must be at least 2-D'
    num_inputs = total_tensor_depth(tensor_shape=shape)

    weights_regularizer = getters.get_regularizer(
        self.regularizer, scale=self.scale, collect=True)
    weights_initializer = getters.get_initializer(self.weights_init)

    # Affine weights/bias for the carry transformation H.
    self._w = variable(
        name='w', shape=[num_inputs, self.num_units],
        regularizer=weights_regularizer, initializer=weights_initializer,
        trainable=self.trainable, restore=self.restore)
    track(self._w, tf.GraphKeys.LAYER_VARIABLES, self.module_name)
    self._b = variable(
        name='b', shape=[self.num_units],
        initializer=getters.get_initializer(self.bias_init),
        trainable=self.trainable, restore=self.restore)
    track(self._b, tf.GraphKeys.LAYER_VARIABLES, self.module_name)

    # Weights/bias for the transform gate T; bias starts negative so the
    # gate initially favors carrying the input through.
    self._w_t = variable(
        name='w_t', shape=[num_inputs, self.num_units],
        regularizer=None, initializer=weights_initializer,
        trainable=self.trainable, restore=self.restore)
    track(self._w_t, tf.GraphKeys.LAYER_VARIABLES, self.module_name)
    self._b_t = variable(
        name='b_t', shape=[self.num_units],
        initializer=tf.constant_initializer(-1),
        trainable=self.trainable, restore=self.restore)
    track(self._b_t, tf.GraphKeys.LAYER_VARIABLES, self.module_name)

    if len(shape) > 2:
        # Collapse trailing dimensions so both matmuls see a 2-D input.
        incoming = tf.reshape(tensor=incoming, shape=[-1, num_inputs])

    activation_fn = getters.get_activation(self.activation)
    H = activation_fn(tf.matmul(a=incoming, b=self._w) + self._b)
    T = tf.sigmoid(tf.matmul(a=incoming, b=self._w_t) + self._b_t)
    if self._transform_dropout:
        T = self._transform_dropout(T)
    C = tf.subtract(x=1.0, y=T)

    output = tf.add(x=tf.multiply(x=H, y=T), y=tf.multiply(x=incoming, y=C))
    track(output, tf.GraphKeys.ACTIVATIONS)
    track(output, tf.GraphKeys.LAYER_TENSOR, self.module_name)
    return output
def _build(self, incoming, *args, **kwargs):
    """Flatten `incoming` to 2-D, preserving the batch dimension.

    Args:
        incoming: (2+)-D `Tensor`.

    Returns:
        2-D `Tensor` [batch, flatten_dims].
    """
    shape = get_shape(incoming)
    assert len(shape) > 1, 'Incoming Tensor shape must be at least 2-D'
    flat_dims = total_tensor_depth(tensor_shape=shape)
    flattened = tf.reshape(tensor=incoming, shape=[-1, flat_dims])
    track(flattened, tf.GraphKeys.LAYER_TENSOR, self.name)
    return flattened
def _build(self, incoming, *args, **kwargs):
    """Compute a dense (fully connected) layer over `incoming`.

    Args:
        incoming: (2+)-D Tensor [samples, input dim]. Higher-rank inputs
            are flattened to 2-D before multiplication.

    Returns:
        2-D Tensor [samples, num_units].
    """
    self._declare_dependencies()
    in_shape = get_shape(incoming)
    incoming = validate_dtype(incoming)
    assert len(in_shape) > 1, 'Incoming Tensor shape must be at least 2-D'
    depth = total_tensor_depth(tensor_shape=in_shape)

    # Weight matrix, regularized and tracked with this layer's variables.
    self._w = variable(
        name='w',
        shape=[depth, self.num_units],
        dtype=incoming.dtype,
        regularizer=getters.get_regularizer(
            self.regularizer, scale=self.scale, collect=True),
        initializer=getters.get_initializer(self.weights_init),
        trainable=self.trainable,
        restore=self.restore)
    track(self._w, tf.GraphKeys.LAYER_VARIABLES, self.module_name)

    needs_flatten = len(in_shape) > 2
    result = (tf.reshape(tensor=incoming, shape=[-1, depth])
              if needs_flatten else incoming)
    result = tf.matmul(a=result, b=self._w)

    self._b = None
    if self.bias:
        self._b = variable(
            name='b',
            shape=[self.num_units],
            dtype=incoming.dtype,
            initializer=getters.get_initializer(self.bias_init),
            trainable=self.trainable,
            restore=self.restore)
        track(self._b, tf.GraphKeys.LAYER_VARIABLES, self.module_name)
        result = tf.nn.bias_add(value=result, bias=self._b)

    if self.activation:
        activation_fn = getters.get_activation(self.activation, collect=True)
        result = activation_fn(result)
    if self._dropout:
        result = self._dropout(result)

    track(result, tf.GraphKeys.LAYER_TENSOR, self.module_name)
    return result
def _build(self, incoming, *args, **kwargs):
    """Apply an element-wise single-unit transformation: `w * x (+ b)`.

    Args:
        incoming: 1-D Tensor [samples]. Higher-rank inputs are flattened
            to 1-D first.

    Returns:
        1-D Tensor [samples].
    """
    in_shape = get_shape(incoming)
    depth = total_tensor_depth(tensor_shape=in_shape)

    # A single scalar drawn at graph-construction time seeds both the
    # weight and (if enabled) bias initializers.
    init = tf.constant_initializer(value=np.random.randn())
    self._w = variable(name='w', shape=[depth], dtype=incoming.dtype,
                       initializer=init, trainable=self.trainable,
                       restore=self.restore)
    track(self._w, tf.GraphKeys.LAYER_VARIABLES, self.module_name)

    output = incoming
    if len(in_shape) > 1:
        # Flatten so the element-wise multiply sees a 1-D tensor.
        output = tf.reshape(tensor=output, shape=[-1])
    output = tf.multiply(x=output, y=self._w)

    self._b = None
    if self.bias:
        self._b = variable(name='b', shape=[depth], dtype=incoming.dtype,
                           initializer=init, trainable=self.trainable,
                           restore=self.restore)
        output = tf.add(output, self._b)
        track(self._b, tf.GraphKeys.LAYER_VARIABLES, self.module_name)

    if self.activation:
        output = getters.get_activation(self.activation, collect=True)(output)

    track(output, tf.GraphKeys.LAYER_TENSOR, self.module_name)
    return output