def _forward(self):
    """Build the convolutional forward graph using per-task parameters.

    Lazily creates ``self.task_parameter`` from ``self.outer_param_dict``
    on first use, appends one conv block per entry of ``self.dim_hidden``
    (plain, T-layer, or Warp variant), then attaches the final linear
    head. The ``self + tensor`` expression is the class's graph-append
    operator — presumably it records the tensor as the new ``self.out``;
    statement order is therefore significant and preserved here.
    """
    if self.task_parameter is None:
        self.task_parameter = self.create_initial_parameter(
            primary_outerparameter=self.outer_param_dict)

    for layer_idx in range(len(self.dim_hidden)):
        # Dict keys must match the parameter-creation naming scheme.
        conv_w = self.task_parameter["conv" + str(layer_idx)]
        conv_b = self.task_parameter["bias" + str(layer_idx)]
        if self.use_t:
            self + network_utils.conv_block_t(
                self, conv_w, conv_b,
                self.model_param_dict["conv" + str(layer_idx) + "_z"])
        elif self.use_warp:
            self + network_utils.conv_block_warp(
                self, conv_w, conv_b,
                self.model_param_dict["conv" + str(layer_idx) + "_z"],
                self.model_param_dict["bias" + str(layer_idx) + "_z"])
        else:
            self + network_utils.conv_block(self, conv_w, conv_b)

    head = str(len(self.dim_hidden))
    if self.max_pool:
        # Flatten all non-batch dims, then apply the dense head.
        flat_dim = np.prod([int(d) for d in self.out.get_shape()[1:]])
        self + tf.reshape(self.out, [-1, flat_dim])
        self + tf.add(
            tf.matmul(self.out, self.task_parameter["w" + head]),
            self.task_parameter["bias" + head])
    else:
        # No max-pool: global average over the spatial axes instead.
        self + tf.add(
            tf.matmul(tf.reduce_mean(self.out, [1, 2]),
                      self.task_parameter["w" + head]),
            self.task_parameter["bias" + head])

    if self.use_t:
        # T-net variant appends one extra learned linear transform.
        self + tf.matmul(self.out, self.model_param_dict["w" + head + "_z"])
def _forward(self):
    """Build the convolutional forward graph from the outer (meta) parameters.

    Appends one conv block per entry of ``self.dim_hidden`` — the plain,
    T-layer (``use_T``) or Warp (``use_Warp``) variant — then either
    flattens the output into a tensor named 'representation'
    (``self.flatten``) or reduces the spatial dimensions: a reshape when
    ``self.max_pool`` is set, a global spatial mean otherwise.

    NOTE(review): ``self + tensor`` appears to be the class's
    graph-append operator (presumably updating ``self.out`` /
    ``self.layers``) — confirm against the class definition.
    """
    for i in range(len(self.dim_hidden)):
        if self.use_T:
            self + network_utils.conv_block_t(
                self,
                self.outer_param_dict["conv" + str(i)],
                self.outer_param_dict["bias" + str(i)],
                self.model_param_dict["conv" + str(i) + "_z"],
            )
        elif self.use_Warp:
            self + network_utils.conv_block_warp(
                self,
                self.outer_param_dict["conv" + str(i)],
                self.outer_param_dict["bias" + str(i)],
                self.model_param_dict["conv" + str(i) + "_z"],
                self.model_param_dict["bias" + str(i) + "_z"],
            )
        else:
            self + network_utils.conv_block(
                self,
                self.outer_param_dict["conv" + str(i)],
                self.outer_param_dict["bias" + str(i)],
            )
    if self.flatten:
        # Product of all non-batch dims of the last layer's static shape.
        flattened_shape = reduce(lambda a, v: a * v,
                                 self.layers[-1].get_shape().as_list()[1:])
        self + tf.reshape(
            self.out, shape=(-1, flattened_shape), name="representation")
    else:
        if self.max_pool:
            # Flatten spatial dims after max-pooling conv blocks.
            self + tf.reshape(
                self.out,
                [
                    -1,
                    np.prod([int(dim) for dim in self.out.get_shape()[1:]])
                ],
            )
        else:
            # Global average pooling over the spatial axes.
            self + tf.reduce_mean(self.out, [1, 2])
def _forward(self):
    """Run the convolutional forward pass with the outer parameters.

    One conv block per hidden layer (plain / T-net / Warp variant),
    followed by either flattening into a tensor named 'representation'
    or a spatial reduction: a reshape when ``max_pool`` is set, a global
    spatial mean otherwise. ``self + tensor`` is the class's
    graph-append operator, so statement order matters and is preserved.
    """
    for idx in range(len(self.dim_hidden)):
        weight = self.outer_param_dict[f"conv{idx}"]
        bias = self.outer_param_dict[f"bias{idx}"]
        if self.use_t:
            self + network_utils.conv_block_t(
                self, weight, bias, self.model_param_dict[f"conv{idx}_z"])
        elif self.use_warp:
            self + network_utils.conv_block_warp(
                self, weight, bias,
                self.model_param_dict[f"conv{idx}_z"],
                self.model_param_dict[f"bias{idx}_z"])
        else:
            self + network_utils.conv_block(self, weight, bias)

    if self.flatten:
        # Collapse every non-batch dim of the last layer's static shape.
        feature_dim = reduce(lambda acc, d: acc * d,
                             self.layers[-1].get_shape().as_list()[1:])
        self + tf.reshape(self.out, shape=(-1, feature_dim),
                          name="representation")
    elif self.max_pool:
        # Flatten spatial dims after max-pooling conv blocks.
        self + tf.reshape(
            self.out,
            [-1, np.prod([int(d) for d in self.out.get_shape()[1:]])])
    else:
        # Global average pooling over the spatial axes.
        self + tf.reduce_mean(self.out, [1, 2])