Code example #1
File: model.py    Project: xlnwel/model-free-algorithms
    def _setup_stats_logs(self, stats_info, name=None):
        """
        stats_info are not in self._build_graph to avoid being included in self.graph_summary
        """
        def setup():
            stats = {}
            stats['counter'] = counter = tf.Variable(0,
                                                     trainable=False,
                                                     name='counter')
            step_op = tf.assign(counter, counter + 1, name='counter_update')
            merge_inputs = []
            for info in stats_info:
                stats[info] = info_ph = tf.placeholder(tf.float32, name=info)
                stats[f'{info}_log'] = log = tf.summary.scalar(
                    f'{info}_', info_ph)
                merge_inputs.append(log)

            with tf.control_dependencies([step_op]):
                stats['log_op'] = tf.summary.merge(merge_inputs, name='log_op')

            self.sess.run(tf.variables_initializer([counter]))

            return stats

        """ Function Body """
        with self.graph.as_default():
            with tf.variable_scope('stats'):
                # stats logs for each worker
                return wrap_scope(name, setup)
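
For reference, wrap_scope itself does not appear in this listing. Judging from how it is called here (a scope name plus a zero-argument function), a minimal sketch of such a helper could look like the following; the actual implementation in tf_utils may differ:

    import tensorflow as tf

    def wrap_scope(name, fn):
        # Call fn inside a named variable scope when a name is given,
        # otherwise call it directly.
        if name is None:
            return fn()
        with tf.variable_scope(name):
            return fn()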
Code example #2
    def dense_norm_activation(self, x, units, use_bias=True, kernel_initializer=tf_utils.kaiming_initializer(),
                              norm=tc.layers.layer_norm, activation=tf.nn.relu, name=None):
        def layer_imp():
            y = self.dense(x, units, use_bias=use_bias, kernel_initializer=kernel_initializer)
            y = tf_utils.norm_activation(y, norm=norm, activation=activation,
                                         training=self.training)

            return y

        x = tf_utils.wrap_scope(name, layer_imp)

        return x
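
The *_norm_activation wrappers in this listing all delegate to tf_utils.norm_activation, which is likewise not shown. A plausible minimal sketch, assuming it applies an optional normalization followed by an optional activation, with the training flag only needed for batch normalization:

    def norm_activation(x, norm=None, activation=None, training=False):
        # Optional normalization, then optional activation.
        if norm is not None:
            if norm == tf.layers.batch_normalization:
                # batch norm needs to know whether we are training
                x = norm(x, training=training)
            else:
                # e.g. tc.layers.layer_norm, which takes no training flag
                x = norm(x)
        if activation is not None:
            x = activation(x)
        return x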
Code example #3
    def layer_norm_activation(self, x, layer, norm=None, activation=tf.nn.relu, name=None):
        """ This function implicitly handles training for batch normalization if self.training is defined """
        def layer_imp():
            y = layer(x)
            y = tf_utils.norm_activation(y, norm=norm, activation=activation,
                                         training=self.training)

            return y

        x = tf_utils.wrap_scope(name, layer_imp)

        return x
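
A hypothetical usage of layer_norm_activation, wrapping an arbitrary layer callable (the conv layer and the scope name below are illustrative, not taken from the repo):

    # wrap a conv layer with batch norm and ReLU
    conv = lambda t: tf.layers.conv2d(t, filters=32, kernel_size=3, padding='same')
    h = self.layer_norm_activation(
        x, conv, norm=tf.layers.batch_normalization, name='conv_bn_relu')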
Code example #4
    def noisy_norm_activation(self, x, units, kernel_initializer=tf_utils.kaiming_initializer(),
                              norm=tc.layers.layer_norm, activation=tf.nn.relu,
                              name=None, sigma=.4):
        def layer_imp():
            o, y = self.noisy(x, units, kernel_initializer=kernel_initializer,
                              name=name, sigma=sigma, return_noise=True)
            y = tf_utils.norm_activation(y, norm=norm, activation=activation,
                                         training=self.training)

            return o + y

        result = tf_utils.wrap_scope(name, layer_imp)

        return result
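
self.noisy is not part of this listing either; given the call signature (units, sigma, return_noise), it is presumably a NoisyNet-style dense layer (Fortunato et al., 2017). A minimal sketch of the underlying idea with independent Gaussian weight noise and hypothetical variable names follows; note that the repo's version returns two tensors when return_noise=True, whose exact roles are not shown here:

    def noisy_dense(x, units, sigma=.4):
        # y = x @ (w_mu + w_sigma * eps), eps ~ N(0, 1)
        in_dim = x.shape.as_list()[-1]
        w_mu = tf.get_variable('w_mu', [in_dim, units])
        w_sigma = tf.get_variable(
            'w_sigma', [in_dim, units],
            initializer=tf.constant_initializer(sigma / in_dim ** .5))
        eps = tf.random_normal([in_dim, units])
        return tf.matmul(x, w_mu + w_sigma * eps)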
Code example #5
    def convtrans_norm_activation(self, x, filters, kernel_size, strides, padding='same',
                                  kernel_initializer=tf_utils.kaiming_initializer(),
                                  norm=tf.layers.batch_normalization,
                                  activation=tf.nn.relu, name=None):
        def layer_imp():
            y = self.convtrans(x, filters, kernel_size,
                               strides=strides, padding=padding,
                               kernel_initializer=kernel_initializer)
            y = tf_utils.norm_activation(y, norm=norm, activation=activation,
                                         training=self.training)

            return y

        x = tf_utils.wrap_scope(name, layer_imp)

        return x
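
A hypothetical usage of convtrans_norm_activation in a decoder, doubling the spatial resolution of a feature map (the numbers and scope name are illustrative):

    # 2x upsampling with a transposed conv, batch norm and ReLU
    h = self.convtrans_norm_activation(
        h, filters=64, kernel_size=4, strides=2, name='deconv1')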