Example #1
    def __init__(self, **kwargs):
        self.config = kwargs.pop(str('config'), None)
        self.layer_type = self.class_name
        self.batch_size = self.config.getint('simulation', 'batch_size')
        self.dt = self.config.getfloat('simulation', 'dt')
        self.duration = self.config.getint('simulation', 'duration')
        self.tau_refrac = self.config.getfloat('cell', 'tau_refrac')
        self._v_thresh = self.config.getfloat('cell', 'v_thresh')
        self.v_thresh = None
        self.time = None
        self.mem = self.spiketrain = self.impulse = None
        self.refrac_until = None
        self.last_spiketimes = None

        allowed_kwargs = {
            'input_shape',
            'batch_input_shape',
            'batch_size',
            'dtype',
            'name',
            'trainable',
            'weights',
            'input_dtype',  # legacy
        }
        for kwarg in kwargs.copy():
            if kwarg not in allowed_kwargs:
                kwargs.pop(kwarg)
        Layer.__init__(self, **kwargs)
        self.stateful = True
Example #2
File: layers.py Project: sedghi/neuron
    def __init__(self,
                 shape,
                 my_initializer='RandomNormal',
                 dtype=None,
                 name=None,
                 mult=1.0,
                 **kwargs):

        
        # some input checking
        if not name:
            prefix = 'local_param'
            name = prefix + '_' + str(backend.get_uid(prefix))
            
        if not dtype:
            dtype = backend.floatx()
        
        self.shape = [1, *shape]
        self.my_initializer = my_initializer
        self.mult = mult

        Layer.__init__(self, name=name, **kwargs)

        # Create a trainable weight variable for this layer.
        with K.name_scope(self.name):
            self.kernel = self.add_weight(name='kernel',
                                          shape=shape,
                                          initializer=self.my_initializer,
                                          dtype=dtype,
                                          trainable=True)

        # prepare output tensor, which is essentially the kernel.
        output_tensor = K.expand_dims(self.kernel, 0) * self.mult
        output_tensor._keras_shape = self.shape
        output_tensor._uses_learning_phase = False
        output_tensor._keras_history = base_layer.KerasHistory(self, 0, 0)
        output_tensor._batch_input_shape = self.shape

        self.trainable = True
        self.built = True    
        self.is_placeholder = False

        # create new node
        Node(self,
            inbound_layers=[],
            node_indices=[],
            tensor_indices=[],
            input_tensors=[],
            output_tensors=[output_tensor],
            input_masks=[],
            output_masks=[None],
            input_shapes=[],
            output_shapes=self.shape)
Example #3
    def __init__(self, **kwargs):
        self.config = kwargs.pop(str('config'), None)
        if self.config is None:
            from snntoolbox.bin.utils import load_config
            # Todo: Enable loading config here. Needed when trying to load a
            #       converted SNN from disk. For now we specify a dummy path.
            try:
                self.config = load_config('wdir/log/gui/test/.config')
            except FileNotFoundError:
                raise NotImplementedError
        self.layer_type = self.class_name
        self.dt = self.config.getfloat('simulation', 'dt')
        self.duration = self.config.getint('simulation', 'duration')
        self.tau_refrac = self.config.getfloat('cell', 'tau_refrac')
        self._v_thresh = self.config.getfloat('cell', 'v_thresh')
        self.v_thresh = None
        self.time = None
        self.mem = self.spiketrain = self.impulse = self.spikecounts = None
        self.mem_input = None  # Used in MaxPooling layers
        self.refrac_until = self.max_spikerate = None
        if clamp_var:
            self.spikerate = self.var = None

        from snntoolbox.utils.utils import get_abs_path
        path, filename = \
            get_abs_path(self.config.get('paths', 'filename_clamp_indices'),
                         self.config)
        if filename != '':
            filepath = os.path.join(path, filename)
            assert os.path.isfile(filepath), \
                "File with clamp indices not found at {}.".format(filepath)
            self.filename_clamp_indices = filepath
            self.clamp_idx = None

        self.payloads = None
        self.payloads_sum = None
        self.online_normalization = self.config.getboolean(
            'normalization', 'online_normalization')

        allowed_kwargs = {
            'input_shape',
            'batch_input_shape',
            'batch_size',
            'dtype',
            'name',
            'trainable',
            'weights',
            'input_dtype',  # legacy
        }
        for kwarg in kwargs.copy():
            if kwarg not in allowed_kwargs:
                kwargs.pop(kwarg)
        Layer.__init__(self, **kwargs)
        self.stateful = True
        self._floatx = tf.keras.backend.floatx()
Example #4
    def __init__(self, units, rbf_units_trainable=False, use_gaussian_kernel=True, **kwargs):
        self.__init_centers = None
        self.__init_radius = None
        self.__bias = None
        self.__rbf_trainable = rbf_units_trainable
        self.__GaussianKernel = use_gaussian_kernel
        self.__input_dimension = 0
        self.__input_batchsize = 0

        if isinstance(units, int):
            self.__rbf_kernel_n = units
        else:
            raise Exception('Only int can be set as num of rbf kernels.')

        Layer.__init__(self, **kwargs)
Example #5
 def add_layer(self,
               layer: Layer,
               flops: float = 0,
               repeat_count: int = 1,
               num_params: typing.Optional[int] = None,
               output_shape: typing.Optional[typing.Tuple] = None) -> None:
     if not isinstance(layer, Layer):
         raise ValueError(
             "Invalid argument type: '{}' (must be 'Layer').".format(
                 type(layer)))
     try:
         out_shape = (output_shape or layer.output_shape)[1:]
     except AttributeError as e:
         print("Cannot get shape for layer '{}'.".format(layer.name))
         raise e
     num_activations = repeat_count * np.prod(out_shape)
     if isinstance(layer, (Conv1D, Conv2D, Dense)):
         if layer.activation != linear:
             num_activations *= 2
     num_params = num_params or layer.count_params()
     self.add(
         Summary(name=layer.name,
                 out_shape=out_shape,
                 flops=repeat_count * flops,
                 num_params=num_params,
                 num_activations=num_activations))
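The method above is presumably driven by a loop over a model's layers so the per-layer FLOPs, parameters, and activations accumulate into one summary. A minimal usage sketch, assuming `ModelSummary` is the class defining `add_layer`/`add` (the zero FLOP figure is a placeholder):

summary = ModelSummary()  # hypothetical container that defines add_layer() and add()
for layer in model.layers:
    summary.add_layer(layer, flops=0)  # substitute real per-layer FLOP counts if known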
Example #6
def compute_time(x: layers.Layer, n: int):
    print("computing time...")
    if n == 1:
        x_test = np.ones((2000, x.input_shape[1]))
    elif n == 3:
        x_test = np.ones(
            (2000, x.input_shape[1], x.input_shape[2], x.input_shape[3]))
    arg = tf.convert_to_tensor(x_test, dtype=tf.float32)
    steps = 10000
    tt = np.zeros(steps)
    start_time = time.time()
    for i in range(steps):
        x.call(arg)
        end_time = time.time()
        tt[i] = end_time - start_time
        start_time = end_time
    return np.mean(tt), np.std(tt)
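A possible call site for the helper above, assuming a built model whose second layer expects 4-D image input (only n=1 and n=3 are handled):

mean_t, std_t = compute_time(model.layers[1], n=3)
print('per-call time: %.6f s +/- %.6f s' % (mean_t, std_t))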
Example #7
    def __init__(self, **kwargs):
        self.config = kwargs.pop(str('config'), None)
        self.layer_type = self.class_name
        self.dt = self.config.getfloat('simulation', 'dt')
        self.duration = self.config.getint('simulation', 'duration')
        self.tau_refrac = self.config.getfloat('cell', 'tau_refrac')
        self._v_thresh = self.config.getfloat('cell', 'v_thresh')
        self.v_thresh = None
        self.time = None
        self.mem = self.spiketrain = self.impulse = self.spikecounts = None
        self.refrac_until = self.max_spikerate = None
        if clamp_var:
            self.spikerate = self.var = None

        from snntoolbox.utils.utils import get_abs_path
        path, filename = \
            get_abs_path(self.config.get('paths', 'filename_clamp_indices'),
                         self.config)
        if filename != '':
            filepath = os.path.join(path, filename)
            assert os.path.isfile(filepath), \
                "File with clamp indices not found at {}.".format(filepath)
            self.filename_clamp_indices = filepath
            self.clamp_idx = None

        self.payloads = None
        self.payloads_sum = None
        self.online_normalization = self.config.getboolean(
            'normalization', 'online_normalization')

        allowed_kwargs = {
            'input_shape',
            'batch_input_shape',
            'batch_size',
            'dtype',
            'name',
            'trainable',
            'weights',
            'input_dtype',  # legacy
        }
        for kwarg in kwargs.copy():
            if kwarg not in allowed_kwargs:
                kwargs.pop(kwarg)
        Layer.__init__(self, **kwargs)
        self.stateful = True
        self._floatx = tf.keras.backend.floatx()
Example #8
    def feedforward_layers(self, final_activation=None):
        '''Iterate layers of dropout-dense-batch norm'''
        X = Input(shape=(self.X_train.shape[1], ))

        layer = Layer(name='identity')(X)

        n_layers = len(self.params['layers'])

        for i, units in enumerate(self.params['layers']):

            drop_rate = self.params.get('drop_rates', [0.0] * n_layers)[i]
            if drop_rate > 0.0:
                layer = Dropout(drop_rate,
                                noise_shape=None,
                                seed=None,
                                name='drop_' + str(i+1))(layer)

            layer = Dense(units,
                        activation=self.params.get('activation', None),
                        kernel_initializer=self.initializer,
                        bias_initializer='zeros',
                        kernel_regularizer=self.regularizer,
                        bias_regularizer=None,
                        activity_regularizer=None,
                        kernel_constraint=None,
                        bias_constraint=None,
                        name='dense_' + str(i+1))(layer)

            if self.params.get('use_batch_norm', [False] * n_layers)[i]:
                layer = BatchNormalization(axis=-1,
                                           momentum=self.params.get('batch_norm_momentum', 0.99),
                                           epsilon=0.001,
                                           center=True,
                                           scale=True,
                                           beta_initializer='zeros',
                                           gamma_initializer='ones',
                                           moving_mean_initializer='zeros',
                                           moving_variance_initializer='ones',
                                           beta_regularizer=None,
                                           gamma_regularizer=None,
                                           beta_constraint=None,
                                           gamma_constraint=None,
                                           name='bn_'+str(i+1))(layer)

        outputs = Dense(1,
                    activation=final_activation,
                    kernel_initializer=self.initializer,
                    bias_initializer='zeros',
                    kernel_regularizer=None,
                    bias_regularizer=None,
                    activity_regularizer=None,
                    kernel_constraint=None,
                    bias_constraint=None,
                    name='outputs')(layer)

        return X, outputs
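The method returns symbolic tensors, so the caller still wraps them in a Model. A hedged sketch of that step (optimizer and loss are illustrative):

X, outputs = self.feedforward_layers(final_activation='sigmoid')
model = Model(inputs=X, outputs=outputs)
model.compile(optimizer='adam', loss='binary_crossentropy')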
Example #9
def reduce_encoder_output(encoder_output, encoder_reduction):
    if encoder_reduction == EncoderReduction.GA_POOLING:
        reduced = GlobalAveragePooling2D()(encoder_output)
    elif encoder_reduction == EncoderReduction.FLATTEN:
        reduced = Flatten()(encoder_output)
    elif encoder_reduction == EncoderReduction.GA_ATTENTION:
        reduced = Layer()(attention_ga_pooling(encoder_output))
    else:
        raise ValueError()
    return reduced
Example #10
    def __init__(self, num_head, 
                 mu_shared_nodes, mu_branched_nodes=None, 
                 sigma_shared_nodes=None, sigma_branched_nodes=None, activation='relu',
                 single_head_multi_out=False):
        Layer.__init__(self)
        mu_shared_layers = []
        for msn in mu_shared_nodes:
            mu_shared_layers.append(Dense(msn, activation=activation))
            
        mu_branched_layers = []
        if mu_branched_nodes is not None:
            for mbn in mu_branched_nodes:
                mu_branched_layers.append([Dense(mbn, activation=activation) for _ in range(num_head)])
                
        if sigma_shared_nodes is None:
            sigma_shared_layers = mu_shared_layers
        else:
            sigma_shared_layers = []
            for ssn in sigma_shared_nodes:
                sigma_shared_layers.append(Dense(ssn, activation=activation))
                
        if sigma_branched_nodes is None:
            sigma_branched_layers = mu_branched_layers
        else:
            sigma_branched_layers = []
            for sbn in sigma_branched_nodes:
                sigma_branched_layers.append([Dense(sbn, activation=activation) for _ in range(num_head)])

        self.num_head = num_head
        self.mu_shared_layers = mu_shared_layers
        self.mu_branched_layers = mu_branched_layers
        self.sigma_shared_layers = sigma_shared_layers
        self.sigma_branched_layers = sigma_branched_layers
        self.single_head_multi_out = single_head_multi_out
        if single_head_multi_out:
            assert mu_branched_nodes is None and sigma_branched_nodes is None
            self.mu_outs = [Dense(num_head)]
            self.sigma_outs = [Dense(num_head)]
        else:
            self.mu_outs = [Dense(1) for _ in range(num_head)]
            self.sigma_outs = [Dense(1) for _ in range(num_head)]
Example #11
def globFeatRep(layer: Layer):

	"""Applies a Global Feature Representation Learning on a given
		Keras layer

		Params
		------
		layer : tf.keras.Layer
			Layer to which the global feature representation learning will be
			applied
	"""

	## Compute alpha
	layer_wghts, layer_bias = layer.get_weights()

	exp_alpha_wghts = np.exp(layer_wghts)
	sum_exp = np.sum(exp_alpha_wghts, axis=-1)

	alpha_wghts = exp_alpha_wghts / np.expand_dims(sum_exp, axis=-1)

	## Compute the global feature representation of layer
	layer.set_weights([layer_wghts*alpha_wghts, layer_bias])
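Because the kernel is rescaled in place, the function is normally applied to an already trained layer. A minimal usage sketch, assuming `model` is a built Keras model and 'dense_1' names a layer whose get_weights() returns [kernel, bias]:

dense = model.get_layer('dense_1')
globFeatRep(dense)  # softmax-normalizes each kernel row across output units, in place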
Example #12
def create_right_singular_vector(layer: Layer):
    """
    Adds a non-trainable parameter to the layer that tracks
    estimates of the right singular vector of the layer's
    weight matrix.
    """
    return layer.add_weight(
        shape=tuple([1, layer.kernel.shape.as_list()[-1]]),
        initializer=initializers.RandomNormal(0, 1),
        name="sn",
        trainable=False,
        synchronization=tf.VariableSynchronization.ON_READ,
        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
    )
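The tracked vector is typically consumed by a power-iteration update that re-estimates the kernel's spectral norm on each forward pass. A minimal sketch of one such step, assuming a built layer whose `sn` vector was created with the helper above; the function name and single-step update are illustrative, not part of the source:

import tensorflow as tf

def power_iteration_step(layer, u):
    # One power-iteration step on layer.kernel; u has shape (1, out_dim).
    w = tf.reshape(layer.kernel, [-1, layer.kernel.shape[-1]])     # (in, out)
    v = tf.math.l2_normalize(tf.matmul(u, w, transpose_b=True))    # (1, in)
    u_new = tf.math.l2_normalize(tf.matmul(v, w))                  # (1, out)
    sigma = tf.matmul(tf.matmul(v, w), u_new, transpose_b=True)    # spectral-norm estimate
    u.assign(u_new)
    return tf.squeeze(sigma)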
Example #13
 def __init__(self, **kwargs):
     self.config = kwargs.pop(str('config'), None)
     self.layer_type = self.class_name
     self.spikerates = None
     self.num_bits = self.config.getint('conversion', 'num_bits')
     self.powers = tf.constant([2**-(i + 1) for i in range(self.num_bits)])
     self._x_binary = None
     self._a = None
     allowed_kwargs = {
         'input_shape',
         'batch_input_shape',
         'batch_size',
         'dtype',
         'name',
         'trainable',
         'weights',
         'input_dtype',  # legacy
     }
     for kwarg in kwargs.copy():
         if kwarg not in allowed_kwargs:
             kwargs.pop(kwarg)
     Layer.__init__(self, **kwargs)
     self.stateful = True
Example #14
def split_output_into_instance_seg(model,
                                   n_classes,
                                   spacing=1.,
                                   class_activation=True):
    '''Splits the output of model into instance semi-conv embeddings and semantic class.
    
    Args:
        model: a base model that outputs at least n_classes + n_spatial_dims channels
        n_classes: number of semantic classes
        spacing: pixel/voxel spacing of the semi-conv embeddings
        class_activation: if True, apply a softmax over the semantic class channels
    '''

    spatial_dims = len(model.inputs[0].shape) - 2
    spacing = tuple(
        float(val) for val in np.broadcast_to(spacing, spatial_dims))
    y_preds = model.outputs[0]

    if y_preds.shape[-1] < n_classes + spatial_dims:
        raise ValueError(
            'model has less than n_classes + n_spatial_dims channels: {} < {} + {}'
            .format(y_preds.shape[-1], n_classes, spatial_dims))

    vfield = y_preds[..., 0:spatial_dims]
    coords = generate_coordinate_grid(tf.shape(vfield), spatial_dims) * spacing
    embeddings = coords + vfield

    semantic_class = y_preds[..., spatial_dims:spatial_dims + n_classes]
    if class_activation:
        semantic_class = tf.nn.softmax(semantic_class, axis=-1)

    # rename outputs
    embeddings = Layer(name='embeddings')(embeddings)
    semantic_class = Layer(name='semantic_class')(semantic_class)

    return Model(inputs=model.inputs,
                 outputs=[embeddings, semantic_class],
                 name=model.name)
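The wrapper only reroutes and renames tensors, so any backbone whose last axis provides at least n_spatial_dims + n_classes channels can be split this way. A hedged usage sketch (`build_unet` is a stand-in for whatever builder produces the base model):

base = build_unet(input_shape=(None, None, 1), out_channels=2 + 3)  # hypothetical builder
seg_model = split_output_into_instance_seg(base, n_classes=3, spacing=1.)
embeddings, semantic_class = seg_model.outputs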
Example #15
File: thop.py Project: sbl1996/hanser
def count_mac(layer: Layer, input_shape=None):
    if isinstance(layer, (Model, Sequential)):
        total = 0
        input_shape = layer.layers[0].input_shape
        for l in layer.layers:
            total += count_mac(l, input_shape)
            input_shape = get_output_shape(l)
        return total
    elif type(layer) in register_hooks:
        typ = type(layer)
        ops = register_hooks[typ](layer, input_shape)
        layer_table[typ] += ops
        return ops
    else:
        total = 0
        for l in layer._flatten_layers(recursive=False, include_self=False):
            total += count_mac(l, input_shape)
            input_shape = get_output_shape(l)
        return total
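Calling it on a whole model recurses through nested layers and accumulates the per-type operation counts in `layer_table`. A minimal sketch, assuming `register_hooks` and `layer_table` are set up as in the hanser thop module:

import tensorflow as tf

model = tf.keras.applications.MobileNetV2(weights=None)
total_macs = count_mac(model)
print('total MACs:', total_macs)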
Example #16
    def build(self, input_shape=None):
        self.check_initialization()

        # State value V
        if self.v_h_size is not None:
            if self.noise_std_init == 0:
                self.v_h = Dense(self.v_h_size, name='latent_V')
            else:
                self.v_h = NoisyDense(self.v_h_size,
                                      std_init=self.noise_std_init,
                                      name='latent_V')
        else:
            self.v_h = Layer()

        if self.noise_std_init == 0:
            self.v = Dense(1, name='V')
        else:
            self.v = NoisyDense(1, self.noise_std_init, name='V')

        # Advantage A
        if self.a_h_size is not None:
            if self.noise_std_init == 0:
                self.a_h = Dense(self.a_h_size, name='latent_A')
            else:
                self.a_h = NoisyDense(self.a_h_size,
                                      std_init=self.noise_std_init,
                                      name='latent_A')
        else:
            self.a_h = Layer()

        if self.noise_std_init == 0:
            self.a = Dense(self.num_actions, name='A')
        else:
            self.a = NoisyDense(self.num_actions,
                                self.noise_std_init,
                                name='A')

        super(DoubleQNetwork, self).build(input_shape)
Example #17
 def __init__(self, num_channels, name, use_1x1conv=False, strides=1):
     super().__init__()
     self.conv1 = Convolution2D(num_channels,
                                kernel_size=3,
                                padding='same',
                                strides=strides,
                                name=name + "_conv1",
                                kernel_initializer=GlorotNormal)
     self.conv2 = Convolution2D(num_channels,
                                kernel_size=3,
                                padding='same',
                                name=name + "_conv2",
                                kernel_initializer=GlorotNormal)
     self.bn1 = BatchNormalization(name=name + "_bn1")
     self.bn2 = BatchNormalization(name=name + "_bn2")
     self.skip_conv = None
     if use_1x1conv:
         self.skip_conv = Convolution2D(num_channels,
                                        kernel_size=1,
                                        strides=strides,
                                        name=name + "_skip_conv",
                                        kernel_initializer=GlorotNormal)
     else:
         self.skip_conv = Layer()
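Falling back to a plain `Layer()` keeps the forward pass branch-free: `call` can always apply `self.skip_conv`, and the identity `Layer` just passes its input through. A sketch of the matching `call`, assuming the standard residual formulation (not shown in the source):

 def call(self, x):
     y = tf.nn.relu(self.bn1(self.conv1(x)))
     y = self.bn2(self.conv2(y))
     return tf.nn.relu(y + self.skip_conv(x))  # Layer() acts as identity on the skip path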
Example #18
    def build_generator(self) -> tf.keras.Model:
        inputs = Input((self.z_dims, ))

        x = Dense(4 * 4 * 4 * self.z_dims)(inputs)
        x = BatchNormalization()(x)
        x = ReLU()(x)

        x = Reshape((4, 4, 4 * self.z_dims))(x)

        for i in range(3):
            x = Conv2DTranspose(self.z_dims * 4 // (2**i),
                                kernel_size=5,
                                strides=2,
                                padding='same')(x)
            x = BatchNormalization()(x)
            x = ReLU()(x)

        x = Conv2DTranspose(self.n_channels,
                            kernel_size=5,
                            strides=1,
                            padding='same')(x)
        x = Activation('tanh')(x)  # a bare Layer('tanh') applies no activation; Activation does

        return Model(inputs, x, name='generator')
Example #19
    def base_network(self):
        mob_net = tf.keras.applications.MobileNetV2(weights='imagenet',
                                                    include_top=False,
                                                    input_shape=self.in_shape)
        fmg = Layer(name="Feature_map_G_1")(
            mob_net.layers[-self.end_layer].output)
        fmg = Conv2D(self.out_features, (1, 1),
                     name='Feature_map_G_2',
                     activation='relu')(fmg)
        fmg = tf.keras.layers.BatchNormalization(axis=1,
                                                 trainable=False,
                                                 name='Feature_map_G_3')(fmg)
        x = GlobalAveragePooling2D()(fmg)
        if self.l2_norm:
            x = Lambda(lambda a: tf.math.l2_normalize(a, axis=1),
                       name='l2_norm')(x)

        outmodel = Model(inputs=mob_net.input,
                         outputs=x,
                         name='base_FE_network')
        self.map_G = Model(inputs=mob_net.input,
                           outputs=fmg,
                           name='base_FE_network')
        return outmodel
Example #20
 def __init__(self):
     Layer.__init__(self)
     self.frontend_name = 'keras'
Example #21
        Convolution2D(256, kernel, kernel, 'valid'),
        BatchNormalization(),
        UpSampling2D(size=(pool_size, pool_size)),
        ZeroPadding2D(padding=(pad, pad)),
        Convolution2D(128, kernel, kernel, 'valid'),
        BatchNormalization(),
        UpSampling2D(size=(pool_size, pool_size)),
        ZeroPadding2D(padding=(pad, pad)),
        Convolution2D(filter_size, kernel, kernel, 'valid'),
        BatchNormalization(),
    ]


segnet_basic = models.Sequential()

segnet_basic.add(Layer(input_shape=(3, 360, 480)))

segnet_basic.encoding_layers = create_encoding_layers()

for l in segnet_basic.encoding_layers:
    segnet_basic.add(l)

# Note: if this looks weird, it is because each layer is added with that for loop
# instead of re-writing model.add(some_layer + params) every time.

segnet_basic.decoding_layers = create_decoding_layers()
for l in segnet_basic.decoding_layers:
    segnet_basic.add(l)

segnet_basic.add(Convolution2D(
    12,
Example #22
def conrec_model(input_shape=(256, 256, 1),
                 basemap=32,
                 activation='sigmoid',
                 depth=4,
                 p_dropout=None,
                 batch_normalization=True,
                 projection_dim=128,
                 projection_head_layers=3,
                 skip_connections=None,
                 encoder_reduction=EncoderReduction.GA_POOLING,
                 decoder_type=DecoderType.UPSAMPLING,
                 sc_strength=1):
    def _pool_and_dropout(pool_size, p_dropout, inp):
        """helper fcn to easily add optional dropout"""
        if p_dropout:
            pool = MaxPooling2D(pool_size=pool_size)(inp)
            return Dropout(p_dropout)(pool)
        else:
            return MaxPooling2D(pool_size=pool_size)(inp)

    if skip_connections is None:
        skip_connections = depth - 1

    inputs = Input(input_shape)
    current_layer = inputs
    levels = list()

    for layer_depth in range(depth):
        x = current_layer
        for _ in range(2):
            x = _create_convolution_block(
                input_layer=x,
                n_filters=basemap * (2**layer_depth),
                kernel=(3, 3),
                batch_normalization=batch_normalization,
                use_bias=True)
        if layer_depth < depth - 1:
            x = Layer(name='sc-' + str(layer_depth))(x)
            skip = _create_convolution_block(
                input_layer=x,
                n_filters=x.shape[-1] * sc_strength,
                kernel=(1, 1),
                batch_normalization=batch_normalization,
                use_bias=True)
            levels.append(skip)
            current_layer = _pool_and_dropout(pool_size=(2, 2),
                                              p_dropout=p_dropout,
                                              inp=x)
        else:
            x = Dropout(p_dropout)(x) if p_dropout else x
            current_layer = x

    reduced = reduce_encoder_output(encoder_output=current_layer,
                                    encoder_reduction=encoder_reduction)
    reduced = Layer(name=ENCODER_OUTPUT_NAME)(reduced)

    con_output = add_contrastive_output(
        input=reduced,
        projection_dim=projection_dim,
        projection_head_layers=projection_head_layers)

    for layer_depth in range(depth - 2, -1, -1):
        if decoder_type == DecoderType.TRANSPOSE:
            x = Conv2DTranspose(basemap * (2**layer_depth), (2, 2),
                                strides=(2, 2),
                                padding='same')(current_layer)
        elif decoder_type == DecoderType.UPSAMPLING:
            x = UpSampling2D(size=(2, 2))(current_layer)
        else:
            raise ValueError('Unknown decoder type')
        if skip_connections > layer_depth:
            x = concatenate([x, levels[layer_depth]], axis=3)
        else:
            print('No skip connection')
        for _ in range(2):
            x = _create_convolution_block(
                input_layer=x,
                n_filters=basemap * (2**layer_depth),
                kernel=(3, 3),
                batch_normalization=batch_normalization,
                use_bias=True)
        current_layer = Dropout(p_dropout)(x) if p_dropout else x

    reconstruction_out = Conv2D(input_shape[-1], (1, 1),
                                activation=activation,
                                name=RECONSTRUCTION_OUTPUT)(current_layer)

    return Model(inputs, [reconstruction_out, con_output])
Example #23
class DoubleQNetwork(QNetwork):
    def __init__(self, latent_v_dim=None, latent_a_dim=None, noise_std_init=0):
        super().__init__(name="double_Q_network")
        self.v_h_size = latent_v_dim
        self.a_h_size = latent_a_dim
        self.noise_std_init = noise_std_init
        self.v_h = None
        self.v = None
        self.a_h = None
        self.a = None

    def build(self, input_shape=None):
        self.check_initialization()

        # State value V
        if self.v_h_size is not None:
            if self.noise_std_init == 0:
                self.v_h = Dense(self.v_h_size, name='latent_V')
            else:
                self.v_h = NoisyDense(self.v_h_size,
                                      std_init=self.noise_std_init,
                                      name='latent_V')
        else:
            self.v_h = Layer()

        if self.noise_std_init == 0:
            self.v = Dense(1, name='V')
        else:
            self.v = NoisyDense(1, self.noise_std_init, name='V')

        # Advantage A
        if self.a_h_size is not None:
            if self.noise_std_init == 0:
                self.a_h = Dense(self.a_h_size, name='latent_A')
            else:
                self.a_h = NoisyDense(self.a_h_size,
                                      std_init=self.noise_std_init,
                                      name='latent_A')
        else:
            self.a_h = Layer()

        if self.noise_std_init == 0:
            self.a = Dense(self.num_actions, name='A')
        else:
            self.a = NoisyDense(self.num_actions,
                                self.noise_std_init,
                                name='A')

        super(DoubleQNetwork, self).build(input_shape)

    def get_config(self):
        config = {
            "latent_v_dim": self.v_h_size,
            "latent_a_dim": self.a_h_size,
            "noise_std_init": self.noise_std_init
        }
        return config

    def call(self, inputs, training=False, mask=None):
        v = self.v(ReLU()(self.v_h(inputs)))
        a = self.a(ReLU()(self.a_h(inputs)))
        a_avg = tf.reduce_mean(a,
                               axis=self.q_value_axis,
                               keepdims=True,
                               name='A_mean')
        # State-action values: Q(s, a) = V(s) + A(s, a) - A_mean(s, a)
        return v + a - a_avg

    def reset_noise(self):
        if self.noise_std_init > 0:
            self.v_h.reset_noise()
            self.v.reset_noise()
            self.a_h.reset_noise()
            self.a.reset_noise()

    def set_noisy(self, active):
        if self.noise_std_init > 0:
            self.v_h.set_noisy(active)
            self.v.set_noisy(active)
            self.a_h.set_noisy(active)
            self.a.set_noisy(active)
Example #24
def plot_custom_layer(layer: Layer, input_shape: tuple, plot_filepath: str):
    tmp_input = Input(shape=input_shape)
    layer.build(input_shape)
    tmp_output = layer.call(tmp_input)
    tmp_model = Model(inputs=[tmp_input], outputs=[tmp_output])
    return plot_model(tmp_model, to_file=plot_filepath, show_shapes=True)
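A quick smoke test of the helper with a built-in layer (the file name is illustrative; plot_model requires pydot/graphviz):

from tensorflow.keras.layers import Dense
plot_custom_layer(Dense(8), input_shape=(64,), plot_filepath='dense_block.png')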
Example #25
 def build(self, input_shape):
     self.shape = input_shape
     Layer.build(self, input_shape)