Example #1
# Assumes: from tensorflow.keras.models import Sequential
#          from tensorflow.keras.initializers import glorot_uniform, Ones, Zeros
def init_from_pretrained_ssd(self, weights_path):
    # Load the pre-trained SSD weights by name, then re-initialize the
    # classification and regression prediction heads.
    self.ssd.load_weights(weights_path, by_name=True)
    for layer in self.ssd.layers:
        if layer in self.classification_headers or layer in self.regression_headers:
            if isinstance(layer, Sequential):
                for sub_layer in layer.layers:
                    w = sub_layer.get_weights()
                    if not w:
                        continue
                    if len(w) == 2:  # Conv/Dense layer with bias
                        new_weights = [glorot_uniform()(w[0].shape),
                                       Zeros()(w[1].shape)]
                    elif len(w) == 4:  # BatchNorm: gamma, beta, moving mean, moving variance
                        new_weights = [Ones()(w[0].shape),
                                       Zeros()(w[1].shape),
                                       Zeros()(w[2].shape),
                                       Ones()(w[3].shape)]
                    else:  # Single weight tensor (no bias)
                        new_weights = [glorot_uniform()(w[0].shape)]
                    sub_layer.set_weights(new_weights)
            else:
                w = layer.get_weights()
                if not w:
                    continue
                if len(w) > 1:  # Layer has bias
                    new_weights = [glorot_uniform()(w[0].shape),
                                   Zeros()(w[1].shape)]
                else:  # Single weight tensor (no bias)
                    new_weights = [glorot_uniform()(w[0].shape)]
                layer.set_weights(new_weights)
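
For context, the re-initialization above relies on the fact that a Keras initializer instance, when called with a shape, returns a tensor of that shape, which can then be fed back through set_weights. A minimal standalone sketch of that pattern; the Dense layer, its sizes, and the explicit NumPy conversion are illustrative assumptions, not part of the original model:

import numpy as np
from tensorflow.keras.layers import Dense
from tensorflow.keras.initializers import glorot_uniform, Zeros

layer = Dense(10)                        # hypothetical stand-in for a prediction head
layer.build((None, 4))                   # creates kernel of shape (4, 10) and bias of shape (10,)

w = layer.get_weights()                  # [kernel, bias] as NumPy arrays
new_weights = [glorot_uniform()(w[0].shape),  # fresh Glorot-uniform kernel
               Zeros()(w[1].shape)]           # zeroed bias
layer.set_weights([np.asarray(v) for v in new_weights])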
Example #2
def build(self, input_shape):
    # Trainable per-feature scale (gamma, initialized to ones) and
    # offset (beta, initialized to zeros) over the last axis.
    self.gamma = self.add_weight(name='gamma',
                                 shape=input_shape[-1:],
                                 initializer=Ones(),
                                 trainable=True)
    self.beta = self.add_weight(name='beta',
                                shape=input_shape[-1:],
                                initializer=Zeros(),
                                trainable=True)
    super(LayerNormalization, self).build(input_shape)
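
The build() above only creates the layer-normalization parameters; a companion call() applies them. A minimal sketch of the full layer, assuming a small epsilon hyperparameter; only build() comes from the example, while __init__() and call() here are illustrative:

import tensorflow as tf
from tensorflow.keras.layers import Layer
from tensorflow.keras.initializers import Ones, Zeros

class LayerNormalization(Layer):
    def __init__(self, eps=1e-6, **kwargs):
        super(LayerNormalization, self).__init__(**kwargs)
        self.eps = eps  # assumed small constant for numerical stability

    def build(self, input_shape):
        # Per-feature scale and offset, as in the example above.
        self.gamma = self.add_weight(name='gamma', shape=input_shape[-1:],
                                     initializer=Ones(), trainable=True)
        self.beta = self.add_weight(name='beta', shape=input_shape[-1:],
                                    initializer=Zeros(), trainable=True)
        super(LayerNormalization, self).build(input_shape)

    def call(self, x):
        # Normalize each sample over the feature axis, then rescale and shift.
        mean = tf.reduce_mean(x, axis=-1, keepdims=True)
        variance = tf.reduce_mean(tf.square(x - mean), axis=-1, keepdims=True)
        return self.gamma * (x - mean) / tf.sqrt(variance + self.eps) + self.beta

Because the statistics are computed per sample over the feature axis, the layer needs no moving averages and behaves identically at training and inference time.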