Example #1
 def __init__(self, n_clusters, weights=None, alpha=1.0, **kwargs):
     # Accept the legacy `input_dim` kwarg by converting it to `input_shape`.
     if 'input_shape' not in kwargs and 'input_dim' in kwargs:
         kwargs['input_shape'] = (kwargs.pop('input_dim'),)
     Layer.__init__(self, **kwargs)
     self.n_clusters = n_clusters
     self.alpha = alpha
     self.initial_weights = weights
     # The layer expects 2-D input: (batch_size, n_features).
     self.input_spec = InputSpec(ndim=2)
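An __init__ like this is usually paired with a get_config that re-exposes the same constructor arguments, as the later examples do. A minimal sketch of such a companion method (not part of the original snippet):

 def get_config(self):
     # Hypothetical companion to the __init__ above; mirrors the merge
     # pattern used throughout the examples below.
     base_config = Layer.get_config(self)
     config = {'n_clusters': self.n_clusters, 'alpha': self.alpha}
     return dict(list(base_config.items()) + list(config.items()))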
Example #2
 def get_config(self):
     # Merge this layer's constructor arguments into the base Layer config
     # so the layer can be re-instantiated from a serialized model.
     base_config = Layer.get_config(self)
     config = {'sphereCoords': self.sphereCoords,
               'vec_start': self.vec_start,
               'weight_output': self.weight_output}
     return dict(list(base_config.items()) + list(config.items()))
Example #3
 def get_config(self):
     base_config = Layer.get_config(self)
     config = {
         'std_stats': self.std_stats,
         'take_particles': self.take_particles,
         'take_HLF': self.take_HLF
     }
     return dict(list(base_config.items()) + list(config.items()))
Example #4
 def get_config(self):
     config = {
         'rep': self.rep,
         'axis': self.axis,
         'batch_size': self.batch_size
     }
     base_config = Layer.get_config(self)
     # dict.items() returns a set-like view, but merging the views with `|`
     # breaks on unhashable values and resolves key collisions arbitrarily,
     # so merge through lists as in the other examples.
     return dict(list(base_config.items()) + list(config.items()))
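On Python 3.5+ the same merge is more often written with dict unpacking, where entries in config deterministically override base_config. A minimal equivalent sketch (not from the original project):

 def get_config(self):
     config = {'rep': self.rep, 'axis': self.axis, 'batch_size': self.batch_size}
     base_config = Layer.get_config(self)
     # Entries in `config` win on key collisions with `base_config`.
     return {**base_config, **config}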
Example #5
    def get_config(self):
        base_config = Layer.get_config(self)
        config = {'split_str': self.split_str}
        return dict(list(base_config.items()) + list(config.items()))


# s = Slice("[:2 ,3:4 ,1 , 25:, 0:10:2]")
# out = s.get_output_shape_for((100,5,3, 50, 50,50))
# print(out)
Example #6
 def get_config(self):
     base_config = Layer.get_config(self)
     config = {
         'attention_width': self.attention_width,
         'implementation': self.implementation,
         'seq_len': self.seq_len,
         'sparsity_coeff': self.sparsity_coeff
     }
     return dict(list(base_config.items()) + list(config.items()))
Example #7
	def __init__(self, output_dim, hidden_dim, output_length, depth=1, bidirectional=True, dropout=0.1, **kwargs):
		if bidirectional and hidden_dim % 2 != 0:
			raise Exception("hidden_dim for AttentionSeq2seq must be even (because of the bidirectional RNN).")
		super(AttentionSeq2seq, self).__init__()
		# Allow `depth` as a single int (same depth for encoder and decoder).
		if not isinstance(depth, (list, tuple)):
			depth = (depth, depth)
		if 'batch_input_shape' in kwargs:
			shape = kwargs['batch_input_shape']
			del kwargs['batch_input_shape']
		elif 'input_shape' in kwargs:
			shape = (None,) + tuple(kwargs['input_shape'])
			del kwargs['input_shape']
		elif 'input_dim' in kwargs:
			if 'input_length' in kwargs:
				input_length = kwargs['input_length']
			else:
				input_length = None
			shape = (None, input_length, kwargs['input_dim'])
			del kwargs['input_dim']
		else:
			raise Exception('One of `batch_input_shape`, `input_shape` or `input_dim` must be provided.')
		self.add(Layer(batch_input_shape=shape))
		if bidirectional:
			self.add(Bidirectional(LSTMEncoder(output_dim=int(hidden_dim / 2), state_input=False, return_sequences=True, **kwargs)))
		else:
			self.add(LSTMEncoder(output_dim=hidden_dim, state_input=False, return_sequences=True, **kwargs))
		for i in range(0, depth[0] - 1):
			self.add(Dropout(dropout))
			if bidirectional:
				self.add(Bidirectional(LSTMEncoder(output_dim=int(hidden_dim / 2), state_input=False, return_sequences=True, **kwargs)))
			else:
				self.add(LSTMEncoder(output_dim=hidden_dim, state_input=False, return_sequences=True, **kwargs))
		encoder = self.layers[-1]
		self.add(Dropout(dropout))
		self.add(TimeDistributed(Dense(hidden_dim if depth[1] > 1 else output_dim)))
		decoder = AttentionDecoder(hidden_dim=hidden_dim, output_length=output_length, state_input=False, **kwargs)
		self.add(Dropout(dropout))
		self.add(decoder)
		for i in range(0, depth[1] - 1):
			self.add(Dropout(dropout))
			self.add(LSTMEncoder(output_dim=hidden_dim, state_input=False, return_sequences=True, **kwargs))
		self.add(Dropout(dropout))
		self.add(TimeDistributed(Dense(output_dim)))
		self.encoder = encoder
		self.decoder = decoder
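A minimal usage sketch, assuming the surrounding seq2seq-style library (LSTMEncoder, AttentionDecoder, and the Sequential-like base class) is importable; the shapes are illustrative, not from the original snippet:

model = AttentionSeq2seq(input_dim=5, input_length=7, hidden_dim=10,
                         output_length=8, output_dim=20, depth=2)
model.compile(loss='mse', optimizer='rmsprop')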
Example #8
# Imports and hyperparameters are not part of the original snippet; the values
# below are inferred from the summary output (input length 50, and a
# 100000 x 300 embedding giving the 30,000,000 parameters shown).
from keras.layers import (Input, Embedding, Bidirectional, GRU, Dense, Dropout,
                          GlobalAveragePooling1D, GlobalMaxPooling1D, concatenate)
from keras.models import Model

maxlen = 50
max_features = 100000
embed_size = 300

inp = Input(shape=(maxlen,))
x = Embedding(max_features, embed_size)(inp)
x = Bidirectional(GRU(64, return_sequences=True))(x)
avg_pool = GlobalAveragePooling1D()(x)
max_pool = GlobalMaxPooling1D()(x)
conc = concatenate([avg_pool, max_pool])
conc = Dense(64, activation="relu")(conc)
conc = Dropout(0.1)(conc)
outp = Dense(1, activation="sigmoid")(conc)

model = Model(inputs=inp, outputs=outp)
model.compile(loss='binary_crossentropy', optimizer='adam')

model.summary()

"""
____________________________________________________________________________________________________
Layer (type)                     Output Shape          Param #     Connected to                     
====================================================================================================
input_1 (InputLayer)             (None, 50)            0                                            
____________________________________________________________________________________________________
embedding_1 (Embedding)          (None, 50, 300)       30000000    input_1[0][0]                    
____________________________________________________________________________________________________
bidirectional_1 (Bidirectional)  (None, 50, 128)       140160      embedding_1[0][0]                
____________________________________________________________________________________________________
global_average_pooling1d_1 (Glob (None, 128)           0           bidirectional_1[0][0]            
____________________________________________________________________________________________________
global_max_pooling1d_1 (GlobalMa (None, 128)           0           bidirectional_1[0][0]            
____________________________________________________________________________________________________
concatenate_1 (Concatenate)      (None, 256)           0           global_average_pooling1d_1[0][0] 
                                                                   global_max_pooling1d_1[0][0]     
...
"""
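Training is then a standard fit call; a minimal sketch, assuming X_train (padded sequences of length maxlen) and binary labels y_train, neither of which is defined in the original snippet:

model.fit(X_train, y_train, batch_size=256, epochs=2, validation_split=0.1)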
Example #9
 def get_config(self):
     base_config = Layer.get_config(self)
     config = {'seq_len': self.seq_len}
     return dict(list(base_config.items()) + list(config.items()))
Example #10
 def __init__(self, rep, axis, batch_size, **kwargs):
     Layer.__init__(self, **kwargs)
     self.rep = rep
     self.axis = axis
     self.batch_size = batch_size
Example #11
 def build(self, input_shape):
     # `axis` must index an existing dimension of the input tensor.
     assert len(input_shape) > self.axis >= 0
     self.input_rank = len(input_shape)
     Layer.build(self, input_shape)
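Examples #10 and #11 read like fragments of one repeat-style layer: __init__ stores rep, axis and batch_size, while build only validates the axis and records the input rank. A minimal sketch of a matching call method, assuming the layer repeats its input along the validated axis (this method is hypothetical, not part of the snippet):

 from keras import backend as K

 def call(self, inputs):
     # Repeat the input `rep` times along the validated axis.
     return K.repeat_elements(inputs, self.rep, axis=self.axis)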
Example #12
 def build(self, input_shape):
     self._input_rank = len(input_shape)
     Layer.build(self, input_shape)
Example #13
 def get_config(self):
     base_config = Layer.get_config(self)
     config = {'mapping': self.mapping_name,
               'initial_beta': self.initial_beta,
               'nb_out': self.nb_out}
     return dict(list(base_config.items()) + list(config.items()))
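These get_config payloads exist so a saved model can rebuild the layer: Keras calls from_config(config), which feeds the dict back into __init__, so the keys must match the constructor's argument names (Example #13 stores self.mapping_name under the key 'mapping', suggesting the constructor argument is named mapping). A minimal round-trip sketch with a hypothetical MappingLayer class:

layer = MappingLayer(mapping='identity', initial_beta=0.5, nb_out=4)  # hypothetical class
restored = MappingLayer.from_config(layer.get_config())  # rebuilds an equivalent layer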