def _make_multi_io_model():
    """Build a two-input, two-output GRU model compiled with adam + MSE.

    Inputs of shape (40, 8) and (40, 16) are concatenated on the feature
    axis and fed to two parallel return-sequences GRU heads (6 and 12 units).
    """
    in_a = Input((40, 8))
    in_b = Input((40, 16))
    merged = concatenate([in_a, in_b])

    head_small = GRU(6, return_sequences=True)(merged)
    head_large = GRU(12, return_sequences=True)(merged)

    multi_model = Model([in_a, in_b], [head_small, head_large])
    multi_model.compile('adam', 'mse')
    return multi_model
def _make_model(opt, batch_shape):
    """Build a two-Dense-layer model and compile it with `opt` and MSE loss.

    Both Dense layers use the last entry of `batch_shape` as their width,
    so the model maps inputs back to the same feature dimension.
    """
    width = batch_shape[-1]
    inputs = Input(batch_shape=batch_shape)
    hidden = Dense(width)(inputs)
    outputs = Dense(width)(hidden)

    net = Model(inputs, outputs)
    net.compile(opt, 'mse')
    return net
def _make_softmax_model():
    """Build a single-Dense softmax classifier (adam, categorical crossentropy).

    NOTE(review): depends on module-level `batch_size` and `n_classes`
    globals defined elsewhere in this file — confirm they are set before
    this helper is called.
    """
    inputs = Input(batch_shape=(batch_size, 8))
    logits = Dense(n_classes)(inputs)
    probs = Activation('softmax')(logits)

    clf = Model(inputs, probs)
    clf.compile('adam', 'categorical_crossentropy')
    return clf
def make_model(batch_shape, layer_kw=None):
    """Conv1D autoencoder.

    Parameters:
        batch_shape: full input batch shape; its last entry is the channel
            dimension, which the decoder reproduces at the output.
        layer_kw: optional dict of extra keyword arguments forwarded to
            every Conv1D layer. Defaults to no extra kwargs.

    Returns:
        A Keras Model compiled with adam optimizer and MSE loss.
    """
    # Fix the mutable-default-argument pitfall: the original `layer_kw={}`
    # shared one dict across all calls, so any caller mutating it would
    # silently affect later calls. A None sentinel is backward-compatible.
    if layer_kw is None:
        layer_kw = {}

    dim = batch_shape[-1]
    bdim = dim // 2  # bottleneck width: half the channel dimension

    ipt = Input(batch_shape=batch_shape)
    x = Conv1D(dim, 8, activation='relu', **layer_kw)(ipt)
    x = Conv1D(bdim, 1, activation='relu', **layer_kw)(x)  # bottleneck
    out = Conv1D(dim, 8, activation='linear', **layer_kw)(x)

    model = Model(ipt, out)
    model.compile('adam', 'mse')
    return model
def _make_model(batch_shape, l1_reg=None, l2_reg=None, bidirectional=True,
                dense_constraint=None, embed_input_dim=None, sparse=False):
    """Build a small GRU classifier with configurable regularization.

    Parameters:
        batch_shape: full input batch shape for the Input layer.
        l1_reg, l2_reg: optional L1/L2 regularization coefficients; if both
            are given, a combined l1_l2 regularizer is used.
        bidirectional: wrap the GRU in a Bidirectional layer when True.
        dense_constraint: optional maxnorm coefficient for the Dense kernel.
        embed_input_dim: vocabulary size for the Embedding layer; only used
            when `sparse` is True.
        sparse: when True, route input through a mask-zero Embedding and end
            with a 2-way softmax; otherwise end with a 1-unit sigmoid.

    Returns:
        An uncompiled Keras Model (compilation is left to the caller).
    """
    def _pick_regularizer(a, b):
        # Select l1, l2, combined l1_l2, or nothing, depending on which
        # coefficients the caller supplied.
        if a is not None and b is not None:
            return l1_l2(a, b)
        if a is not None:
            return l1(a)
        if b is not None:
            return l2(b)
        return None

    reg = _pick_regularizer(l1_reg, l2_reg)
    constraint = (maxnorm(dense_constraint)
                  if dense_constraint is not None else None)

    inputs = Input(batch_shape=batch_shape)
    if sparse:
        x = Embedding(embed_input_dim, embed_input_dim * 3 + 1,
                      mask_zero=True)(inputs)
    else:
        x = inputs

    recurrent = GRU(4, recurrent_regularizer=reg, bias_regularizer=reg)
    x = Bidirectional(recurrent)(x) if bidirectional else recurrent(x)

    x = Dense(2, kernel_regularizer=reg, kernel_constraint=constraint)(x)
    out = (Dense(2, activation='softmax')(x) if sparse
           else Dense(1, activation='sigmoid')(x))
    return Model(inputs, out)