# Co-occurrence branch: dot product of the two code embeddings plus
# per-code bias terms, summed into a single co-occurrence output.
w_embed = embed_layer(input_w)
v_embed = embed_layer(input_v)
bias_layer = Embedding(input_dim=len(code_cat), output_dim=1, name='bias')
w_bias = bias_layer(input_w)
v_bias = bias_layer(input_v)
inner = Multiply()([w_embed, v_embed])
inner = Lambda(lambda x: K.sum(x, axis=-1, keepdims=True))(inner)
merged = Concatenate(axis=-1)([inner, w_bias, v_bias])
output_cooccur = Lambda(lambda x: K.sum(x, axis=-1, keepdims=False),
                        name='cooccur')(merged)

# Readmission branch: embed the DX and PR code inputs with the shared
# embedding layer, transform and pool them with the masked layers, then
# concatenate with the remaining (non-code) features.
input_DX = Input(shape=(len(DXs),))
DX_embed = embed_layer(input_DX)
DX_embed = MaskedDense(md_width, activation='relu')(DX_embed)
DX_embed = MaskedSum()(DX_embed)

input_PR = Input(shape=(len(PRs),))
PR_embed = embed_layer(input_PR)
PR_embed = MaskedDense(md_width, activation='relu')(PR_embed)
PR_embed = MaskedSum()(PR_embed)

input_other = Input(shape=(8,))
merged = Concatenate(axis=1)([DX_embed, PR_embed, input_other])
merged = Dense(fc_width, activation='relu')(merged)
merged = Dropout(dropout)(merged)
output_readm = Dense(2, activation='softmax', name='readm')(merged)

# Multi-task model: predicts readmission and reconstructs code co-occurrence.
model = Model(inputs=[input_DX, input_PR, input_other, input_w, input_v],
              outputs=[output_readm, output_cooccur])
adam = Adam(lr=lr)
model.compile(loss={
model = Model(inputs=[input_DX1, input_DX, input_PR, input_hosp, input_other],
              outputs=prediction)

if model_name == 'att_lr':
    # Embed the DX1, DX, and PR codes with separate embedding layers that
    # share the same code vocabulary.
    input_DX1 = Input(shape=(1,))
    DX1_embed = Embedding(input_dim=len(code_cat), output_dim=code_embed_dim,
                          mask_zero=True, embeddings_initializer=embed_initializer,
                          name='DX1_embed')(input_DX1)
    input_DX = Input(shape=(len(DXs),))
    DX_embed = Embedding(input_dim=len(code_cat), output_dim=code_embed_dim,
                         mask_zero=True, embeddings_initializer=embed_initializer,
                         name='DX_embed')(input_DX)
    input_PR = Input(shape=(len(PRs),))
    PR_embed = Embedding(input_dim=len(code_cat), output_dim=code_embed_dim,
                         mask_zero=True, embeddings_initializer=embed_initializer,
                         name='PR_embed')(input_PR)

    # Stack the code embeddings, apply the self-attention layers, then pool
    # over the code axis with a masked sum.
    code_embed = Concatenate(axis=1)([DX1_embed, DX_embed, PR_embed])
    for j in range(n_att_layers):
        code_embed = MultiHeadAttention(head_num=n_heads, use_bias=False,
                                        activation='linear')(code_embed)
    code_embed = MaskedSum()(code_embed)

    # Hospital embedding and the remaining (non-code) features.
    input_hosp = Input(shape=(1,))
    hosp_embed = Embedding(input_dim=len(hosp_cat), output_dim=hosp_embed_dim,
                           input_length=1)(input_hosp)
    hosp_embed = Reshape((hosp_embed_dim,))(hosp_embed)
    input_other = Input(shape=(other_mat_train.shape[1],))

    merged = Concatenate(axis=1)([code_embed, hosp_embed, input_other])
    prediction = Dense(2, activation='softmax')(merged)
    model = Model(inputs=[input_DX1, input_DX, input_PR, input_hosp, input_other],
                  outputs=prediction)

if model_name == 'att_sum_lr':
    input_DX1 = Input(shape=(1,))
    DX1_embed = Embedding(input_dim=len(code_cat), output_dim=code_embed_dim,
                          mask_zero=True, embeddings_initializer=embed_initializer,
                          name='DX1_embed')(input_DX1)
    input_DX = Input(shape=(len(DXs),))
    DX_embed = Embedding(input_dim=len(code_cat), output_dim=code_embed_dim,
                         mask_zero=True, embeddings_initializer=embed_initializer,
                         name='DX_embed')(input_DX)
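MaskedDense and MaskedSum are project-specific layers referenced in the listings above but not defined in this excerpt. The following is a minimal sketch of what a mask-aware sum could look like, assuming it pools the code embeddings over the code axis while ignoring positions masked out by the mask_zero=True embeddings; the actual MaskedSum implementation may differ.

from keras import backend as K
from keras.layers import Layer


class MaskedSum(Layer):
    """Hypothetical sketch: sums a (batch, n_codes, dim) tensor over the
    code axis, zeroing out padded positions using the mask propagated
    from an Embedding layer with mask_zero=True."""

    def __init__(self, **kwargs):
        super(MaskedSum, self).__init__(**kwargs)
        self.supports_masking = True

    def call(self, inputs, mask=None):
        if mask is not None:
            # zero out embeddings of padded codes before summing
            inputs = inputs * K.expand_dims(K.cast(mask, K.floatx()), axis=-1)
        return K.sum(inputs, axis=1)

    def compute_mask(self, inputs, mask=None):
        # the sum removes the code axis, so no mask is propagated downstream
        return None

Setting supports_masking = True lets the layer accept the mask produced by the embedding layers, and returning None from compute_mask stops mask propagation so that downstream Dense layers receive a plain pooled vector.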