Example #1
            param.MODE, w2v_step, param.USE_LSI, pse, le, targets_train,
            pre_seq_train, post_seq_train, active_meds_train,
            active_classes_train, depa_train, param.W2V_EMBEDDING_DIM,
            param.SEQUENCE_LENGTH, param.N_ENCODER_LOSS_SAMPLES)
        sample_test_generator = TransformedGenerator(
            param.MODE, w2v_step, param.USE_LSI, pse, le, targets_test,
            pre_seq_test, post_seq_test, active_meds_test, active_classes_test,
            depa_test, param.W2V_EMBEDDING_DIM, param.SEQUENCE_LENGTH,
            param.N_ENCODER_LOSS_SAMPLES)
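        # TransformedGenerator behaves like a keras.utils.Sequence: indexing
        # a batch returns (inputs, targets) dicts keyed by model layer names
        # ('pse_input' for the inputs, 'main_output' for the targets).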
        batch_data_X_val, batch_data_y_val = sample_test_generator[0]
        samples_X_val = batch_data_X_val['pse_input']
        samples_y_val = batch_data_y_val['main_output']

    # Define the network and train
    n = neural_network(param.MODE, param.L1L2RATIO)

    if param.MODE == 'retrospective-gan':

        custom_objects_dict = {
            'autoencoder_accuracy': n.autoencoder_accuracy,
            'autoencoder_false_neg_rate': n.autoencoder_false_neg_rate,
            'Sampling': Sampling
        }
        val_monitor_losses = []
        # TODO: load previously saved models and resume training.
        # The code to save and resume training the GAN is buggy and doesn't
        # properly restore the model weights. Do not use for now.
        # TODO: fix model saving and resuming training with the GAN.
        # Disabled pending the fix above:
        # if i == initial_fold:
        #     try:
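
# The 'Sampling' custom object registered above is presumably a
# reparameterization layer used by the encoder. A minimal sketch of such a
# layer, assuming the usual Keras VAE pattern (not necessarily this
# repository's exact implementation):
import tensorflow as tf

class Sampling(tf.keras.layers.Layer):
    """Draw z ~ N(z_mean, exp(z_log_var)) via the reparameterization trick."""

    def call(self, inputs):
        z_mean, z_log_var = inputs
        # Sample standard normal noise, then scale and shift it so gradients
        # can flow through z_mean and z_log_var.
        epsilon = tf.random.normal(shape=tf.shape(z_mean))
        return z_mean + tf.exp(0.5 * z_log_var) * epsilon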
Example #2
                                     le,
                                     targets_val,
                                     seq_val,
                                     active_meds_val,
                                     active_classes_val,
                                     depa_val,
                                     W2V_EMBEDDING_DIM,
                                     SEQUENCE_LENGTH,
                                     BATCH_SIZE,
                                     shuffle=False)

#%%[markdown]
# #### Instantiate the model

#%%
n = neural_network()
callbacks = n.callbacks(SAVE_PATH)
model = n.define_model(LSTM_SIZE, N_LSTM, DENSE_PSE_SIZE, CONCAT_SIZE,
                       DENSE_SIZE, DROPOUT, L2_REG, SEQUENCE_LENGTH,
                       W2V_EMBEDDING_DIM, pse_shape, N_PSE_DENSE, N_DENSE,
                       output_n_classes)
model.summary()
tf.keras.utils.plot_model(model, to_file=os.path.join(SAVE_PATH, 'model.png'))
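# Note: tf.keras.utils.plot_model requires the pydot package and the
# graphviz system binaries to be installed.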

#%%[markdown]
# #### Train the model
#
# Use in_ipynb to check whether the code is running in Jupyter:
# print progress bars when running in a terminal, and log only at
# the epoch level when in Jupyter. This works around a bug in
# Jupyter or Keras where progress bars flood stdout, slowing down
# and eventually crashing the notebook.
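
#%%
# One common way to implement such an in_ipynb check (a sketch assuming the
# helper simply detects an IPython kernel; the repository's actual helper
# may differ):
def in_ipynb():
    try:
        from IPython import get_ipython
        # Jupyter notebooks run inside a ZMQ-based kernel shell.
        return get_ipython().__class__.__name__ == 'ZMQInteractiveShell'
    except ImportError:
        return False

# Keras verbose=1 shows live progress bars (terminal); verbose=2 logs one
# line per epoch (Jupyter-safe).
verbose_level = 2 if in_ipynb() else 1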
Example #3
                                      filtered_targets,
                                      filtered_pre_seqs,
                                      filtered_post_seqs,
                                      filtered_active_meds,
                                      filtered_active_classes,
                                      filtered_depas,
                                      param.W2V_EMBEDDING_DIM,
                                      param.SEQUENCE_LENGTH,
                                      param.BATCH_SIZE,
                                      shuffle=False)

#%%[markdown]
# ### Instantiate the model

#%%
n = neural_network(param.MODE)
if param.MODE in ['retrospective-autoenc', 'retrospective-gan']:
    custom_objects_dict = {
        'autoencoder_accuracy': n.autoencoder_accuracy,
        'autoencoder_false_neg_rate': n.autoencoder_false_neg_rate,
        'combined_l1l2loss': n.combined_l1l2loss
    }
else:
    custom_objects_dict = {
        'sparse_top10_accuracy': n.sparse_top10_accuracy,
        'sparse_top30_accuracy': n.sparse_top30_accuracy
    }

model = tf.keras.models.load_model(os.path.join(save_path, 'model.h5'),
                                   custom_objects=custom_objects_dict)
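
# If the custom metrics or losses are not importable at load time, the
# trained weights can still be loaded for inference only by skipping
# compilation:
model = tf.keras.models.load_model(os.path.join(save_path, 'model.h5'),
                                   compile=False)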