# Example #1
def _training_model(vec, ac_weights, rl_weights, output_folder, args):
    """Build and train a multi-task LSTM model for next-event prediction.

    The network consumes activity, role, time and inter-case prefixes and
    jointly predicts the next activity, next role, next time value and the
    next inter-case attributes.

    Args:
        vec (dict): Vectorized log with 'prefixes' (model inputs) and
            'next_evt' (targets); both hold 'activities', 'roles', 'times'
            and 'inter_attr' arrays.
        ac_weights (numpy.ndarray): Pretrained activity embedding matrix.
        rl_weights (numpy.ndarray): Pretrained role embedding matrix.
        output_folder (str): Folder for model checkpoints and timing logs.
        args (dict): Hyper-parameters ('l_size', 'imp', 'lstm_act',
            'dense_act', 'optim', 'model_type').

    Raises:
        ValueError: If ``args['optim']`` is not a supported optimizer name.
    """

    print('Build model...')
    print(args)
# =============================================================================
#     Input layer
# =============================================================================
    ac_input = Input(shape=(vec['prefixes']['activities'].shape[1], ),
                     name='ac_input')
    rl_input = Input(shape=(vec['prefixes']['roles'].shape[1], ),
                     name='rl_input')
    t_input = Input(shape=(vec['prefixes']['times'].shape[1], 1),
                    name='t_input')
    inter_input = Input(shape=(vec['prefixes']['inter_attr'].shape[1],
                               vec['prefixes']['inter_attr'].shape[2]),
                        name='inter_input')

# =============================================================================
#    Embedding layer for categorical attributes
# =============================================================================
    # Embeddings are frozen (trainable=False): they reuse pretrained weights.
    ac_embedding = Embedding(ac_weights.shape[0],
                             ac_weights.shape[1],
                             weights=[ac_weights],
                             input_length=(vec['prefixes']['activities']
                                           .shape[1]),
                             trainable=False, name='ac_embedding')(ac_input)

    rl_embedding = Embedding(rl_weights.shape[0],
                             rl_weights.shape[1],
                             weights=[rl_weights],
                             input_length=vec['prefixes']['roles'].shape[1],
                             trainable=False, name='rl_embedding')(rl_input)
# =============================================================================
#    Concatenation layer
# =============================================================================
    # Categorical and continuous features are merged along the feature axis.
    merged1 = Concatenate(name='conc_categorical',
                          axis=2)([ac_embedding, rl_embedding])
    merged2 = Concatenate(name='conc_continuous', axis=2)([t_input, inter_input])

# =============================================================================
#    Layer 1
# =============================================================================

    l1_c1 = LSTM(args['l_size'],
                 kernel_initializer='glorot_uniform',
                 return_sequences=True,
                 dropout=0.2,
                 implementation=args['imp'])(merged1)

    # 1-D convolutions over the continuous branches; kernel covers roughly
    # one third of the prefix length.
    l1_c1d_1 = Conv1D(filters=1,
                   kernel_size=round(vec['prefixes']['times'].shape[1]/3),
                   kernel_initializer='glorot_uniform',
                   padding='same',
                   activation=args['lstm_act'],
                   strides=1)(inter_input)

    l1_c1d_2 = Conv1D(filters=1,
                   kernel_size=round(vec['prefixes']['times'].shape[1]/3),
                   kernel_initializer='glorot_uniform',
                   padding='same',
                   activation=args['lstm_act'],
                   strides=1)(merged2)

    pooling1 = MaxPooling1D(pool_size=2)(l1_c1d_1)
    pooling2 = MaxPooling1D(pool_size=2)(l1_c1d_2)

    l1_c2 = LSTM(args['l_size'],
                 activation=args['lstm_act'],
                 kernel_initializer='glorot_uniform',
                 return_sequences=True,
                 dropout=0.2,
                 implementation=args['imp'])(pooling1)

    l1_c3 = LSTM(args['l_size'],
                 activation=args['lstm_act'],
                 kernel_initializer='glorot_uniform',
                 return_sequences=True,
                 dropout=0.2,
                 implementation=args['imp'])(pooling2)

# =============================================================================
#    Batch Normalization Layer
# =============================================================================
    batch1 = BatchNormalization()(l1_c1)
    batch2 = BatchNormalization()(l1_c2)
    batch3 = BatchNormalization()(l1_c3)


# =============================================================================
# The layer specialized in activity prediction
# =============================================================================
    l2_c1 = LSTM(args['l_size'],
                 kernel_initializer='glorot_uniform',
                 return_sequences=False,
                 dropout=0.2,
                 implementation=args['imp'])(batch1)

#   The layer specialized in role prediction
    l2_c2 = LSTM(args['l_size'],
                 kernel_initializer='glorot_uniform',
                 return_sequences=False,
                 dropout=0.2,
                 implementation=args['imp'])(batch1)

#   The layer specialized in intercase prediction
    l2_c3 = LSTM(args['l_size'],
                 kernel_initializer='glorot_uniform',
                 return_sequences=False,
                 dropout=0.2,
                 implementation=args['imp'])(batch2)


#   The layer specialized in time prediction
    l2_c4 = LSTM(args['l_size'],
                 activation=args['lstm_act'],
                 kernel_initializer='glorot_uniform',
                 return_sequences=False,
                 dropout=0.2,
                 implementation=args['imp'])(batch3)

# =============================================================================
# Output Layer
# =============================================================================
    act_output = Dense(vec['next_evt']['activities'].shape[1],
                       activation='softmax',
                       kernel_initializer='glorot_uniform',
                       name='act_output')(l2_c1)

    role_output = Dense(vec['next_evt']['roles'].shape[1],
                        activation='softmax',
                        kernel_initializer='glorot_uniform',
                        name='role_output')(l2_c2)
    # Regression heads use a configurable activation; a linear Dense is
    # used when 'dense_act' is absent or None.
    if ('dense_act' in args) and (args['dense_act'] is not None):
        inter_output = Dense(vec['next_evt']['inter_attr'].shape[1],
                             activation=args['dense_act'],
                             kernel_initializer='glorot_uniform',
                             name='inter_output')(l2_c3)
    else:
        inter_output = Dense(vec['next_evt']['inter_attr'].shape[1],
                             kernel_initializer='glorot_uniform',
                             name='inter_output')(l2_c3)

    if ('dense_act' in args) and (args['dense_act'] is not None):
        time_output = Dense(1, activation=args['dense_act'],
                            kernel_initializer='glorot_uniform',
                            name='time_output')(l2_c4)
    else:
        time_output = Dense(1,
                            kernel_initializer='glorot_uniform',
                            name='time_output')(l2_c4)

    model = Model(inputs=[ac_input, rl_input, t_input, inter_input],
                  outputs=[act_output, role_output, time_output, inter_output])

    if args['optim'] == 'Nadam':
        opt = Nadam(learning_rate=0.002, beta_1=0.9, beta_2=0.999)
    elif args['optim'] == 'Adam':
        opt = Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999,
                   amsgrad=False)
    elif args['optim'] == 'SGD':
        opt = SGD(learning_rate=0.01, momentum=0.0, nesterov=False)
    elif args['optim'] == 'Adagrad':
        opt = Adagrad(learning_rate=0.01)
    else:
        # Previously an unknown optimizer fell through silently and caused a
        # NameError on 'opt' at compile time; fail fast with a clear message.
        raise ValueError('Unsupported optimizer: {}'.format(args['optim']))

    model.compile(loss={'act_output': 'categorical_crossentropy',
                        'role_output': 'categorical_crossentropy',
                        'time_output': 'mae',
                        'inter_output': 'mae'}, optimizer=opt)

    model.summary()
    early_stopping = EarlyStopping(monitor='val_loss', patience=50)
    cb = tc.TimingCallback(output_folder)
    clean_models = cm.CleanSavedModelsCallback(output_folder, 2)

    # Output file (epoch and validation loss are filled in by Keras)
    output_file_path = os.path.join(output_folder,
                                    'model_' + str(args['model_type']) +
                                    '_{epoch:02d}-{val_loss:.2f}.h5')

    # Saving
    model_checkpoint = ModelCheckpoint(output_file_path,
                                       monitor='val_loss',
                                       verbose=0,
                                       save_best_only=True,
                                       save_weights_only=False,
                                       mode='auto')
    lr_reducer = ReduceLROnPlateau(monitor='val_loss',
                                   factor=0.5,
                                   patience=10,
                                   verbose=0,
                                   mode='auto',
                                   min_delta=0.0001,
                                   cooldown=0,
                                   min_lr=0)

    # NOTE(review): batch_size is set to the prefix sequence length, not a
    # conventional mini-batch size — confirm this is intentional.
    batch_size = vec['prefixes']['activities'].shape[1]
    model.fit({'ac_input': vec['prefixes']['activities'],
               'rl_input': vec['prefixes']['roles'],
               't_input': vec['prefixes']['times'],
               'inter_input': vec['prefixes']['inter_attr']},
              {'act_output': vec['next_evt']['activities'],
               'role_output': vec['next_evt']['roles'],
               'time_output': vec['next_evt']['times'],
               'inter_output': vec['next_evt']['inter_attr']},
              validation_split=0.2,
              verbose=2,
              callbacks=[early_stopping, model_checkpoint,
                         lr_reducer, cb, clean_models],
              batch_size=batch_size, epochs=200)
def _training_model(train_vec,
                    valdn_vec,
                    ac_weights,
                    rl_weights,
                    output_folder,
                    args,
                    log_path=None):
    """Build and train an LSTM model for next activity/role/time prediction.

    Unlike the validation-split variant, this routine receives an explicit
    validation set and returns the trained model.

    Args:
        train_vec (dict): Training vectors with 'prefixes' (inputs) and
            'next_evt' (targets) for 'activities', 'roles', 'times' and
            'inter_attr'.
        valdn_vec (dict): Validation vectors with the same structure.
        ac_weights (numpy.ndarray): Pretrained activity embedding matrix.
        rl_weights (numpy.ndarray): Pretrained role embedding matrix.
        output_folder (str): Folder for model checkpoints and timing logs.
        args (dict): Hyper-parameters ('l_size', 'imp', 'lstm_act',
            'dense_act', 'optim', 'file', 'batch_size', 'epochs').
        log_path (str, optional): Forwarded to the timing callback when set.

    Returns:
        The trained Keras Model.

    Raises:
        ValueError: If ``args['optim']`` is not a supported optimizer name.
    """

    print('Build model...')
    print(args)
    # =============================================================================
    #     Input layer
    # =============================================================================
    ac_input = Input(shape=(train_vec['prefixes']['activities'].shape[1], ),
                     name='ac_input')
    rl_input = Input(shape=(train_vec['prefixes']['roles'].shape[1], ),
                     name='rl_input')
    t_input = Input(shape=(train_vec['prefixes']['times'].shape[1],
                           train_vec['prefixes']['times'].shape[2]),
                    name='t_input')
    inter_input = Input(shape=(train_vec['prefixes']['inter_attr'].shape[1],
                               train_vec['prefixes']['inter_attr'].shape[2]),
                        name='inter_input')

    # =============================================================================
    #    Embedding layer for categorical attributes
    # =============================================================================
    # Embeddings are frozen (trainable=False): they reuse pretrained weights.
    ac_embedding = Embedding(
        ac_weights.shape[0],
        ac_weights.shape[1],
        weights=[ac_weights],
        input_length=train_vec['prefixes']['activities'].shape[1],
        trainable=False,
        name='ac_embedding')(ac_input)

    rl_embedding = Embedding(
        rl_weights.shape[0],
        rl_weights.shape[1],
        weights=[rl_weights],
        input_length=train_vec['prefixes']['roles'].shape[1],
        trainable=False,
        name='rl_embedding')(rl_input)

    # =============================================================================
    #    Layer 1
    # =============================================================================
    # All features are merged along the feature axis and share one encoder.
    concatenate = Concatenate(name='concatenated', axis=2)(
        [ac_embedding, rl_embedding, t_input, inter_input])

    if args['lstm_act'] is not None:
        l1_c1 = LSTM(args['l_size'],
                     activation=args['lstm_act'],
                     kernel_initializer='glorot_uniform',
                     return_sequences=True,
                     dropout=0.2,
                     implementation=args['imp'])(concatenate)
    else:
        l1_c1 = LSTM(args['l_size'],
                     kernel_initializer='glorot_uniform',
                     return_sequences=True,
                     dropout=0.2,
                     implementation=args['imp'])(concatenate)

# =============================================================================
#    Batch Normalization Layer
# =============================================================================
    batch1 = BatchNormalization()(l1_c1)

    # =============================================================================
    # The layer specialized in activity prediction
    # =============================================================================
    l2_c1 = LSTM(args['l_size'],
                 kernel_initializer='glorot_uniform',
                 return_sequences=False,
                 dropout=0.2,
                 implementation=args['imp'])(batch1)

    #   The layer specialized in role prediction
    l2_c2 = LSTM(args['l_size'],
                 kernel_initializer='glorot_uniform',
                 return_sequences=False,
                 dropout=0.2,
                 implementation=args['imp'])(batch1)

    #   The layer specialized in time prediction
    l2_c3 = LSTM(args['l_size'],
                 activation=args['lstm_act'],
                 kernel_initializer='glorot_uniform',
                 return_sequences=False,
                 dropout=0.2,
                 implementation=args['imp'])(batch1)

    # =============================================================================
    # Output Layer
    # =============================================================================
    act_output = Dense(ac_weights.shape[0],
                       activation='softmax',
                       kernel_initializer='glorot_uniform',
                       name='act_output')(l2_c1)

    role_output = Dense(rl_weights.shape[0],
                        activation='softmax',
                        kernel_initializer='glorot_uniform',
                        name='role_output')(l2_c2)

    # The time head uses a configurable activation; a linear Dense is used
    # when 'dense_act' is absent or None.
    if ('dense_act' in args) and (args['dense_act'] is not None):
        time_output = Dense(train_vec['next_evt']['times'].shape[1],
                            activation=args['dense_act'],
                            kernel_initializer='glorot_uniform',
                            name='time_output')(l2_c3)
    else:
        time_output = Dense(train_vec['next_evt']['times'].shape[1],
                            kernel_initializer='glorot_uniform',
                            name='time_output')(l2_c3)
    model = Model(inputs=[ac_input, rl_input, t_input, inter_input],
                  outputs=[act_output, role_output, time_output])

    if args['optim'] == 'Nadam':
        opt = Nadam(learning_rate=0.002, beta_1=0.9, beta_2=0.999)
    elif args['optim'] == 'Adam':
        opt = Adam(learning_rate=0.001,
                   beta_1=0.9,
                   beta_2=0.999,
                   amsgrad=False)
    elif args['optim'] == 'SGD':
        opt = SGD(learning_rate=0.01, momentum=0.0, nesterov=False)
    elif args['optim'] == 'Adagrad':
        opt = Adagrad(learning_rate=0.01)
    else:
        # Previously an unknown optimizer fell through silently and caused a
        # NameError on 'opt' at compile time; fail fast with a clear message.
        raise ValueError('Unsupported optimizer: {}'.format(args['optim']))

    model.compile(loss={
        'act_output': 'categorical_crossentropy',
        'role_output': 'categorical_crossentropy',
        'time_output': 'mae'
    },
                  optimizer=opt)

    model.summary()

    early_stopping = EarlyStopping(monitor='val_loss', patience=40)
    if log_path:
        cb = tc.TimingCallback(output_folder, log_path=log_path)
    else:
        cb = tc.TimingCallback(output_folder)

    # Output file
    output_file_path = os.path.join(output_folder,
                                    os.path.splitext(args['file'])[0] + '.h5')

    # Saving
    model_checkpoint = ModelCheckpoint(output_file_path,
                                       monitor='val_loss',
                                       verbose=0,
                                       save_best_only=True,
                                       save_weights_only=False,
                                       mode='auto')
    lr_reducer = ReduceLROnPlateau(monitor='val_loss',
                                   factor=0.5,
                                   patience=10,
                                   verbose=0,
                                   mode='auto',
                                   min_delta=0.0001,
                                   cooldown=0,
                                   min_lr=0)

    batch_size = args['batch_size']
    model.fit(
        {
            'ac_input': train_vec['prefixes']['activities'],
            'rl_input': train_vec['prefixes']['roles'],
            't_input': train_vec['prefixes']['times'],
            'inter_input': train_vec['prefixes']['inter_attr']
        }, {
            'act_output': train_vec['next_evt']['activities'],
            'role_output': train_vec['next_evt']['roles'],
            'time_output': train_vec['next_evt']['times']
        },
        validation_data=({
            'ac_input': valdn_vec['prefixes']['activities'],
            'rl_input': valdn_vec['prefixes']['roles'],
            't_input': valdn_vec['prefixes']['times'],
            'inter_input': valdn_vec['prefixes']['inter_attr']
        }, {
            'act_output': valdn_vec['next_evt']['activities'],
            'role_output': valdn_vec['next_evt']['roles'],
            'time_output': valdn_vec['next_evt']['times']
        }),
        verbose=2,
        callbacks=[early_stopping, model_checkpoint, lr_reducer, cb],
        batch_size=batch_size,
        epochs=args['epochs'])
    return model
def _training_model(ac_weights, train_vec, valdn_vec, parms):
    """Train the processing-time and waiting-time models.

    Builds one model per perspective via ``create_model`` and fits each with
    its own checkpointing callbacks.

    Args:
        ac_weights (numpy.ndarray): Pretrained activity embedding matrix.
        train_vec (dict): Training vectors with 'proc_model' and
            'waiting_model' entries, each holding 'pref' inputs and 'next'
            targets.
        valdn_vec (dict): Validation vectors with the same structure.
        parms (dict): Parameters ('output', 'file', 'all_r_pool',
            'batch_size', 'epochs', plus the hyper-parameters consumed by
            ``create_model``).

    Returns:
        dict: {'proc_model': {'model': ...}, 'wait_model': {'model': ...}}.
    """

    print('Build model...')

    # NOTE(review): the TimingCallback instance is shared across both fits —
    # confirm that accumulated timing across the two runs is intended.
    cb = tc.TimingCallback(parms['output'])

    batch_size = parms['batch_size']

    # Output routes; splitext keeps the full stem of multi-dot filenames
    # (consistent with the other training routines in this module).
    path = parms['output']
    fname = os.path.splitext(parms['file'])[0]
    if parms['all_r_pool']:
        proc_model_file = os.path.join(path, fname + '_dpiapr.h5')
        waiting_model_file = os.path.join(path, fname + '_dwiapr.h5')
    else:
        proc_model_file = os.path.join(path, fname + '_dpispr.h5')
        waiting_model_file = os.path.join(path, fname + '_dwispr.h5')

    # Train models
    proc_model = create_model(ac_weights, train_vec['proc_model'], parms)

    # Keras callbacks are stateful: use fresh EarlyStopping / checkpoint /
    # LR-reducer instances per fit so the second run does not inherit the
    # first run's wait and best-value counters.
    early_stopping = EarlyStopping(monitor='val_loss', patience=50)
    model_checkpoint = ModelCheckpoint(proc_model_file,
                                       monitor='val_loss',
                                       verbose=0,
                                       save_best_only=True,
                                       save_weights_only=False,
                                       mode='auto')
    lr_reducer = ReduceLROnPlateau(monitor='val_loss',
                                   factor=0.5,
                                   patience=10,
                                   verbose=0,
                                   mode='auto',
                                   min_delta=0.0001,
                                   cooldown=0,
                                   min_lr=0)

    proc_model.fit(
        {
            'ac_input': train_vec['proc_model']['pref']['ac_index'],
            'features': train_vec['proc_model']['pref']['features']
        }, {'time_output': train_vec['proc_model']['next']},
        validation_data=({
            'ac_input':
            valdn_vec['proc_model']['pref']['ac_index'],
            'features':
            valdn_vec['proc_model']['pref']['features']
        }, {
            'time_output': valdn_vec['proc_model']['next']
        }),
        verbose=2,
        callbacks=[early_stopping, model_checkpoint, lr_reducer, cb],
        batch_size=batch_size,
        epochs=parms['epochs'])

    waiting_model = create_model(ac_weights, train_vec['waiting_model'], parms)
    # Fresh stateful callbacks for the second training run.
    early_stopping = EarlyStopping(monitor='val_loss', patience=50)
    model_checkpoint = ModelCheckpoint(waiting_model_file,
                                       monitor='val_loss',
                                       verbose=0,
                                       save_best_only=True,
                                       save_weights_only=False,
                                       mode='auto')
    lr_reducer = ReduceLROnPlateau(monitor='val_loss',
                                   factor=0.5,
                                   patience=10,
                                   verbose=0,
                                   mode='auto',
                                   min_delta=0.0001,
                                   cooldown=0,
                                   min_lr=0)

    waiting_model.fit(
        {
            'ac_input': train_vec['waiting_model']['pref']['ac_index'],
            'features': train_vec['waiting_model']['pref']['features']
        }, {'time_output': train_vec['waiting_model']['next']},
        validation_data=({
            'ac_input':
            valdn_vec['waiting_model']['pref']['ac_index'],
            'features':
            valdn_vec['waiting_model']['pref']['features']
        }, {
            'time_output': valdn_vec['waiting_model']['next']
        }),
        verbose=2,
        callbacks=[early_stopping, model_checkpoint, lr_reducer, cb],
        batch_size=batch_size,
        epochs=parms['epochs'])

    return {
        'proc_model': {
            'model': proc_model
        },
        'wait_model': {
            'model': waiting_model
        }
    }
def _training_model(ac_weights, train_vec, valdn_vec, parms):
    """Build and train a single-output LSTM model for time prediction.

    The model consumes current/next activity indexes plus a feature tensor
    and regresses the 'time_output' target.

    Args:
        ac_weights (numpy.ndarray): Pretrained activity embedding matrix
            (shared by the current- and next-activity inputs).
        train_vec (dict): Training vectors with 'pref' inputs ('ac_index',
            'n_ac_index', 'features') and 'next' targets.
        valdn_vec (dict): Validation vectors with the same structure.
        parms (dict): Parameters ('l_size', 'imp', 'lstm_act', 'dense_act',
            'optim', 'output', 'file', 'batch_size', 'epochs').

    Returns:
        The trained Keras Model.

    Raises:
        ValueError: If ``parms['optim']`` is not a supported optimizer name.
    """

    print('Build model...')
    # =============================================================================
    #     Input layer
    # =============================================================================
    ac_input = Input(shape=(train_vec['pref']['ac_index'].shape[1], ),
                     name='ac_input')
    n_ac_input = Input(shape=(train_vec['pref']['n_ac_index'].shape[1], ),
                       name='n_ac_input')
    features = Input(shape=(train_vec['pref']['features'].shape[1],
                            train_vec['pref']['features'].shape[2]),
                     name='features')

    # =============================================================================
    #    Embedding layer for categorical attributes
    # =============================================================================
    # Both activity inputs reuse the same frozen pretrained embedding weights.
    ac_embedding = Embedding(
        ac_weights.shape[0],
        ac_weights.shape[1],
        weights=[ac_weights],
        input_length=train_vec['pref']['ac_index'].shape[1],
        trainable=False,
        name='ac_embedding')(ac_input)
    n_ac_embedding = Embedding(
        ac_weights.shape[0],
        ac_weights.shape[1],
        weights=[ac_weights],
        input_length=train_vec['pref']['n_ac_index'].shape[1],
        trainable=False,
        name='n_ac_embedding')(n_ac_input)

    # =============================================================================
    #    Layer 1
    # =============================================================================

    merged = Concatenate(name='concatenated',
                         axis=2)([ac_embedding, n_ac_embedding, features])

    l1_c1 = LSTM(parms['l_size'],
                 kernel_initializer='glorot_uniform',
                 return_sequences=True,
                 dropout=0.2,
                 implementation=parms['imp'])(merged)

    # =============================================================================
    #    Batch Normalization Layer
    # =============================================================================
    batch1 = BatchNormalization()(l1_c1)

    # =============================================================================
    # The layer specialized in prediction
    # =============================================================================
    l2_c1 = LSTM(parms['l_size'],
                 activation=parms['lstm_act'],
                 kernel_initializer='glorot_uniform',
                 return_sequences=False,
                 dropout=0.2,
                 implementation=parms['imp'])(batch1)

    # =============================================================================
    # Output Layer
    # =============================================================================
    times_output = Dense(train_vec['next'].shape[1],
                         activation=parms['dense_act'],
                         kernel_initializer='glorot_uniform',
                         name='time_output')(l2_c1)

    model = Model(inputs=[ac_input, n_ac_input, features],
                  outputs=[times_output])

    if parms['optim'] == 'Nadam':
        opt = Nadam(learning_rate=0.002, beta_1=0.9, beta_2=0.999)
    elif parms['optim'] == 'Adam':
        opt = Adam(learning_rate=0.001,
                   beta_1=0.9,
                   beta_2=0.999,
                   amsgrad=False)
    elif parms['optim'] == 'SGD':
        opt = SGD(learning_rate=0.01, momentum=0.0, nesterov=False)
    elif parms['optim'] == 'Adagrad':
        opt = Adagrad(learning_rate=0.01)
    else:
        # Previously an unknown optimizer fell through silently and caused a
        # NameError on 'opt' at compile time; fail fast with a clear message.
        raise ValueError('Unsupported optimizer: {}'.format(parms['optim']))

    model.compile(loss={'time_output': 'mae'}, optimizer=opt)

    model.summary()

    early_stopping = EarlyStopping(monitor='val_loss', patience=50)
    cb = tc.TimingCallback(parms['output'])
    # clean_models = cm.CleanSavedModelsCallback(parms['output'], 2)

    # Output file; splitext keeps the full stem of multi-dot filenames
    # (consistent with the other training routines in this module).
    output_file_path = os.path.join(parms['output'],
                                    os.path.splitext(parms['file'])[0] + '.h5')
    # Saving
    model_checkpoint = ModelCheckpoint(output_file_path,
                                       monitor='val_loss',
                                       verbose=0,
                                       save_best_only=True,
                                       save_weights_only=False,
                                       mode='auto')
    lr_reducer = ReduceLROnPlateau(monitor='val_loss',
                                   factor=0.5,
                                   patience=10,
                                   verbose=0,
                                   mode='auto',
                                   min_delta=0.0001,
                                   cooldown=0,
                                   min_lr=0)

    batch_size = parms['batch_size']
    model.fit(
        {
            'ac_input': train_vec['pref']['ac_index'],
            'n_ac_input': train_vec['pref']['n_ac_index'],
            'features': train_vec['pref']['features']
        }, {'time_output': train_vec['next']},
        validation_data=({
            'ac_input': valdn_vec['pref']['ac_index'],
            'n_ac_input': valdn_vec['pref']['n_ac_index'],
            'features': valdn_vec['pref']['features']
        }, {
            'time_output': valdn_vec['next']
        }),
        verbose=2,
        callbacks=[early_stopping, model_checkpoint, lr_reducer, cb],
        batch_size=batch_size,
        epochs=parms['epochs'])
    return model
def _training_model(vec, ac_weights, rl_weights, output_folder, args):
    """Build and train a seq2seq (encoder-decoder) LSTM model.

    The encoder reads prefixes of activities, roles and times; its final
    LSTM states initialize a decoder that jointly predicts the suffix
    activities, roles and relative times.

    Args:
        vec (dict): vectorized examples with 'encoder_input_data',
            'decoder_input_data' and 'decoder_target_data' entries, each
            containing 'activities', 'roles' and 'times' arrays.
        ac_weights (numpy.ndarray): pretrained activity-embedding matrix,
            shape (num_activities, emb_dim).
        rl_weights (numpy.ndarray): pretrained role-embedding matrix,
            shape (num_roles, emb_dim).
        output_folder (str): directory for checkpoints and timing logs.
        args (dict): hyper-parameters; uses 'l_size', 'imp', 'lstm_act',
            'dense_act', 'optim', 'index_ac', 'index_rl', 'model_type'.

    Returns:
        The trained Keras model (best weights are also checkpointed to
        disk by the ModelCheckpoint callback).
    """
    print('Build model...')
    # =========================================================================
    #    Encoder input layer
    # =========================================================================
    ac_input = Input(
        shape=(vec['encoder_input_data']['activities'].shape[1], ),
        name='ac_input')
    rl_input = Input(shape=(vec['encoder_input_data']['roles'].shape[1], ),
                     name='rl_input')
    t_input = Input(shape=(vec['encoder_input_data']['times'].shape[1], 1),
                    name='t_input')

    # =========================================================================
    #    Embedding layer for categorical attributes (frozen pretrained weights)
    # =========================================================================
    ac_embedding = Embedding(
        ac_weights.shape[0],
        ac_weights.shape[1],
        weights=[ac_weights],
        input_length=vec['encoder_input_data']['activities'].shape[1],
        trainable=False,
        name='ac_embedding')(ac_input)

    rl_embedding = Embedding(
        rl_weights.shape[0],
        rl_weights.shape[1],
        weights=[rl_weights],
        input_length=vec['encoder_input_data']['roles'].shape[1],
        trainable=False,
        name='rl_embedding')(rl_input)

    # =========================================================================
    #    LSTM encoder layer
    # =========================================================================
    merged = Concatenate(name='concatenated',
                         axis=2)([ac_embedding, rl_embedding])

    # BUG FIX: the original assigned both returned states to the same
    # variable (`state_l1_c1` twice), discarding the hidden state and
    # seeding the decoder with [cell, cell]. With return_state=True an
    # LSTM returns (output, state_h, state_c); keep both separately.
    l1_c1, state_h_l1_c1, state_c_l1_c1 = LSTM(
        args['l_size'],
        kernel_initializer='glorot_uniform',
        return_state=True,
        dropout=0.2,
        implementation=args['imp'])(merged)

    l1_c3, state_h_l1_c3, state_c_l1_c3 = LSTM(
        args['l_size'],
        activation=args['lstm_act'],
        kernel_initializer='glorot_uniform',
        return_state=True,
        dropout=0.2,
        implementation=args['imp'])(t_input)

    # We discard the encoder outputs and only keep the [h, c] state pairs.
    encoder_states_l1_c1 = [state_h_l1_c1, state_c_l1_c1]
    encoder_states_l1_c3 = [state_h_l1_c3, state_c_l1_c3]

    # =========================================================================
    #    Decoder input layer
    # =========================================================================
    dec_ac_input = Input(
        shape=(vec['decoder_input_data']['activities'].shape[1], ),
        name='dec_ac_input')
    dec_rl_input = Input(shape=(vec['decoder_input_data']['roles'].shape[1], ),
                         name='dec_rl_input')
    dec_t_input = Input(shape=(vec['decoder_input_data']['times'].shape[1], 1),
                        name='dec_t_input')

    # =========================================================================
    #    Embedding layer for decoder categorical attributes
    # =========================================================================
    dec_ac_embedding = Embedding(
        ac_weights.shape[0],
        ac_weights.shape[1],
        weights=[ac_weights],
        input_length=vec['decoder_input_data']['activities'].shape[1],
        trainable=False,
        name='dec_ac_embedding')(dec_ac_input)

    dec_rl_embedding = Embedding(
        rl_weights.shape[0],
        rl_weights.shape[1],
        weights=[rl_weights],
        input_length=vec['decoder_input_data']['roles'].shape[1],
        trainable=False,
        name='dec_rl_embedding')(dec_rl_input)

    dec_merged = Concatenate(name='dec_concatenated',
                             axis=2)([dec_ac_embedding, dec_rl_embedding])

    # Decoder LSTMs, initialized with the matching encoder state pairs.
    l2_c1, _, _ = LSTM(args['l_size'],
                       return_sequences=True,
                       return_state=True,
                       implementation=args['imp'])(
                           dec_merged, initial_state=encoder_states_l1_c1)

    l2_c3, _, _ = LSTM(args['l_size'],
                       activation=args['lstm_act'],
                       return_sequences=True,
                       return_state=True,
                       dropout=0.2,
                       implementation=args['imp'])(
                           dec_t_input, initial_state=encoder_states_l1_c3)

    # =========================================================================
    #    Output heads: next activity, next role (softmax) and time (regression)
    # =========================================================================
    dec_act_output = Dense(len(args['index_ac']),
                           activation='softmax',
                           name='act_output')(l2_c1)
    dec_rl_output = Dense(len(args['index_rl']),
                          activation='softmax',
                          name='rl_output')(l2_c1)
    if ('dense_act' in args) and (args['dense_act'] is not None):
        dec_time_output = Dense(1,
                                activation=args['dense_act'],
                                kernel_initializer='glorot_uniform',
                                name='time_output')(l2_c3)
    else:
        dec_time_output = Dense(1,
                                kernel_initializer='glorot_uniform',
                                name='time_output')(l2_c3)

    model = Model(inputs=[
        ac_input, rl_input, t_input, dec_ac_input, dec_rl_input, dec_t_input
    ],
                  outputs=[dec_act_output, dec_rl_output, dec_time_output])

    if args['optim'] == 'Nadam':
        opt = Nadam(learning_rate=0.002, beta_1=0.9, beta_2=0.999)
    elif args['optim'] == 'Adam':
        opt = Adam(learning_rate=0.001,
                   beta_1=0.9,
                   beta_2=0.999,
                   amsgrad=False)
    elif args['optim'] == 'SGD':
        opt = SGD(learning_rate=0.01, momentum=0.0, nesterov=False)
    elif args['optim'] == 'Adagrad':
        opt = Adagrad(learning_rate=0.01)

    model.compile(loss={
        'act_output': 'categorical_crossentropy',
        'rl_output': 'categorical_crossentropy',
        'time_output': 'mae'
    },
                  metrics=['accuracy'],
                  optimizer=opt)

    # summary() prints the table itself and returns None; wrapping it in
    # print() (as the original did) emitted a spurious "None" line.
    model.summary()

    early_stopping = EarlyStopping(monitor='val_loss', patience=42)
    cb = tc.TimingCallback(output_folder)
    clean_models = cm.CleanSavedModelsCallback(output_folder, 2)

    # Checkpoint path encodes epoch and validation loss in the filename.
    output_file_path = os.path.join(
        output_folder,
        'model_' + str(args['model_type']) + '_{epoch:02d}-{val_loss:.2f}.h5')
    model_checkpoint = ModelCheckpoint(output_file_path,
                                       monitor='val_loss',
                                       verbose=0,
                                       save_best_only=True,
                                       save_weights_only=False,
                                       mode='auto')
    lr_reducer = ReduceLROnPlateau(monitor='val_loss',
                                   factor=0.5,
                                   patience=40,
                                   verbose=0,
                                   mode='auto',
                                   min_delta=0.0001,
                                   cooldown=0,
                                   min_lr=0)

    # NOTE(review): this uses the encoder *sequence length* (shape[1]) as the
    # batch size, not the number of examples — looks intentional upstream but
    # worth confirming; left unchanged to preserve behavior.
    batch_size = vec['encoder_input_data']['activities'].shape[1]
    model.fit(
        {
            'ac_input': vec['encoder_input_data']['activities'],
            'rl_input': vec['encoder_input_data']['roles'],
            't_input': vec['encoder_input_data']['times'],
            'dec_ac_input': vec['decoder_input_data']['activities'],
            'dec_rl_input': vec['decoder_input_data']['roles'],
            'dec_t_input': vec['decoder_input_data']['times']
        }, {
            'act_output': vec['decoder_target_data']['activities'],
            'rl_output': vec['decoder_target_data']['roles'],
            'time_output': vec['decoder_target_data']['times']
        },
        batch_size=batch_size,
        epochs=500,
        validation_split=0.2,
        verbose=2,
        callbacks=[
            early_stopping, model_checkpoint, lr_reducer, cb, clean_models
        ])
    # Return the trained model for consistency with the sibling training
    # function in this module (backward-compatible: previous callers
    # received None and may still ignore the return value).
    return model