import numpy as np
import tensorflow as tf


def custom_constant_layer(const_size, name, np_constant=None):
    # Build a constant filler layer; defaults to a zero column of shape (const_size, 1),
    # otherwise the supplied constant gets a trailing singleton dimension appended.
    if np_constant is None:
        np_constant = np.zeros((const_size, 1))
    else:
        np_constant = np.reshape(np_constant, (*np_constant.shape, 1))

    const_fake_extender = keras_constant_layer(np_constant, name=name)
    # tf_constant = tf.keras.backend.constant(np_constant, dtype='float32')
    # const_fake_extender = Input(tensor=tf_constant, shape=np_constant.shape, dtype='float32', name=name)
    # TODO: reshaping constant input??
    return tf.keras.layers.Lambda(lambda x: tf.keras.backend.reshape(x, const_fake_extender.shape))(
        const_fake_extender), const_fake_extender
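

# The `keras_constant_layer` helper used throughout these examples is defined elsewhere in
# the project. A minimal stand-in, inferred from the commented-out alternative above (a
# Keras Input backed by a constant backend tensor), might look like the sketch below; it is
# illustrative only and not necessarily the project's actual implementation.
def keras_constant_layer(np_constant, name):
    tf_constant = tf.keras.backend.constant(np_constant, dtype='float32')
    return tf.keras.Input(tensor=tf_constant, shape=np_constant.shape,
                          dtype='float32', name=name)

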
def build_active_passive_network(roles, dual_roles, fillers, tree_shape):
    filler_len = fillers[0].shape[0]
    max_depth = len(tree_shape) - 1

    branch = build_classification_branch(roles=dual_roles,
                                         fillers=fillers,
                                         tree_shape=tree_shape,
                                         role_extraction_order=[1, 0, 0],
                                         stop_level=0)
    classification_const_inputs, apnet_variable_input, classification_output, _ = branch

    # Gate the network input with the passive-classification flag before the passive branch.
    scalar_mul = tf.keras.layers.Lambda(lambda tensors: tensors[0] * tensors[1])(
        [apnet_variable_input, classification_output])
    passive_branch_const_inputs, passive_branch_output = extract_semantic_tree_from_passive_voice_branch(
        input_layer=scalar_mul,
        roles=roles,
        dual_roles=dual_roles,
        filler_len=filler_len,
        max_depth=max_depth)

    # Gate for the active branch:
    # is_active = (classification_output + (-1)) * (-1) = 1 - classification_output.
    np_constant = np.array([-1])
    const_neg_1 = keras_constant_layer(np_constant, name='active_voice_neg_1')
    sum_is_passive_const_neg_1 = tf.keras.layers.Add()([classification_output, const_neg_1])
    is_active = tf.keras.layers.Multiply()([sum_is_passive_const_neg_1, const_neg_1])
    active_branch_input = tf.keras.layers.Lambda(lambda tensors: tensors[0] * tensors[1])(
        [apnet_variable_input, is_active])
    active_branch_const_inputs, active_branch_output = extract_semantic_tree_from_active_voice_branch(
        input_layer=active_branch_input,
        roles=roles,
        dual_roles=dual_roles,
        filler_len=filler_len,
        max_depth=max_depth)

    sum_branches = tf.keras.layers.Add()([passive_branch_output, active_branch_output])
    return tf.keras.Model(
        inputs=[
            *classification_const_inputs,
            *passive_branch_const_inputs,
            *active_branch_const_inputs,
            const_neg_1,
            apnet_variable_input,
        ],
        outputs=sum_branches)
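

# Quick sanity check of the gating arithmetic above, in plain NumPy (no Keras graph needed):
# is_active = (classification_output + (-1)) * (-1) = 1 - classification_output, so a passive
# classification (1.0) zeroes out the active branch while an active one (0.0) keeps it, and
# the final Add() passes through whichever branch was not multiplied by zero.
for classification_flag in (0.0, 1.0):
    assert (classification_flag + (-1.0)) * (-1.0) == 1.0 - classification_flag

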
def constant_input(role, filler_len, max_depth, name, matrix_creator):
    np_constant = matrix_creator(role, filler_len, max_depth, name)
    return keras_constant_layer(np_constant, name=name)
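

# Hedged usage sketch for constant_input: `matrix_creator` can be any callable with the
# signature (role, filler_len, max_depth, name) -> np.ndarray. The creator below is a
# made-up placeholder (it merely stacks copies of the role vector), not one of the
# project's real role-matrix builders.
def toy_matrix_creator(role, filler_len, max_depth, name):
    role_column = np.asarray(role, dtype=np.float32).reshape(-1, 1)
    return np.tile(role_column, (1, filler_len))

# toy_const = constant_input(role=np.array([1.0, 0.0]), filler_len=3, max_depth=2,
#                            name='toy_role_const', matrix_creator=toy_matrix_creator)

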
def extract_semantic_tree_from_passive_voice_branch(input_layer, roles,
                                                    dual_roles, filler_len,
                                                    max_depth):
    stop_level_for_verb = 0
    verb_branch = build_one_level_extraction_branch(
        model_input=input_layer,
        roles=dual_roles,
        filler_len=filler_len,
        max_depth=max_depth,
        stop_level=stop_level_for_verb,
        role_extraction_order=[1, 0, 1],
        prefix='passive_verb_extract')
    verb_extraction_const_inputs, verb_extraction_output, _ = verb_branch

    stop_level_for_agent = 0
    agent_branch = build_one_level_extraction_branch(
        model_input=input_layer,
        roles=dual_roles,
        filler_len=filler_len,
        max_depth=max_depth,
        stop_level=stop_level_for_agent,
        role_extraction_order=[1, 1, 1],
        prefix='passive_agent_extract')
    agent_extraction_const_inputs, agent_extraction_output, _ = agent_branch

    stop_level_for_p = max_depth - 1
    p_branch = build_one_level_extraction_branch(model_input=input_layer,
                                                 roles=dual_roles,
                                                 filler_len=filler_len,
                                                 max_depth=max_depth,
                                                 stop_level=stop_level_for_p,
                                                 role_extraction_order=[0],
                                                 prefix='passive_p_extract')

    p_extraction_const_inputs, p_raw_output, current_num_elements = p_branch
    _, flattened_num_elements = unshift_matrix(roles[0], filler_len,
                                               stop_level_for_p).shape
    # TODO: insert cropping here
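    # The three layers below keep only the first `filler_len` values of the raw extraction:
    # reshape to (1, flattened_num_elements + filler_len, 1), crop away the trailing
    # flattened_num_elements entries, then reshape the surviving slice to (1, filler_len, 1).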
    reshape_for_crop = tf.keras.layers.Lambda(
        lambda x: tf.keras.backend.reshape(x, (1, flattened_num_elements +
                                               filler_len, 1)))(p_raw_output)
    clip_first_level = tf.keras.layers.Cropping1D(
        cropping=(0, flattened_num_elements))(reshape_for_crop)
    p_extraction_output = tf.keras.layers.Lambda(
        lambda x: tf.keras.backend.reshape(x, (1, filler_len, 1)))(
            clip_first_level)

    # TODO: define how to handle extractions that do not reach the bottom of the structure.
    # Given that we have all the fillers, the maximum joining depth is equal to 1.
    agentxr0_pxr1_const_inputs, agentxr0_pxr1_output = build_join_branch(
        roles=roles,
        filler_len=filler_len,
        max_depth=1,
        inputs=[agent_extraction_output, p_extraction_output],
        prefix='passive_join(agent,p)')

    # Later we have to join two subtrees of different depths; for that, the verb filler is
    # padded to the same depth with a fake (zero) constant layer.
    np_constant = np.zeros((filler_len, 1))
    const_fake_extender = keras_constant_layer(
        np_constant, name='passive_fake_extender_verb_agent')
    concatenate_verb = tf.keras.layers.Concatenate(axis=0)(
        [verb_extraction_output, const_fake_extender, const_fake_extender])
    # TODO: why is there a constant 3?
    reshaped_verb = tf.keras.layers.Lambda(lambda x: tf.keras.backend.reshape(
        x, (1, filler_len * 3, 1)))(concatenate_verb)

    # TODO: reshape by 2, why is there a constant 2?
    tmp_reshaped_agentxr0_pxr1 = tf.keras.layers.Lambda(
        lambda x: tf.keras.backend.reshape(x, (filler_len * 2, 1)))(
            agentxr0_pxr1_output)
    # TODO: reshaping constant input??
    tmp_reshaped_fake = tf.keras.layers.Lambda(
        lambda x: tf.keras.backend.reshape(x, (filler_len, 1)))(
            const_fake_extender)
    concatenate_agentxr0_pxr1 = tf.keras.layers.Concatenate(axis=0)(
        [tmp_reshaped_fake, tmp_reshaped_agentxr0_pxr1])
    # TODO: why is there a constant 3?
    reshaped_agentxr0_pxr1 = tf.keras.layers.Lambda(
        lambda x: tf.keras.backend.reshape(x, (1, filler_len * 3, 1)))(
            concatenate_agentxr0_pxr1)

    semantic_tree_const_inputs, semantic_tree_output = build_join_branch(
        roles=roles,
        filler_len=filler_len,
        max_depth=2,
        inputs=[reshaped_verb, reshaped_agentxr0_pxr1],
        prefix='passive_join(verb, join(agent,p))')
    return [
        *verb_extraction_const_inputs, *agent_extraction_const_inputs,
        *p_extraction_const_inputs, *agentxr0_pxr1_const_inputs,
        *semantic_tree_const_inputs, const_fake_extender
    ], semantic_tree_output
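

# Shape check for the padding/concatenation step above, in plain NumPy: a single verb filler
# of shape (filler_len, 1) is extended with two zero blocks to (filler_len * 3, 1), while the
# joined (agent x r0, p x r1) tensor of shape (filler_len * 2, 1) gets one zero block
# prepended, so both operands of the final join end up with shape (1, filler_len * 3, 1).
def _check_passive_join_shapes(filler_len=4):
    verb = np.ones((filler_len, 1))
    fake = np.zeros((filler_len, 1))
    reshaped_verb = np.concatenate([verb, fake, fake], axis=0).reshape(1, filler_len * 3, 1)
    agent_p = np.ones((filler_len * 2, 1))
    reshaped_agent_p = np.concatenate([fake, agent_p], axis=0).reshape(1, filler_len * 3, 1)
    assert reshaped_verb.shape == reshaped_agent_p.shape == (1, filler_len * 3, 1)

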
def check_if_zero_branch(flag_input, block_id):
    # Returns the (-1) constant layer and a tensor equal to (flag + (-1)) * (-1) = 1 - flag,
    # i.e. a value of 1 when the incoming flag is zero.
    np_constant = np.array([-1])
    const_neg_1 = keras_constant_layer(np_constant, name='increment_neg_{}'.format(block_id))
    sum_is_zero_const = tf.keras.layers.Add()([flag_input, const_neg_1])
    return const_neg_1, tf.keras.layers.Multiply()([sum_is_zero_const, const_neg_1])