Example #1
def build_sum_network(roles,
                      fillers,
                      dual_roles,
                      max_depth,
                      number_sum_blocks=1):
    filler_len = fillers[0].shape[0]

    input_num_elements, flattened_tree_num_elements = unshift_matrix(
        roles[0], filler_len, max_depth - 1).shape
    shape = (flattened_tree_num_elements + filler_len, 1)
    flattened_decrementing_input = tf.keras.layers.Input(shape=(*shape, ),
                                                         batch_size=1,
                                                         name='left_operand')
    flattened_incrementing_input = tf.keras.layers.Input(shape=(*shape, ),
                                                         batch_size=1,
                                                         name='right_operand')

    block_id = 0
    shift_input, increment_input, filler_input = constant_inputs_for_increment_block(
        roles, fillers, max_depth, block_id)
    left_shift_input, right_shift_input = shift_input
    tmp_reshaped_increment, const_increment = increment_input
    tmp_reshaped_fake_filler, const_filler = filler_input

    target_elements, _ = unshift_matrix(roles[0], filler_len,
                                        max_depth - 1).shape
    tmp_reshaped_fake, const_one = custom_constant_layer(
        const_size=target_elements + filler_len, name='const_one')

    all_sum_const_inputs = []
    incremented = flattened_incrementing_input
    decremented = flattened_decrementing_input
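    # Chain number_sum_blocks sum blocks; each block consumes the previous block's outputs.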
    for i in range(number_sum_blocks):
        sum_const_inputs, incremented, decremented = sum_block(
            incrementing_input=incremented,
            decrementing_input=decremented,
            increment_value=tmp_reshaped_increment,
            roles=roles,
            dual_roles=dual_roles,
            filler_len=filler_len,
            max_depth=max_depth,
            block_id=block_id + i * 5,
            left_shift_input=left_shift_input,
            right_shift_input=right_shift_input,
            constant_input_filler=tmp_reshaped_fake_filler,
            constant_for_decrementing_input=tmp_reshaped_fake)
        all_sum_const_inputs.extend(sum_const_inputs)

    return tf.keras.Model(inputs=[
        left_shift_input,
        right_shift_input,
        const_increment,
        const_filler,
        const_one,
        *all_sum_const_inputs,
        flattened_decrementing_input,
        flattened_incrementing_input,
    ],
                          outputs=[decremented, incremented])
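
# Toy sketch of the block-chaining pattern in the loop above (Dense layers stand
# in for the project-specific sum_block; shapes are assumed): each iteration
# consumes the previous iteration's outputs, so the blocks run sequentially.
import tensorflow as tf

x = tf.keras.layers.Input(shape=(4,))
out = x
for i in range(3):
    out = tf.keras.layers.Dense(4, name='toy_block_{}'.format(i))(out)
toy_chain = tf.keras.Model(inputs=x, outputs=out)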
Example #2
def crop_tensor(layer, role, filler_len, stop_level):
    _, flattened_num_elements = unshift_matrix(role, filler_len, stop_level).shape
    return custom_cropping_layer(input_layer=layer,
                                 crop_from_beginning=0,
                                 crop_from_end=flattened_num_elements,
                                 input_tensor_length=flattened_num_elements + filler_len,
                                 final_tensor_length=filler_len)
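
# Standalone sketch of what crop_tensor achieves (custom_cropping_layer is a
# project helper; this assumes it behaves like the reshape + Cropping1D pattern
# used in Example #9): keep only the first filler_len elements of the flattened
# (filler_len + flattened_num_elements)-long vector.
import tensorflow as tf

filler_len, flattened_num_elements = 2, 4
full = tf.keras.layers.Input(shape=(filler_len + flattened_num_elements, 1), batch_size=1)
first_filler = tf.keras.layers.Cropping1D(cropping=(0, flattened_num_elements))(full)
toy_crop = tf.keras.Model(inputs=full, outputs=first_filler)  # output shape: (1, filler_len, 1)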
Example #3
def build_decode_model_2_tuple_network(filler_len, dual_roles, max_depth,
                                       model_2_tuple_has_weights):
    input_num_elements, flattened_tree_num_elements = unshift_matrix(
        dual_roles[0], filler_len, max_depth - 1).shape
    shape = (flattened_tree_num_elements + filler_len, 1)
    flattened_input = tf.keras.layers.Input(shape=(*shape, ))

    index_const_inputs, index_raw_output, _ = build_universal_extraction_branch(
        model_input=flattened_input,
        roles=dual_roles,
        filler_len=filler_len,
        max_depth=max_depth - 1,
        stop_level=max_depth - 1,
        role_extraction_order=[0],
        prefix='extracting_index')
    index_raw_output = tf.keras.layers.Lambda(
        lambda x: tf.keras.backend.reshape(x, (filler_len, )))(
            index_raw_output)

    alpha_const_inputs, alpha_raw_output, _ = build_universal_extraction_branch(
        model_input=flattened_input,
        roles=dual_roles,
        filler_len=filler_len,
        max_depth=max_depth - 1,
        stop_level=max_depth - 1,
        role_extraction_order=[1],
        prefix='extracting_alpha')
    alpha_raw_output = tf.keras.layers.Lambda(
        lambda x: tf.keras.backend.reshape(x, (filler_len, )))(
            alpha_raw_output)

    if model_2_tuple_has_weights:
        weight_const_inputs, weight_raw_output, _ = build_universal_extraction_branch(
            model_input=flattened_input,
            roles=dual_roles,
            filler_len=filler_len,
            max_depth=max_depth - 1,
            stop_level=max_depth - 1,
            role_extraction_order=[2],
            prefix='extracting_weight')
        weight_raw_output = tf.keras.layers.Lambda(
            lambda x: tf.keras.backend.reshape(x, (filler_len, )))(
                weight_raw_output)

        return tf.keras.Model(
            inputs=[
                *index_const_inputs, *alpha_const_inputs, *weight_const_inputs,
                flattened_input
            ],
            outputs=[index_raw_output, alpha_raw_output, weight_raw_output])

    return tf.keras.Model(
        inputs=[*index_const_inputs, *alpha_const_inputs, flattened_input],
        outputs=[
            index_raw_output,
            alpha_raw_output,
        ])
Example #4
def make_output_same_length_as_input(layer_to_crop, role, filler_len, max_depth):
    target_num_elements, flattened_num_elements = unshift_matrix(role, filler_len, max_depth).shape
    return custom_cropping_layer(
        input_layer=layer_to_crop,
        crop_from_beginning=0,
        crop_from_end=flattened_num_elements + filler_len - target_num_elements,
        input_tensor_length=flattened_num_elements + filler_len,
        final_tensor_length=target_num_elements
    )
Example #5
def decode_most_nested_element_numpy(tree, depth):
    fillers, roles, dual_roles = input_data()
    current_input = tree
    max_depth = depth - 1
    stop_level = 0
    filler_len = fillers[0].shape[0]
    role_extraction_order = [1 for _ in range(max_depth)]

    for level_index, role_index in zip(range(max_depth, stop_level - 1, -1),
                                       role_extraction_order):
        left_shift_input = unshift_matrix(roles[role_index], filler_len,
                                          level_index)
        current_input = left_shift_input.dot(current_input[filler_len:])
    return current_input
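
# Conceptual sketch of role unbinding (plain numpy, not the project's exact
# layout, where unshift_matrix builds the unbinding operator): with an
# orthonormal role vector, a filler bound via a Kronecker product is recovered
# by multiplying with the corresponding unbinding matrix, which is what the
# unshift_matrix(...).dot(...) step in the loop above relies on.
import numpy as np

filler = np.array([1.0, 2.0, 3.0])
role = np.array([0.0, 1.0])                                    # orthonormal role
bound = np.kron(role, filler)                                  # shape (6,)
unbinding = np.kron(role.reshape(1, -1), np.eye(len(filler)))  # shape (3, 6)
print(unbinding.dot(bound))                                    # [1. 2. 3.]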
Example #6
def check_if_not_zero_branch(decrementing_input, role, filler_len, max_depth, block_id):
    const_inputs, one_tensor_output = build_extract_branch(
        input_layer=decrementing_input,
        extract_role=role,
        filler_len=filler_len,
        max_depth=max_depth - 1,
        block_id=block_id
    )

    target_elements, _ = unshift_matrix(role, filler_len, max_depth - 1).shape
    reshape_for_pool = tf.keras.layers.Lambda(lambda x: tf.keras.backend.reshape(x, (1, target_elements, 1)))(
        one_tensor_output)
    global_max_pool = tf.keras.layers.GlobalMaxPooling1D()(reshape_for_pool)
    return const_inputs, tf.keras.layers.Lambda(normalization)(global_max_pool), one_tensor_output
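
# Toy standalone sketch of the non-zero check above: reshape to
# (batch, steps, channels), take the global maximum, then squash it to a 0/1
# flag. `normalization` is a project helper; the clipping lambda below merely
# stands in for it (an assumption, not the project's definition).
import numpy as np
import tensorflow as tf

n = 4
vec = tf.keras.layers.Input(shape=(n, 1), batch_size=1)
pooled = tf.keras.layers.GlobalMaxPooling1D()(vec)
flag = tf.keras.layers.Lambda(lambda x: tf.clip_by_value(x, 0.0, 1.0))(pooled)
toy_flag_model = tf.keras.Model(inputs=vec, outputs=flag)
print(toy_flag_model.predict(np.array([0.0, 0.0, 3.0, 0.0]).reshape(1, n, 1)))  # [[1.]]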
Example #7
def build_decode_number_network(fillers, dual_roles, max_depth):
    filler_len = fillers[0].shape[0]

    input_num_elements, flattened_tree_num_elements = unshift_matrix(
        dual_roles[0], filler_len, max_depth - 1).shape
    shape = (flattened_tree_num_elements + filler_len, 1)
    flattened_input = tf.keras.layers.Input(shape=(*shape, ), batch_size=1)

    const_inputs, is_not_zero, unshifted = check_if_not_zero_branch(
        decrementing_input=flattened_input,
        role=dual_roles[0],
        filler_len=filler_len,
        max_depth=max_depth,
        block_id=1)

    return tf.keras.Model(inputs=[*const_inputs, flattened_input],
                          outputs=[unshifted, is_not_zero])
Example #8
def build_increment_network(roles, dual_roles, fillers, max_depth):
    filler_len = fillers[0].shape[0]

    # Number to be incremented
    input_num_elements, flattened_tree_num_elements = unshift_matrix(roles[0], filler_len, max_depth - 1).shape
    shape = (flattened_tree_num_elements + filler_len, 1)
    flattened_incrementing_input = tf.keras.layers.Input(shape=(*shape,), batch_size=1)

    block_id = 0
    shift_input, increment_input, filler_input = constant_inputs_for_increment_block(roles, fillers, max_depth,
                                                                                     block_id)
    left_shift_input, right_shift_input = shift_input
    tmp_reshaped_increment, const_increment = increment_input
    tmp_reshaped_fake_filler, const_filler = filler_input

    increment_const_inputs, output = increment_block(
        incrementing_input=flattened_incrementing_input,
        increment_value=tmp_reshaped_increment,
        roles=roles,
        dual_roles=dual_roles,
        filler_len=filler_len,
        max_depth=max_depth,
        block_id=block_id,
        left_shift_input=left_shift_input,
        right_shift_input=right_shift_input,
        constant_input_filler=tmp_reshaped_fake_filler
    )

    return tf.keras.Model(
        inputs=[
            left_shift_input,
            right_shift_input,
            const_increment,
            const_filler,
            *increment_const_inputs,
            flattened_incrementing_input,
        ],
        outputs=output)
Example #9
def extract_semantic_tree_from_passive_voice_branch(input_layer, roles,
                                                    dual_roles, filler_len,
                                                    max_depth):
    stop_level_for_verb = 0
    verb_branch = build_one_level_extraction_branch(
        model_input=input_layer,
        roles=dual_roles,
        filler_len=filler_len,
        max_depth=max_depth,
        stop_level=stop_level_for_verb,
        role_extraction_order=[1, 0, 1],
        prefix='passive_verb_extract')
    verb_extraction_const_inputs, verb_extraction_output, _ = verb_branch

    stop_level_for_agent = 0
    agent_branch = build_one_level_extraction_branch(
        model_input=input_layer,
        roles=dual_roles,
        filler_len=filler_len,
        max_depth=max_depth,
        stop_level=stop_level_for_agent,
        role_extraction_order=[1, 1, 1],
        prefix='passive_agent_extract')
    agent_extraction_const_inputs, agent_extraction_output, _ = agent_branch

    stop_level_for_p = max_depth - 1
    p_branch = build_one_level_extraction_branch(model_input=input_layer,
                                                 roles=dual_roles,
                                                 filler_len=filler_len,
                                                 max_depth=max_depth,
                                                 stop_level=stop_level_for_p,
                                                 role_extraction_order=[0],
                                                 prefix='passive_p_extract')

    p_extraction_const_inputs, p_raw_output, current_num_elements = p_branch
    _, flattened_num_elements = unshift_matrix(roles[0], filler_len,
                                               stop_level_for_p).shape
    # TODO: insert cropping here
    reshape_for_crop = tf.keras.layers.Lambda(
        lambda x: tf.keras.backend.reshape(x, (1, flattened_num_elements +
                                               filler_len, 1)))(p_raw_output)
    clip_first_level = tf.keras.layers.Cropping1D(
        cropping=(0, flattened_num_elements))(reshape_for_crop)
    p_extraction_output = tf.keras.layers.Lambda(
        lambda x: tf.keras.backend.reshape(x, (1, filler_len, 1)))(
            clip_first_level)

    # TODO: define how to handle extractions that do not reach the bottom of the
    # structure; given that we have all the fillers, the maximum joining depth is 1
    agentxr0_pxr1_const_inputs, agentxr0_pxr1_output = build_join_branch(
        roles=roles,
        filler_len=filler_len,
        max_depth=1,
        inputs=[agent_extraction_output, p_extraction_output],
        prefix='passive_join(agent,p)')

    # Later we have to join two subtrees of different depths. To do that, the verb
    # filler is extended to the same depth by concatenating it with a fake (zero)
    # constant layer.
    np_constant = np.zeros((filler_len, 1))
    const_fake_extender = keras_constant_layer(
        np_constant, name='passive_fake_extender_verb_agent')
    concatenate_verb = tf.keras.layers.Concatenate(axis=0)(
        [verb_extraction_output, const_fake_extender, const_fake_extender])
    # TODO: why is there a constant 3?
    reshaped_verb = tf.keras.layers.Lambda(lambda x: tf.keras.backend.reshape(
        x, (1, filler_len * 3, 1)))(concatenate_verb)

    # TODO: reshape by 2, why is there a constant 2?
    tmp_reshaped_agentxr0_pxr1 = tf.keras.layers.Lambda(
        lambda x: tf.keras.backend.reshape(x, (filler_len * 2, 1)))(
            agentxr0_pxr1_output)
    # TODO: reshaping constant input??
    tmp_reshaped_fake = tf.keras.layers.Lambda(
        lambda x: tf.keras.backend.reshape(x, (filler_len, 1)))(
            const_fake_extender)
    concatenate_agentxr0_pxr1 = tf.keras.layers.Concatenate(axis=0)(
        [tmp_reshaped_fake, tmp_reshaped_agentxr0_pxr1])
    # TODO: why is there a constant 3?
    reshaped_agentxr0_pxr1 = tf.keras.layers.Lambda(
        lambda x: tf.keras.backend.reshape(x, (1, filler_len * 3, 1)))(
            concatenate_agentxr0_pxr1)

    semantic_tree_const_inputs, semantic_tree_output = build_join_branch(
        roles=roles,
        filler_len=filler_len,
        max_depth=2,
        inputs=[reshaped_verb, reshaped_agentxr0_pxr1],
        prefix='passive_join(verb, join(agent,p))')
    return [
        *verb_extraction_const_inputs, *agent_extraction_const_inputs,
        *p_extraction_const_inputs, *agentxr0_pxr1_const_inputs,
        *semantic_tree_const_inputs, const_fake_extender
    ], semantic_tree_output
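
# Sketch of the depth-equalising trick above (plain numpy, not the project's
# Keras layers): before the final join, the shallower verb operand is padded
# with zero "fake filler" blocks so that both operands flatten to the same
# length, filler_len * 3.
import numpy as np

filler_len = 2
verb_filler = np.arange(filler_len, dtype=float)
fake_filler = np.zeros(filler_len)
padded_verb = np.concatenate([verb_filler, fake_filler, fake_filler])
print(padded_verb.shape)  # (6,) == (filler_len * 3,)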