Example no. 1
def common_AE_encoder_tail(stack: Stacker,
                           latent_code_size,
                           bn=False,
                           activation='relu') -> Stacker:
    stack.linear(latent_code_size)
    if bn:
        stack.bn()
    stack.activation(activation)
    return stack
Example no. 2
def encoder_tail(stack: Stacker,
                 latent_code_size,
                 bn=False,
                 activation='none') -> Stacker:
    stack.linear(latent_code_size * 2)
    if bn:
        stack.bn()
    stack.activation(activation)
    return stack
Example no. 3
def common_linear_stack(stack: Stacker,
                        net_shapes,
                        bn=True,
                        activation='relu') -> Stacker:
    for shape in net_shapes:
        stack.linear(shape)
        if bn:
            stack.bn()
        stack.activation(activation)
    return stack
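The body builder above is meant to be chained with the tail helpers. Below is a minimal composition sketch, not code from the original repository: the `Stacker(inputs)` constructor, the argument values, and the function name `build_AE_encoder` are assumptions; only common_linear_stack and common_AE_encoder_tail come from the examples here.

def build_AE_encoder(inputs, net_shapes=(512, 256), latent_code_size=32):
    # `Stacker(inputs)` is an assumed constructor wrapping the input tensor.
    stack = Stacker(inputs)
    # Hidden linear -> bn -> relu blocks, one per entry in net_shapes.
    stack = common_linear_stack(stack, net_shapes)
    # Final projection down to the latent code.
    stack = common_AE_encoder_tail(stack, latent_code_size)
    return stack.last_layer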
Example no. 4
def common_AE_decoder_tail(stack: Stacker,
                           flatten_size,
                           output_shape,
                           bn=False,
                           activation='sigmoid') -> Stacker:
    stack.linear(flatten_size)
    if bn:
        stack.bn()
    stack.activation(activation)
    stack.reshape(output_shape)
    return stack
Example no. 5
def encoder_tail(stack: Stacker,
                 Y_flatten_size,
                 z_size,
                 bn=False,
                 activation='none'):
    stack.linear(z_size + Y_flatten_size)
    if bn:
        stack.bn()
    stack.activation(activation)

    # Split the last layer: the first z_size units form the latent code,
    # the remaining Y_flatten_size units are generated label logits.
    # softmax is assumed to be provided by the surrounding framework.
    zs = stack.last_layer[:, :z_size]
    Ys_gen = stack.last_layer[:, z_size:]
    hs = softmax(Ys_gen)
    return zs, Ys_gen, hs
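Unlike the other tails, Example no. 5 returns tensors rather than the builder. A hedged usage sketch follows; the `Stacker(inputs)` constructor and the concrete sizes are assumptions for illustration only.

# zs: latent code, Ys_gen: raw label logits, hs: softmax of the logits.
zs, Ys_gen, hs = encoder_tail(Stacker(inputs), Y_flatten_size=10, z_size=32)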