Example #1
def test_BiLSTM(merge_mode):
    with CustomObjectScope({'BiLSTM': sequence.BiLSTM}):
        layer_test(sequence.BiLSTM,
                   kwargs={
                       'merge_mode': merge_mode,
                       'units': EMBEDDING_SIZE
                   },
                   input_shape=(BATCH_SIZE, SEQ_LENGTH, EMBEDDING_SIZE))
Example #2
def test_PredictionLayer(task, use_bias):
    with CustomObjectScope({'PredictionLayer': layers.PredictionLayer}):
        layer_test(layers.PredictionLayer,
                   kwargs={
                       'task': task,
                       'use_bias': use_bias
                   },
                   input_shape=(BATCH_SIZE, 1))
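These snippets show only the test bodies; arguments such as task and use_bias are normally supplied by a pytest.mark.parametrize decorator in the original test module. A minimal sketch of such a decorator, with value combinations chosen for illustration rather than copied from the source:

import pytest

# Hypothetical parametrization; the value pairs below are assumptions.
@pytest.mark.parametrize('task,use_bias',
                         [('binary', True), ('binary', False)])
def test_PredictionLayer(task, use_bias):
    ...  # body as shown in Example #2 above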
Example #3
def test_FwFM(reg_strength):
    with CustomObjectScope({'FwFMLayer': layers.FwFMLayer}):
        layer_test(layers.FwFMLayer,
                   kwargs={
                       'num_fields': FIELD_SIZE,
                       'regularizer': reg_strength
                   },
                   input_shape=(BATCH_SIZE, FIELD_SIZE, EMBEDDING_SIZE))
Example #4
def test_test_PredictionLayer_invalid():
    # with pytest.raises(ValueError):
    with CustomObjectScope({'PredictionLayer': layers.PredictionLayer}):
        layer_test(layers.PredictionLayer,
                   kwargs={
                       'use_bias': True,
                   },
                   input_shape=(BATCH_SIZE, 2, 1))
Example #5
def test_MLP(hidden_size, use_bn):
    with CustomObjectScope({'MLP': layers.MLP}):
        layer_test(layers.MLP,
                   kwargs={
                       'hidden_size': hidden_size,
                       'use_bn': use_bn
                   },
                   input_shape=(BATCH_SIZE, EMBEDDING_SIZE))
Example #6
def test_CIN(layer_size, split_half):
    with CustomObjectScope({'CIN': layers.CIN}):
        layer_test(layers.CIN,
                   kwargs={
                       "layer_size": layer_size,
                       "split_half": split_half
                   },
                   input_shape=(BATCH_SIZE, FIELD_SIZE, EMBEDDING_SIZE))
Example #7
def test_Linear():
    with CustomObjectScope({'Linear': Linear}):
        layer_test(Linear,
                   kwargs={
                       'mode': 1,
                       'use_bias': True
                   },
                   input_shape=(BATCH_SIZE, EMBEDDING_SIZE))
Example #8
def test_KMaxPooling():
    with CustomObjectScope({'KMaxPooling': sequence.KMaxPooling}):
        layer_test(sequence.KMaxPooling,
                   kwargs={
                       'k': 3,
                       'axis': 1
                   },
                   input_shape=(BATCH_SIZE, SEQ_LENGTH, EMBEDDING_SIZE, 2))
Example #9
def test_LocalActivationUnit(hidden_units, activation):
    if tf.__version__ >= '1.13.0' and activation != 'sigmoid':
        return  # only the sigmoid case is exercised on newer TF versions

    with CustomObjectScope({'LocalActivationUnit': layers.LocalActivationUnit}):
        layer_test(layers.LocalActivationUnit,
                   kwargs={'hidden_units': hidden_units, 'activation': activation, 'dropout_rate': 0.5},
                   input_shape=[(BATCH_SIZE, 1, EMBEDDING_SIZE), (BATCH_SIZE, SEQ_LENGTH, EMBEDDING_SIZE)])
Example #10
def test_DNN(hidden_units, use_bn):
    with CustomObjectScope({'DNN': layers.DNN}):
        layer_test(layers.DNN,
                   kwargs={
                       'hidden_units': hidden_units,
                       'use_bn': use_bn,
                       'dropout_rate': 0.5
                   },
                   input_shape=(BATCH_SIZE, EMBEDDING_SIZE))
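The DNN block exercised above is an ordinary Keras layer, so it can also be called outside the test harness. A hedged sketch, assuming the deepctr.layers import path and that hidden_units accepts a tuple of layer widths, as the test kwargs suggest:

import tensorflow as tf
from deepctr.layers import DNN  # assumed import path for the layer under test

def build_toy_model(embedding_size=4):
    # dense feature vector in, single sigmoid score out
    inputs = tf.keras.Input(shape=(embedding_size,))
    deep_out = DNN(hidden_units=(8, 4), dropout_rate=0.5)(inputs)
    outputs = tf.keras.layers.Dense(1, activation='sigmoid')(deep_out)
    return tf.keras.Model(inputs, outputs)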
Example #11
def test_Transformer():
    if tf.__version__ >= '2.0.0':
        tf.compat.v1.disable_eager_execution()  # todo: workaround, forces graph mode on TF 2.x
    with CustomObjectScope({'Transformer': sequence.Transformer}):
        layer_test(sequence.Transformer,
                   kwargs={'att_embedding_size': 1, 'head_num': 8, 'use_layer_norm': True, 'supports_masking': False,
                           'attention_type': 'additive', 'dropout_rate': 0.5, 'output_type': 'sum'},
                   input_shape=[(BATCH_SIZE, SEQ_LENGTH, EMBEDDING_SIZE), (BATCH_SIZE, SEQ_LENGTH, EMBEDDING_SIZE),
                                (BATCH_SIZE, 1), (BATCH_SIZE, 1)])
Example #12
def test_CrossNet_invalid():
    with pytest.raises(ValueError):
        with CustomObjectScope({'CrossNet': layers.CrossNet}):
            layer_test(layers.CrossNet,
                       kwargs={
                           'layer_num': 1,
                           'l2_reg': 0
                       },
                       input_shape=(2, 3, 4))
Example #13
def test_LocalActivationUnit(hidden_size, activation):
    with CustomObjectScope({'LocalActivationUnit':
                            layers.LocalActivationUnit}):
        layer_test(layers.LocalActivationUnit,
                   kwargs={
                       'hidden_size': hidden_size,
                       'activation': activation
                   },
                   input_shape=[(BATCH_SIZE, 1, EMBEDDING_SIZE),
                                (BATCH_SIZE, SEQ_LENGTH, EMBEDDING_SIZE)])
Example #14
def test_AttentionSequencePoolingLayer(weight_normalization):
    with CustomObjectScope({
            'AttentionSequencePoolingLayer':
            sequence.AttentionSequencePoolingLayer
    }):
        layer_test(sequence.AttentionSequencePoolingLayer,
                   kwargs={'weight_normalization': weight_normalization},
                   input_shape=[(BATCH_SIZE, 1, EMBEDDING_SIZE),
                                (BATCH_SIZE, SEQ_LENGTH, EMBEDDING_SIZE),
                                (BATCH_SIZE, 1)])
Example #15
def test_SequencePoolingLayer(mode, supports_masking, input_shape):
    with CustomObjectScope(
        {'SequencePoolingLayer': sequence.SequencePoolingLayer}):
        layer_test(sequence.SequencePoolingLayer,
                   kwargs={
                       'mode': mode,
                       'supports_masking': supports_masking
                   },
                   input_shape=input_shape,
                   supports_masking=supports_masking)
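Here input_shape is supplied by the surrounding parametrization. As a hedged illustration (constant values invented for this sketch), the two layouts presumably covered are a [sequence_embeddings, sequence_length] pair when supports_masking is False and a single padded sequence tensor when it is True:

# Illustrative only; these stand in for the module-level constants used by the tests.
BATCH_SIZE, SEQ_LENGTH, EMBEDDING_SIZE = 4, 7, 4

SHAPE_WITHOUT_MASKING = [(BATCH_SIZE, SEQ_LENGTH, EMBEDDING_SIZE), (BATCH_SIZE, 1)]
SHAPE_WITH_MASKING = (BATCH_SIZE, SEQ_LENGTH, EMBEDDING_SIZE)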
Example #16
def test_InteractingLayer(head_num, use_res):
    with CustomObjectScope({'InteractingLayer': layers.InteractingLayer}):
        layer_test(layers.InteractingLayer,
                   kwargs={
                       "head_num": head_num,
                       "use_res": use_res,
                   },
                   input_shape=(BATCH_SIZE, FIELD_SIZE, EMBEDDING_SIZE))
Example #17
def test_PositionEncoding(pos_embedding_trainable, zero_pad):
    with CustomObjectScope({
            'PositionEncoding': sequence.PositionEncoding,
            "tf": tf
    }):
        layer_test(sequence.PositionEncoding,
                   kwargs={
                       'pos_embedding_trainable': pos_embedding_trainable,
                       'zero_pad': zero_pad
                   },
                   input_shape=(BATCH_SIZE, SEQ_LENGTH, EMBEDDING_SIZE))
Example #18
def test_CrossNet(layer_num, l2_reg):
    with CustomObjectScope({'CrossNet': layers.CrossNet}):
        layer_test(layers.CrossNet,
                   kwargs={
                       'layer_num': layer_num,
                       'l2_reg': l2_reg
                   },
                   input_shape=(2, 3))
Example #19
def test_SequencePoolingLayer(mode, supports_masking, input_shape):
    if tf.__version__ >= '2.0.0' and mode != 'sum':  # todo: check on later TF versions
        return  # only 'sum' mode is exercised on TF 2.x
    with CustomObjectScope(
        {'SequencePoolingLayer': sequence.SequencePoolingLayer}):
        layer_test(sequence.SequencePoolingLayer,
                   kwargs={
                       'mode': mode,
                       'supports_masking': supports_masking
                   },
                   input_shape=input_shape,
                   supports_masking=supports_masking)
Example #20
def test_Transformer():
    with CustomObjectScope({'Transformer': sequence.Transformer}):
        layer_test(sequence.Transformer,
                   kwargs={
                       'att_embedding_size': 1,
                       'head_num': 8,
                       'use_layer_norm': True,
                       'supports_masking': False
                   },
                   input_shape=[(BATCH_SIZE, SEQ_LENGTH, EMBEDDING_SIZE),
                                (BATCH_SIZE, SEQ_LENGTH, EMBEDDING_SIZE),
                                (BATCH_SIZE, 1), (BATCH_SIZE, 1)])
Example #21
def test_FGCNNLayer():
    with CustomObjectScope({'FGCNNLayer': layers.FGCNNLayer}):
        layer_test(layers.FGCNNLayer,
                   kwargs={
                       'filters': (4, 6),
                       'kernel_width': (7, 7)
                   },
                   input_shape=(BATCH_SIZE, FIELD_SIZE, EMBEDDING_SIZE))
Example #22
def test_Hash(num_buckets, mask_zero, vocabulary_path, input_data,
              expected_output):
    if not hasattr(tf, 'version') or tf.version.VERSION < '2.0.0':
        return  # this test is only run on TF 2.x

    with CustomObjectScope({'Hash': Hash}):
        layer_test(Hash,
                   kwargs={
                       'num_buckets': num_buckets,
                       'mask_zero': mask_zero,
                       'vocabulary_path': vocabulary_path
                   },
                   input_dtype=tf.string,
                   input_data=np.array(input_data, dtype='str'),
                   expected_output_dtype=tf.int64,
                   expected_output=expected_output)
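A hedged smoke-test sketch for the same layer outside layer_test, reusing the keyword arguments exercised above; the import path is an assumption, and since the concrete bucket assignments depend on the hashing scheme, only the output dtype is checked:

import numpy as np
import tensorflow as tf
from deepctr.layers import Hash  # assumed import path

def hash_smoke_check(num_buckets=10):
    ids = tf.constant(np.array([['1'], ['0'], ['3']], dtype='str'))
    hashed = Hash(num_buckets=num_buckets, mask_zero=False)(ids)
    assert hashed.dtype == tf.int64  # matches expected_output_dtype above
    return hashed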
Example #23
def test_dice():
    layer_test(activation.Dice, kwargs={'emb_size': 3, 'dim': 2},
               input_shape=(5, 3), expected_output_shape=(5, 3))
    layer_test(activation.Dice, kwargs={'emb_size': 10, 'dim': 3},
               input_shape=(5, 3, 10), expected_output_shape=(5, 3, 10))
Example #24
def test_AFMLayer():
    with CustomObjectScope({'AFMLayer': layers.AFMLayer}):
        layer_test(layers.AFMLayer,
                   kwargs={'dropout_rate': 0.5},
                   input_shape=[(BATCH_SIZE, 1, EMBEDDING_SIZE)] * FIELD_SIZE)
Example #25
def test_dice():
    with CustomObjectScope({'Dice': activation.Dice}):
        layer_test(activation.Dice, kwargs={}, input_shape=(2, 3))
Example #26
def test_FM():
    with CustomObjectScope({'FM': layers.FM}):
        layer_test(layers.FM,
                   kwargs={},
                   input_shape=(BATCH_SIZE, FIELD_SIZE, EMBEDDING_SIZE))
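A hedged sketch of calling the layer directly, assuming the deepctr.layers import path and that FM reduces its (batch, field, embedding) input to a single second-order interaction score per sample:

import numpy as np
import tensorflow as tf
from deepctr.layers import FM  # assumed import path

def fm_smoke_check(batch=4, field=5, emb=8):
    # one embedding vector per field, stacked into (batch, field, emb)
    x = tf.constant(np.random.rand(batch, field, emb), dtype=tf.float32)
    out = FM()(x)
    assert out.shape == (batch, 1)  # assumed output: one interaction score per sample
    return out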
Example #27
def test_BiInteractionPooling():
    with CustomObjectScope(
        {'BiInteractionPooling': layers.BiInteractionPooling}):
        layer_test(layers.BiInteractionPooling,
                   kwargs={},
                   input_shape=(BATCH_SIZE, FIELD_SIZE, EMBEDDING_SIZE))
Example #28
def test_OutterProductLayer(kernel_type):
    with CustomObjectScope({'OutterProductLayer': layers.OutterProductLayer}):
        layer_test(layers.OutterProductLayer,
                   kwargs={'kernel_type': kernel_type},
                   input_shape=[(BATCH_SIZE, 1, EMBEDDING_SIZE)] * FIELD_SIZE)
Example #29
def test_InnerProductLayer(reduce_sum):
    with CustomObjectScope({'InnerProductLayer': layers.InnerProductLayer}):
        layer_test(layers.InnerProductLayer,
                   kwargs={'reduce_sum': reduce_sum},
                   input_shape=[(BATCH_SIZE, 1, EMBEDDING_SIZE)] * FIELD_SIZE)
Example #30
def test_test_CIN_invalid(layer_size):
    with pytest.raises(ValueError):
        with CustomObjectScope({'CIN': layers.CIN}):
            layer_test(layers.CIN,
                       kwargs={"layer_size": layer_size},
                       input_shape=(BATCH_SIZE, FIELD_SIZE, EMBEDDING_SIZE))