Example #1
def capsule_model_A(X, num_classes):
    # input dimension: (25, 200, 300, 1)
    with tf.variable_scope('capsule_' + str(3)):
        nets = _conv2d_wrapper(X,
                               shape=[3, 300, 1, 32],
                               strides=[1, 2, 1, 1],
                               padding='VALID',
                               add_bias=True,
                               activation_fn=tf.nn.relu,
                               name='conv1')  # output shape: (25, 99, 1, 32)
        tf.logging.info('output shape: {}'.format(nets.get_shape()))
        nets = capsules_init(nets,
                             shape=[1, 1, 32, 16],
                             strides=[1, 1, 1, 1],
                             padding='VALID',
                             pose_shape=16,
                             add_bias=True,
                             name='primary')  # poses: (25, 99, 1, 16, 16), activations: (25, 99, 1, 16)
        nets = capsule_conv_layer(
            nets,
            shape=[3, 1, 16, 16],
            strides=[1, 1, 1, 1],
            iterations=3,
            name='conv2')  # poses: (25, 97, 1, 16, 16), activations: (25, 97, 1, 16)
        nets = capsule_flatten(nets)  # poses: (25, 97*16, 16), activations: (25, 97*16)
        poses, activations = capsule_fc_layer(nets, num_classes, 3, 'fc2')
    return poses, activations
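As a quick sanity check (not part of the original example), the function can be dropped into a TF 1.x graph as sketched below; the (25, 200, 300, 1) input and 9-class output follow the shape comments in these examples, while the placeholder name and dummy feed are illustrative assumptions. The helper layers (_conv2d_wrapper, capsules_init, capsule_conv_layer, capsule_flatten, capsule_fc_layer) are assumed to be importable from the accompanying module.

import numpy as np
import tensorflow as tf

X = tf.placeholder(tf.float32, shape=[25, 200, 300, 1], name='input')  # (batch, seq_len, embed_dim, 1)
poses, activations = capsule_model_A(X, num_classes=9)                 # poses: (25, 9, 16), activations: (25, 9)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    dummy = np.zeros((25, 200, 300, 1), dtype=np.float32)
    print(sess.run(activations, feed_dict={X: dummy}).shape)           # expected: (25, 9)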
Example #2
def capsule_model_A(X, num_classes):
    with tf.variable_scope('capsule_' + str(3)):
        print('capsule_' + str(3))
        print('X ', X)
        cnnout = _conv2d_wrapper(X, shape=[3, 300, 1, 32], strides=[1, 2, 1, 1], padding='VALID',
                add_bias=True, activation_fn=tf.nn.relu, name='conv1')
        print('cnnout', cnnout.shape)
        tf.logging.info('output shape: {}'.format(cnnout.get_shape()))
        poses_init, activations_init = capsules_init(cnnout, shape=[1, 1, 32, 16], strides=[1, 1, 1, 1],
                             padding='VALID', pose_shape=16, add_bias=True, name='primary')
        print('poses_init', poses_init.shape)
        print('activations_init', activations_init.get_shape())
        poses_conv, activations_conv = capsule_conv_layer(poses_init, activations_init, shape=[3, 1, 16, 16], strides=[1, 1, 1, 1],
                                                          iterations=3, name='conv2')
        print('poses_conv', poses_conv.shape)
        print('activations_conv', activations_conv.shape)
        poses_flat, activations_flat = capsule_flatten(poses_conv, activations_conv)
        print('capsule_flatten', poses_flat.shape)
        print('activations_flat', activations_flat.shape)

        poses, activations = capsule_fc_layer(poses_flat, activations_flat, num_classes, 3, 'fc2')
        print('poses ', poses.shape)
        print('activations ', activations.shape)

    return poses, activations
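The examples so far only build the forward graph. One common way to train such a capsule classifier is the margin loss of Sabour et al. (2017) applied to the returned activations; the sketch below is an assumption layered on top of the example (labels placeholder, loss constants, optimizer), not code from these snippets.

def margin_loss(labels, activations, m_plus=0.9, m_minus=0.1, lambda_=0.5):
    # labels: one-hot (batch, num_classes); activations: capsule lengths (batch, num_classes)
    positive = labels * tf.square(tf.maximum(0., m_plus - activations))
    negative = lambda_ * (1. - labels) * tf.square(tf.maximum(0., activations - m_minus))
    return tf.reduce_mean(tf.reduce_sum(positive + negative, axis=1))

labels = tf.placeholder(tf.float32, shape=[None, 9], name='labels')  # 9 classes, per the shape comments above
loss = margin_loss(labels, activations)                              # activations from capsule_model_A
train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)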
Example #3
def capsule_model_B(X, num_classes):
    poses_list = []
    for _, ngram in enumerate([3, 4, 5]):
        with tf.variable_scope('capsule_' + str(ngram)):
            nets = _conv2d_wrapper(X,
                                   shape=[ngram, 300, 1, 32],
                                   strides=[1, 2, 1, 1],
                                   padding='VALID',
                                   add_bias=True,
                                   activation_fn=tf.nn.relu,
                                   name='conv1')
            tf.logging.info('output shape: {}'.format(nets.get_shape()))
            nets = capsules_init(nets,
                                 shape=[1, 1, 32, 16],
                                 strides=[1, 1, 1, 1],
                                 padding='VALID',
                                 pose_shape=16,
                                 add_bias=True,
                                 name='primary')
            nets = capsule_conv_layer(nets,
                                      shape=[3, 1, 16, 16],
                                      strides=[1, 1, 1, 1],
                                      iterations=3,
                                      name='conv2')
            nets = capsule_flatten(nets)
            poses, activations = capsule_fc_layer(nets, num_classes, 3, 'fc2')
            poses_list.append(poses)

    # average the class poses across the 3-, 4-, and 5-gram branches
    poses = tf.reduce_mean(tf.convert_to_tensor(poses_list), axis=0)
    # activation = length of each class pose vector; K is the Keras backend (from keras import backend as K)
    activations = K.sqrt(K.sum(K.square(poses), 2))
    return poses, activations
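The final two lines depend on the Keras backend (K). If only TensorFlow is available, the same averaging and length-based activation can be written as below; this is a mathematically equivalent sketch, not a change to the original model.

poses = tf.reduce_mean(tf.stack(poses_list, axis=0), axis=0)  # average over the 3/4/5-gram branches
activations = tf.norm(poses, axis=2)                          # length of each class pose: (batch, num_classes)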
Example #4
def capsule_model_B(X, num_classes):
    print('X.shape', X.shape)  # (25, 200, 300, 1)
    print('num_classes: ', num_classes)  # num_classes is a plain integer, not a tensor
    poses_list = []
    for _, ngram in enumerate([3, 4, 5]):
        with tf.variable_scope('capsule_' + str(ngram)):
            print('capsule_' + str(ngram))
            cnnout = _conv2d_wrapper(
                X, shape=[ngram, 300, 1, 32], strides=[1, 2, 1, 1], padding='VALID', 
                add_bias=True, activation_fn=tf.nn.relu, name='conv1'
            )
            print('cnnout', cnnout.shape)  # (25, 99, 1, 32)

            tf.logging.info('output shape: {}'.format(cnnout.get_shape()))
            poses_init, activations_init = capsules_init(cnnout, shape=[1, 1, 32, 16], strides=[1, 1, 1, 1],
                                 padding='VALID', pose_shape=16, add_bias=True, name='primary')
            print('poses_init', poses_init.shape)  # (25, 99, 1, 16, 16)
            print('activations_init', activations_init.get_shape())  # (25, 99, 1, 16)
            poses_conv, activations_conv = capsule_conv_layer(poses_init, activations_init, shape=[3, 1, 16, 16], strides=[1, 1, 1, 1],
                                      iterations=3, name='conv2')
            print('poses_conv', poses_conv.shape)  # (25, 97, 1, 16, 16)
            print('activations_conv', activations_conv.shape)  # (25, 97, 1, 16)
            poses_flat, activations_flat = capsule_flatten(poses_conv, activations_conv)
            print('capsule_flatten', poses_flat.shape)  # (25, 1552, 16)
            print('activations_flat', activations_flat.shape)  # (25, 1552)

            poses, activations = capsule_fc_layer(poses_flat, activations_flat, num_classes, 3, 'fc2')
            print('poses ', poses.shape)  # (25, 9, 16)
            print('activations ', activations.shape)  # (25, 9)
            poses_list.append(poses)
    print('-------------------------------')
    poses = tf.reduce_mean(tf.convert_to_tensor(poses_list), axis=0)
    print('poses ', poses.shape)
    activations = K.sqrt(K.sum(K.square(poses), 2))  # K is the Keras backend (from keras import backend as K)
    print('activations ', activations.shape)
    return poses, activations