Example #1
def conv_2d_test(th):
    assert isinstance(th, Config)
    # Prefix the model's mark with the architecture name
    th.mark = 'cnn_2d' + th.mark

    def data_dim(sample_rate=44100, duration=2, n_mfcc=40):
        audio_length = sample_rate * duration
        dim = (n_mfcc, 1 + int(np.floor(audio_length / 512)), 1)
        return dim

    dim = data_dim()

    model = Classifier(mark=th.mark)

    # Add input layer
    model.add(Input(sample_shape=[dim[0], dim[1], 1]))
    # Add hidden layers
    model.add(Conv2D(32, (4, 10), padding='same'))
    model.add(BatchNorm())
    model.add(Activation('relu'))
    model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
    # model.add(Dropout(0.7))

    model.add(Conv2D(32, (4, 10), padding='same'))
    model.add(BatchNorm())
    model.add(Activation('relu'))
    model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
    # model.add(Dropout(0.7))

    model.add(Conv2D(32, (4, 10), padding='same'))
    model.add(BatchNorm())
    model.add(Activation('relu'))
    model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
    # model.add(Dropout(0.7))

    model.add(Conv2D(32, (4, 10), padding='same'))
    model.add(BatchNorm())
    model.add(Activation('relu'))
    model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
    # model.add(Dropout(0.7))

    model.add(Flatten())
    model.add(Linear(output_dim=64))
    model.add(BatchNorm())
    model.add(Activation('relu'))

    # Add output layer
    model.add(Linear(output_dim=41))
    model.add(Activation('softmax'))

    # Build model
    optimizer = tf.train.AdamOptimizer(learning_rate=th.learning_rate)
    model.build(optimizer=optimizer)

    return model
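A minimal sketch of how `conv_2d_test` might be driven. Only `th.mark` and `th.learning_rate` are read above; attribute-style assignment on `Config` is assumed from the `th.mark` mutation, and the training call is left commented out because tframe's trainer API is not shown in this listing:

th = Config()                  # tframe Config, as asserted by conv_2d_test
th.mark = '_mfcc_baseline'     # hypothetical suffix; gets prefixed with 'cnn_2d'
th.learning_rate = 1e-3        # consumed by the AdamOptimizer above

model = conv_2d_test(th)
# model.train(...)             # trainer signature not shown here; see tframe docs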
Example #2
def vanilla(mark):
    model = GAN(z_dim=100,
                sample_shape=[3072],
                mark=mark,
                classes=10,
                output_shape=[32, 32, 3])

    # Define generator
    model.G.add(Linear(output_dim=128))
    model.G.add(BatchNorm())
    model.G.add(Activation('relu'))

    model.G.add(Linear(output_dim=256))
    model.G.add(BatchNorm())
    model.G.add(Activation('relu'))

    model.G.add(Linear(output_dim=512))
    model.G.add(Activation('relu'))

    model.G.add(Linear(output_dim=3072))
    model.G.add(Activation('tanh'))

    model.G.add(Rescale(from_scale=[-1., 1.], to_scale=[0., 1.]))

    # ===========================================================================

    # Define discriminator
    model.D.add(Rescale(from_scale=[0., 1.], to_scale=[-1., 1.]))

    model.D.add(Linear(output_dim=256))
    model.D.add(Activation('lrelu'))

    model.D.add(Linear(output_dim=128))
    model.D.add(BatchNorm())
    model.D.add(Activation('lrelu'))

    model.D.add(Linear(output_dim=1))
    model.D.add(Activation('sigmoid'))

    # Build model
    D_optimizer = tf.train.AdamOptimizer(learning_rate=0.0001)
    G_optimizer = tf.train.AdamOptimizer(learning_rate=0.0002)
    model.build(loss=pedia.cross_entropy,
                D_optimizer=D_optimizer,
                G_optimizer=G_optimizer,
                smooth_factor=0.9)

    return model
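`smooth_factor=0.9` is one-sided label smoothing: the discriminator's real-sample targets become 0.9 instead of 1.0, which discourages over-confident D outputs. A plain-TensorFlow sketch of the idea, not tframe's internal code (tframe's D ends in a sigmoid, so its loss may actually be computed on probabilities rather than logits):

import tensorflow as tf

def d_loss_smoothed(d_real_logits, d_fake_logits, smooth_factor=0.9):
    # Real targets are smooth_factor instead of 1.0; fake targets stay 0.
    real = tf.nn.sigmoid_cross_entropy_with_logits(
        labels=tf.ones_like(d_real_logits) * smooth_factor,
        logits=d_real_logits)
    fake = tf.nn.sigmoid_cross_entropy_with_logits(
        labels=tf.zeros_like(d_fake_logits),
        logits=d_fake_logits)
    return tf.reduce_mean(real) + tf.reduce_mean(fake)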
Example #3
def ConvLayer(filters, bn=False):
    # Nested helper: `model` and `strength` are captured from the enclosing scope.
    model.add(
        Conv2D(filters=filters,
               kernel_size=5,
               padding='same',
               kernel_regularizer=regularizers.L2(strength=strength)))
    if bn:
        model.add(BatchNorm())
    model.add(Activation.ReLU())
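A helper like this is normally called several times from the enclosing function to stack blocks; a hypothetical usage, widening the filter count per block:

for filters in (32, 64, 128):   # hypothetical filter schedule
    ConvLayer(filters, bn=True)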
Example #4
def conv_bn_relu(filters, twod=True, bn=True):
    # Nested helper: `subsubnet` is captured from the enclosing scope.
    if twod:
        subsubnet.add(
            Conv2D(filters=filters, kernel_size=(4, 10), padding='same'))
    else:
        subsubnet.add(
            Conv1D(filters=filters, kernel_size=9, padding='valid'))
    if bn:
        subsubnet.add(BatchNorm())
    subsubnet.add(Activation('relu'))
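With `padding='valid'` the Conv1D path shortens the sequence by `kernel_size - 1` per layer. A small helper for that arithmetic (the same layers appear in Example #13's raw-audio branch):

def valid_conv_len(n, kernel_size, stride=1):
    # 'valid' padding: out = floor((n - kernel_size) / stride) + 1
    return (n - kernel_size) // stride + 1

print(valid_conv_len(32000, 9))   # 31992 -- one kernel_size=9 layer on raw audio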
Example #5
def dcgan(mark):
    # Initialize the model
    model = GAN(z_dim=100, sample_shape=[28, 28, 1], mark=mark, classes=10)

    # Define generator
    model.G.add(Linear(output_dim=7 * 7 * 128))
    model.G.add(Reshape(shape=[7, 7, 128]))
    model.G.add(BatchNorm())
    model.G.add(Activation.ReLU())

    model.G.add(Deconv2D(filters=128, kernel_size=5, strides=2,
                         padding='same'))
    model.G.add(BatchNorm())
    model.G.add(Activation.ReLU())

    model.G.add(Deconv2D(filters=1, kernel_size=5, strides=2, padding='same'))
    model.G.add(Activation('sigmoid'))
    # model.G.add(Activation('tanh'))

    # model.G.add(Rescale(from_scale=[-1., 1.], to_scale=[0., 1.]))

    # Define discriminator
    # model.D.add(Rescale(from_scale=[0., 1.], to_scale=[-1., 1.]))

    model.D.add(Conv2D(filters=128, kernel_size=5, strides=2, padding='same'))
    model.D.add(Activation.LeakyReLU())

    model.D.add(Conv2D(filters=128, kernel_size=5, strides=2, padding='same'))
    model.D.add(BatchNorm())
    model.D.add(Activation.LeakyReLU())

    model.D.add(Reshape(shape=[7 * 7 * 128]))
    model.D.add(Linear(output_dim=1))
    model.D.add(Activation('sigmoid'))

    # Build model
    optimizer = tf.train.AdamOptimizer(learning_rate=0.0002, beta1=0.5)
    model.build(loss=pedia.cross_entropy,
                G_optimizer=optimizer,
                D_optimizer=optimizer)

    return model
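Shape check: with `padding='same'` a stride-2 `Deconv2D` doubles the spatial size (out = in × stride) and a stride-2 `Conv2D` halves it, so G goes 7 → 14 → 28 to match `sample_shape=[28, 28, 1]`, and D goes 28 → 14 → 7, which is why its flatten reshapes to `7 * 7 * 128`:

g = 7
for _ in range(2):   # two stride-2 transposed convolutions
    g *= 2           # 'same' padding: out = in * stride
assert g == 28       # matches sample_shape=[28, 28, 1]

d = 28
for _ in range(2):   # two stride-2 convolutions
    d //= 2          # 'same' padding: out = ceil(in / stride)
assert d == 7        # matches Reshape(shape=[7 * 7 * 128])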
Example #6
def ConvBNReLU(filters, strength=1.0, bn=True):
    # Nested helper: `model` is captured from the enclosing scope.
    model.add(
        Conv2D(filters=filters,
               kernel_size=5,
               padding='same',
               kernel_regularizer=regularizers.L2(strength=strength)))

    if bn:
        model.add(BatchNorm())

    model.add(Activation('relu'))
Example #7
def vanilla(mark, bn=True):
    model = GAN(
        z_dim=100,
        sample_shape=[784],
        mark=mark,  #classes=10,
        output_shape=[28, 28, 1])

    model.G.add(Linear(output_dim=128))
    if bn:
        model.G.add(BatchNorm())
    model.G.add(Activation('relu'))

    model.G.add(Linear(output_dim=256))
    if bn:
        model.G.add(BatchNorm())
    model.G.add(Activation('relu'))

    model.G.add(Linear(output_dim=784))
    model.G.add(Activation('tanh'))

    model.G.add(Rescale(from_scale=[-1., 1.], to_scale=[0., 1.]))

    # ============================================================================

    model.D.add(Rescale(from_scale=[0., 1.], to_scale=[-1., 1.]))

    model.D.add(Linear(output_dim=256))
    model.D.add(Activation('lrelu'))

    model.D.add(Linear(output_dim=128))
    if bn:
        model.D.add(BatchNorm())
    model.D.add(Activation('lrelu'))

    model.D.add(Linear(output_dim=1))
    model.D.add(Activation('sigmoid'))

    # Build model
    model.build(loss=pedia.cross_entropy, smooth_factor=0.9)

    return model
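The paired `Rescale` layers here (and in Example #2) keep the data pipeline in [0, 1] while G's `tanh` output and D's input live in [-1, 1]. The mapping is linear interpolation between ranges; a sketch of the arithmetic, not tframe's implementation:

def rescale(x, from_scale, to_scale):
    a, b = from_scale
    c, d = to_scale
    # Normalize x from [a, b] to [0, 1], then stretch to [c, d].
    return (x - a) / (b - a) * (d - c) + c

assert rescale(-1., [-1., 1.], [0., 1.]) == 0.0
assert rescale(0.5, [0., 1.], [-1., 1.]) == 0.0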
Example #8
def mlp01(mark):
    # Define model
    model = TDPlayer(mark=mark)

    model.add(Input(sample_shape=[15, 15]))
    model.add(Flatten())

    model.add(Linear(225))
    model.add(BatchNorm())
    model.add(Activation.ReLU())

    model.add(Linear(225))
    model.add(BatchNorm())
    model.add(Activation.ReLU())

    model.add(Linear(1))
    model.add(Activation('sigmoid'))

    # Build model
    model.build()

    return model
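For scale, a quick count of the dense parameters in this 15×15 board evaluator (BatchNorm's per-unit scale and shift terms excluded):

n_in = 15 * 15                                   # flattened board
hidden = (n_in * 225 + 225) + (225 * 225 + 225)  # two Linear(225) layers
out = 225 * 1 + 1                                # Linear(1) value head
print(hidden + out)                              # 101926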
Example #9
def mlp(th):
    assert isinstance(th, Config)
    # Initialize the model
    model = Classifier(mark=th.mark)

    # Add input layer
    model.add(Input([32000]))
    # Add hidden layers
    for _ in range(th.num_blocks):
        model.add(Linear(output_dim=th.hidden_dim))
        model.add(BatchNorm())
        # model.add(BatchNormalization())
        model.add(Activation(th.actype1))
        # model.add(Dropout(0.9))
    # Add output layer
    model.add(Linear(output_dim=th.num_classes))
    model.add(Activation('softmax'))

    # Build model
    optimizer = tf.train.AdamOptimizer(learning_rate=th.learning_rate)
    model.build(optimizer=optimizer)

    return model
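`mlp` reads six fields from its `Config`. A hypothetical settings block covering exactly those fields (attribute-style assignment assumed, as in Example #1):

th = Config()
th.mark = 'mlp_baseline'       # hypothetical experiment name
th.num_blocks = 3              # number of Linear-BatchNorm-Activation blocks
th.hidden_dim = 256            # width of each hidden layer
th.actype1 = 'relu'            # activation name passed to Activation(...)
th.num_classes = 41            # output dimension, as in the other examples
th.learning_rate = 1e-4

model = mlp(th)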
Example #10
def fc_bn_relu(bn=True):
    # Nested helper: `model` is captured from the enclosing scope.
    # Note: unlike the other helpers, BatchNorm here is added after the activation.
    model.add(Linear(100))
    model.add(Activation('relu'))
    if bn:
        model.add(BatchNorm())
Example #11
def dcgan(mark):
    model = GAN(z_dim=100, sample_shape=[32, 32, 3], mark=mark, classes=10)

    nch = 256
    h = 5
    reg = regularizers.L2(strength=1e-7)

    # Define generator
    model.G.add(Linear(output_dim=nch * 4 * 4, weight_regularizer=reg))
    model.G.add(BatchNorm())
    model.G.add(Reshape(shape=(4, 4, nch)))

    model.G.add(
        Deconv2D(filters=int(nch / 2),
                 kernel_size=5,
                 padding='same',
                 kernel_regularizer=reg))
    model.G.add(BatchNorm())
    model.G.add(Activation.LeakyReLU())

    model.G.add(
        Deconv2D(filters=int(nch / 2),
                 kernel_size=5,
                 strides=2,
                 padding='same',
                 kernel_regularizer=reg))
    model.G.add(BatchNorm())
    model.G.add(Activation.LeakyReLU())

    model.G.add(
        Deconv2D(filters=int(nch / 4),
                 kernel_size=5,
                 strides=2,
                 padding='same',
                 kernel_regularizer=reg))
    model.G.add(BatchNorm())
    model.G.add(Activation.LeakyReLU())

    model.G.add(
        Deconv2D(filters=3,
                 kernel_size=5,
                 strides=2,
                 padding='same',
                 kernel_regularizer=reg))
    model.G.add(Activation('sigmoid'))

    # ===========================================================================

    # Define discriminator
    model.D.add(
        Conv2D(filters=int(nch / 4),
               kernel_size=h,
               strides=2,
               padding='same',
               kernel_regularizer=reg))
    model.D.add(Activation.LeakyReLU())

    model.D.add(
        Conv2D(filters=int(nch / 2),
               kernel_size=h,
               strides=2,
               padding='same',
               kernel_regularizer=reg))
    model.D.add(Activation.LeakyReLU())

    model.D.add(
        Conv2D(filters=nch,
               kernel_size=h,
               strides=2,
               padding='same',
               kernel_regularizer=reg))
    model.D.add(Activation.LeakyReLU())

    model.D.add(Flatten())
    model.D.add(Linear(output_dim=1, weight_regularizer=reg))
    model.D.add(Activation('sigmoid'))

    # Build model
    optimizer = tf.train.AdamOptimizer(learning_rate=0.0002, beta1=0.5)
    model.build(loss=pedia.cross_entropy,
                G_optimizer=optimizer,
                D_optimizer=optimizer)

    return model
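Every conv and linear layer here shares one L2 regularizer with `strength=1e-7`. Conceptually, each weight tensor contributes `strength * sum(w**2)` to the loss; a plain-TensorFlow sketch (tframe's `regularizers.L2` internals are not shown in this listing):

import tensorflow as tf

def l2_penalty(w, strength=1e-7):
    # Contribution of one weight tensor to the regularization loss.
    return strength * tf.reduce_sum(tf.square(w))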
Example #12
def linear_bn_relu(units, bn=True):
    # Nested helper: `model` is captured from the enclosing scope.
    model.add(Linear(output_dim=units))
    if bn:
        model.add(BatchNorm())
    model.add(Activation('relu'))
Example #13
def res_00(th):
    assert isinstance(th, Config)
    model = Classifier(mark=th.mark)

    def data_dim(sample_rate=16000, duration=2, n_mfcc=50):
        audio_length = sample_rate * duration
        dim = (n_mfcc, 1 + int(np.floor(audio_length / 512)), 1)
        return dim

    dim = data_dim()

    # Add hidden layers
    subnet = model.add(inter_type=model.CONCAT)
    # the net to process raw data
    subsubnet = subnet.add()
    # subsubnet.add(Input(sample_shape=[32000, 1], name='raw_data'))
    subsubnet.add(Input(sample_shape=[32000, 1]))
    subsubnet.add(Conv1D(filters=16, kernel_size=9, padding='valid'))
    subsubnet.add(BatchNorm())
    subsubnet.add(Activation('relu'))
    subsubnet.add(Conv1D(filters=16, kernel_size=9, padding='valid'))
    subsubnet.add(BatchNorm())
    subsubnet.add(Activation('relu'))
    subsubnet.add(MaxPool1D(pool_size=16, strides=16))
    subsubnet.add(Dropout(th.raw_keep_prob))

    subsubnet.add(Conv1D(filters=32, kernel_size=3, padding='valid'))
    subsubnet.add(Activation('relu'))
    subsubnet.add(Conv1D(filters=32, kernel_size=3, padding='valid'))
    subsubnet.add(Activation('relu'))
    subsubnet.add(MaxPool1D(pool_size=4, strides=4))
    subsubnet.add(Dropout(th.raw_keep_prob))

    subsubnet.add(Conv1D(filters=32, kernel_size=3, padding='valid'))
    subsubnet.add(Activation('relu'))
    subsubnet.add(Conv1D(filters=32, kernel_size=3, padding='valid'))
    subsubnet.add(Activation('relu'))
    subsubnet.add(MaxPool1D(pool_size=4, strides=4))

    subsubnet.add(Conv1D(filters=256, kernel_size=3, padding='valid'))
    subsubnet.add(BatchNorm())
    subsubnet.add(Activation('relu'))
    subsubnet.add(Conv1D(filters=256, kernel_size=3, padding='valid'))
    subsubnet.add(BatchNorm())
    subsubnet.add(Activation('relu'))
    subsubnet.add(GlobalMaxPooling1D())

    # the net to process mfcc features
    subsubnet = subnet.add()
    subsubnet.add(Input(sample_shape=[dim[0], dim[1], 1], name='mfcc'))
    subsubnet.add(Conv2D(32, (4, 10), padding='same'))
    subsubnet.add(BatchNorm())
    subsubnet.add(Activation('relu'))
    subsubnet.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
    subsubnet.add(Dropout(th.mfcc_keep_prob))

    net = subsubnet.add(ResidualNet())
    net.add(Conv2D(32, (4, 10), padding='same'))
    net.add(BatchNorm())
    net.add_shortcut()
    subsubnet.add(Activation('relu'))
    subsubnet.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
    subsubnet.add(Dropout(th.mfcc_keep_prob))
    #
    net = subsubnet.add(ResidualNet())
    net.add(Conv2D(32, (4, 10), padding='same'))
    net.add(BatchNorm())
    net.add_shortcut()
    subsubnet.add(Activation('relu'))
    subsubnet.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
    subsubnet.add(Dropout(th.mfcc_keep_prob))

    net = subsubnet.add(ResidualNet())
    net.add(Conv2D(32, (4, 10), padding='same'))
    net.add(BatchNorm())
    net.add_shortcut()
    subsubnet.add(Activation('relu'))
    subsubnet.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
    subsubnet.add(Dropout(th.mfcc_keep_prob))

    net = subsubnet.add(ResidualNet())
    net.add(Conv2D(32, (4, 10), padding='same'))
    net.add(BatchNorm())
    net.add_shortcut()
    subsubnet.add(Activation('relu'))
    subsubnet.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
    subsubnet.add(Dropout(th.mfcc_keep_prob))
    subsubnet.add(Flatten())

    subsubnet.add(Dropout(th.concat_keep_prob))
    model.add(Linear(output_dim=128))
    model.add(BatchNorm())
    model.add(Activation('relu'))
    #
    model.add(Linear(output_dim=64))
    model.add(BatchNorm())
    model.add(Activation('relu'))

    # Add output layer
    model.add(Linear(output_dim=41))
    model.add(Activation('softmax'))

    # Build model
    optimizer = tf.train.AdamOptimizer(learning_rate=th.learning_rate)
    model.build(optimizer=optimizer)

    return model
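Each `ResidualNet` block above holds `Conv2D` + `BatchNorm`, and `add_shortcut()` closes the identity skip, so a block outputs `x + BN(Conv(x))`; the ReLU is then applied outside the block. A functional sketch of that data flow, not tframe's implementation:

def residual_block(x, conv, bn, relu):
    # Identity shortcut around conv + batch-norm; the activation runs after
    # the add, mirroring net.add_shortcut() followed by Activation('relu').
    return relu(x + bn(conv(x)))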
Example #14
def multinput_mlp(th):
    assert isinstance(th, Config)
    model = Classifier(mark=th.mark)

    def data_dim(sample_rate=16000, duration=2, n_mfcc=50):
        audio_length = sample_rate * duration
        dim = (n_mfcc, 1 + int(np.floor(audio_length / 512)), 1)
        return dim

    dim = data_dim()

    # Add hidden layers
    subnet = model.add(inter_type=model.CONCAT)

    subsubnet = subnet.add()
    subsubnet.add(Input(sample_shape=[32000, 1]))
    subsubnet.add(Linear(output_dim=512))
    subsubnet.add(Activation('relu'))
    subsubnet.add(Linear(output_dim=256))
    subsubnet.add(Activation('relu'))

    subsubnet = subnet.add()
    subsubnet.add(Input(sample_shape=[dim[0], dim[1], 1], name='mfcc'))
    subsubnet.add(Conv2D(32, (4, 10), padding='same'))
    subsubnet.add(BatchNorm())
    subsubnet.add(Activation('relu'))
    subsubnet.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
    subsubnet.add(Dropout(0.8))

    # subsubnet.add(Conv2D(32, (4, 10), padding='same'))
    # subsubnet.add(BatchNorm())
    # subsubnet.add(Activation('relu'))
    # subsubnet.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
    # subsubnet.add(Dropout(0.8))

    subsubnet.add(Conv2D(32, (4, 10), padding='same'))
    subsubnet.add(BatchNorm())
    subsubnet.add(Activation('relu'))
    subsubnet.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
    subsubnet.add(Dropout(0.7))

    subsubnet.add(Flatten())

    model.add(Linear(output_dim=128))
    model.add(BatchNorm())
    model.add(Activation('relu'))

    model.add(Linear(output_dim=64))
    model.add(BatchNorm())
    model.add(Activation('relu'))

    model.add(Linear(output_dim=64))
    model.add(BatchNorm())
    model.add(Activation('relu'))

    # Add output layer
    model.add(Linear(output_dim=41))
    model.add(Activation('softmax'))

    # Build model
    optimizer = tf.train.AdamOptimizer(learning_rate=th.learning_rate)
    model.build(optimizer=optimizer)

    return model
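As in Example #13, `model.add(inter_type=model.CONCAT)` merges the branch outputs before the shared dense head. Conceptually that is a feature-axis concatenation; a sketch with assumed shapes (the real flattened CNN width depends on the MFCC dimensions):

import tensorflow as tf

raw_out = tf.zeros([8, 256])      # raw-waveform branch: Linear(256) output
mfcc_out = tf.zeros([8, 2048])    # CNN branch after Flatten(); width assumed
merged = tf.concat([raw_out, mfcc_out], axis=-1)  # fed to the Linear(128) head
print(merged.shape)               # (8, 2304)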