Example #1
def multinput(th):
    assert isinstance(th, Config)
    model = Classifier(mark=th.mark)

    # Add hidden layers
    assert isinstance(th.fc_dims, list)
    subnet = model.add(inter_type=model.CONCAT)
    for dims in th.fc_dims:
        subsubnet = subnet.add()
        # Add input layer
        subsubnet.add(Input(sample_shape=th.input_shape))
        subsubnet.add(Flatten())
        assert isinstance(dims, list)

        for dim in dims:
            subsubnet.add(Linear(output_dim=dim))
            # NOTE: 'core' is a module-level object from the original source file (not shown here)
            if core.use_bn: subsubnet.add(BatchNormalization())
            subsubnet.add(Activation(th.actype1))

    # Add output layer
    model.add(Linear(output_dim=th.num_classes))

    # Build model
    optimizer = tf.train.AdamOptimizer(learning_rate=th.learning_rate)
    model.build(optimizer=optimizer)

    return model
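These snippets build models with the tframe framework on top of TensorFlow 1.x. A minimal sketch of how a builder like multinput might be driven; the import path and the Config field assignments are assumptions based on the tframe project layout, not verified API (Example 1 also reads a module-level 'core' flag, which this sketch assumes is importable):

import tensorflow as tf
from tframe import Config  # assumed import path

th = Config()                          # hypothetical configuration object
th.mark = 'multinput_demo'
th.input_shape = [784]
th.fc_dims = [[128, 64], [128, 64]]    # two parallel fully-connected branches
th.actype1 = 'relu'
th.num_classes = 10
th.learning_rate = 1e-4

model = multinput(th)                  # returns a built tframe Classifier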
Example #2
def multinput(th):
    assert isinstance(th, Config)
    model = Classifier(mark=th.mark)

    # Add hidden layers
    assert isinstance(th.fc_dims, list)
    subnet = model.add(inter_type=model.CONCAT)
    for dims in th.fc_dims:
        subsubnet = subnet.add()
        # Add input layer
        subsubnet.add(Input(sample_shape=th.input_shape))
        subsubnet.add(Flatten())
        assert isinstance(dims, list)

        for dim in dims:
            subsubnet.add(Linear(output_dim=dim))
            # if cf10_core.use_bn: subsubnet.add(BatchNormalization())
            subsubnet.add(Activation(th.actype1))

    # Add output layer
    model.add(Linear(output_dim=th.num_classes))

    # Build model
    model.build(metric=['accuracy', 'loss'],
                batch_metric='accuracy',
                eval_metric='accuracy')

    return model
Example #3
def typical(th, cell):
    assert isinstance(th, Config)
    # Instantiate a model
    model = Classifier(mark=th.mark, net_type=Recurrent)
    # Add layers
    model.add(Input(sample_shape=th.input_shape))
    # Add hidden layers
    model.add(cell)
    # Build model and return; output_and_build is a project helper not shown
    # on this page (a hedged reconstruction follows this example)
    output_and_build(model, th)
    return model
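output_and_build is a project helper that is not shown on this page. A hedged reconstruction, inferred from the build sections of the other examples here; the body is an assumption, not the original helper:

def output_and_build(model, th):
    # Hypothetical reconstruction of the unshown helper
    assert isinstance(model, Classifier) and isinstance(th, Config)
    model.add(Linear(output_dim=th.num_classes))
    optimizer = tf.train.AdamOptimizer(learning_rate=th.learning_rate)
    model.build(optimizer=optimizer)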
Example #4
def unet(th):
    assert isinstance(th, Config)

    model = Classifier(mark=th.mark)
    model.add(Input(sample_shape=th.input_shape))

    def add_encoder_block(filters,
                          kernel_size=3,
                          add_pool=True,
                          drop_out=False):
        model.add(Conv2D(filters, kernel_size))
        model.add(Activation.ReLU())
        model.add(Conv2D(filters, kernel_size))
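        # Keep a handle on the pre-pool activation: the decoder concatenates
        # it back in as the U-Net skip connection (see add_decoder_block below)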
        output = model.add(Activation.ReLU())
        if drop_out: output = model.add(Dropout(0.5))
        if add_pool: model.add(MaxPool2D((2, 2), 2))
        return output

    def add_decoder_block(filters, convX, ks1=2, ks2=3):
        model.add(Deconv2D(filters, ks1, strides=(2, 2)))
        model.add(Activation.ReLU())
        model.add(Concatenate(convX))
        model.add(Conv2D(filters, ks2))
        model.add(Activation.ReLU())
        model.add(Conv2D(filters, ks2))
        model.add(Activation.ReLU())

    # Construct encoder part
    conv64 = add_encoder_block(64)
    conv128 = add_encoder_block(128)
    conv256 = add_encoder_block(256)
    conv512 = add_encoder_block(512, drop_out=True)
    add_encoder_block(1024, add_pool=False, drop_out=True)

    # Construct decoder part
    add_decoder_block(512, conv512)
    add_decoder_block(256, conv256)
    add_decoder_block(128, conv128)
    add_decoder_block(64, conv64)

    # Add output layers
    model.add(Conv2D(2, 3))
    model.add(Activation.ReLU())
    model.add(Conv2D(1, 1))
    model.add(Activation('sigmoid'))

    model.build(optimizer=tf.train.AdamOptimizer(th.learning_rate),
                loss='binary_cross_entropy',
                metric='accuracy')

    return model
Example #5
def fc_lstm(th):
  assert isinstance(th, Config)
  th.mark = 'fc_lstm_' + th.mark
  # Instantiate a model
  model = Classifier(mark=th.mark, net_type=Recurrent)

  # Add input layer
  model.add(Input(sample_shape=th.input_shape))

  # Add fc layers
  for dim in th.fc_dims:
    checker.check_positive_integer(dim)
    model.add(Linear(output_dim=dim))
    # model.add(BatchNorm())
    model.add(Activation('relu'))

  # Add lstm cells
  for dim in th.rc_dims:
    model.add(BasicLSTMCell(state_size=dim))

  # Add output layer
  # model.add(Linear(output_dim=th.output_dim))

  # Build model
  optimizer = tf.train.AdamOptimizer(th.learning_rate)
  model.build(optimizer)

  return model
Example #6
def get_container(th, flatten=False):
  assert isinstance(th, Config)
  model = Classifier(mark=th.mark)
  model.add(Input(sample_shape=th.input_shape))
  model.add(Normalize(sigma=255.))
  if th.centralize_data: model.add(Normalize(mu=th.data_mean))
  if flatten: model.add(Flatten())
  return model
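The container returned above has no hidden or output layers; callers append those and then build. A hedged usage sketch (the layer choices are illustrative only):

model = get_container(th, flatten=True)
model.add(Linear(output_dim=th.num_classes))
model.build(optimizer=tf.train.AdamOptimizer(th.learning_rate))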
Example #7
def get_container(th, flatten=False):
    assert isinstance(th, Config)
    model = Classifier(mark=th.mark)
    model.add(Input(sample_shape=th.input_shape))
    model.add(Normalize(sigma=255.))
    if th.centralize_data: model.add(Normalize(mu=th.data_mean))
    if flatten:
        model.add(Flatten())
        # Register extractor and researcher (mn_du and monitor are module-level
        # imports in the original source, not shown here)
        model.register_extractor(mn_du.MNIST.connection_heat_map_extractor)
        monitor.register_grad_researcher(mn_du.MNIST.flatten_researcher)
    return model
Example #8
def vanilla(mark):
    model = Classifier(mark=mark)
    model.add(Input(sample_shape=[784]))

    def fc_bn_relu(bn=True):
        model.add(Linear(100))
        model.add(Activation('relu'))
        if bn:
            model.add(BatchNorm())

    fc_bn_relu()
    fc_bn_relu()

    model.add(Linear(10))

    # Build model
    model.build(loss='cross_entropy',
                optimizer=tf.train.GradientDescentOptimizer(0.01))

    return model
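Note that fc_bn_relu above applies BatchNorm after the activation. The more common ordering, used by Examples 12, 18 and 19 on this page, is Linear -> BatchNorm -> activation; a sketch of the helper with that ordering:

def fc_bn_relu(bn=True):
    model.add(Linear(100))
    if bn:
        model.add(BatchNorm())
    model.add(Activation('relu'))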
Example #9
def typical(th, cells):
    assert isinstance(th, Config)

    # Instantiate a model
    model = Classifier(mark=th.mark, net_type=Recurrent)

    # Add layers
    model.add(Input(sample_shape=th.input_shape, dtype=tf.int32))
    model.add(Onehot(depth=th.num_classes))
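    # With one-hot rows, a bias-free Dense layer acts as an embedding lookup:
    # the matmul selects one row of the weight matrix per input symbol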
    emb_init = tf.initializers.random_uniform(-1, 1)
    model.add(Dense(th.hidden_dim, use_bias=False,
                    weight_initializer=emb_init))

    if th.input_dropout > 0: model.add(Dropout(1 - th.input_dropout))
    # Add hidden layers
    if not isinstance(cells, (list, tuple)): cells = [cells]
    for cell in cells:
        model.add(cell)
    # Build model and return (see the output_and_build sketch after Example 3)
    output_and_build(model, th)
    return model
Example #10
def lstm_test(th):
    assert isinstance(th, Config)
    # Instantiate model
    th.mark = 'lstm_' + th.mark
    model = Classifier(mark=th.mark, net_type=Recurrent)

    # Add input layer
    model.add(Input(sample_shape=[th.memory_depth]))
    # Add hidden layers
    for _ in range(th.num_blocks):
        model.add(BasicLSTMCell(th.hidden_dim, with_peepholes=False))
    # Add output layer
    model.add(Linear(output_dim=41))
    model.add(Activation('softmax'))

    # Build model
    optimizer = tf.train.AdamOptimizer(learning_rate=th.learning_rate)
    model.build(optimizer=optimizer)

    return model
Example #11
def lstm0(th):
  assert isinstance(th, Config)
  th.mark = 'lstm_' + th.mark
  # Instantiate a model
  model = Classifier(mark=th.mark, net_type=Recurrent)

  # Add input layer
  model.add(Input(sample_shape=th.input_shape))

  # Add lstm cells
  for dim in th.rc_dims:
    model.add(BasicLSTMCell(state_size=dim))

  # Add output layer
  model.add(Linear(output_dim=th.output_dim))

  # Build model
  optimizer = tf.train.AdamOptimizer(th.learning_rate)
  model.build(optimizer)

  return model
Example #12
def conv_2d_test(th):
    assert isinstance(th, Config)
    # Instantiate model
    th.mark = 'cnn_2d_' + th.mark

    def data_dim(sample_rate=44100, duration=2, n_mfcc=40):
        audio_length = sample_rate * duration
        dim = (n_mfcc, 1 + int(np.floor(audio_length / 512)), 1)
        return dim

    dim = data_dim()
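    # With the defaults above: 44100 Hz * 2 s = 88200 samples and a hop of 512
    # give 1 + floor(88200 / 512) = 173 frames, i.e. a 40 x 173 x 1 input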

    model = Classifier(mark=th.mark)

    # Add input layer
    model.add(Input(sample_shape=[dim[0], dim[1], 1]))
    # Add hidden layers
    model.add(Conv2D(32, (4, 10), padding='same'))
    model.add(BatchNorm())
    model.add(Activation('relu'))
    model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
    # model.add(Dropout(0.7))

    model.add(Conv2D(32, (4, 10), padding='same'))
    model.add(BatchNorm())
    model.add(Activation('relu'))
    model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
    # model.add(Dropout(0.7))

    model.add(Conv2D(32, (4, 10), padding='same'))
    model.add(BatchNorm())
    model.add(Activation('relu'))
    model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
    # model.add(Dropout(0.7))

    model.add(Conv2D(32, (4, 10), padding='same'))
    model.add(BatchNorm())
    model.add(Activation('relu'))
    model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
    # model.add(Dropout(0.7))

    model.add(Flatten())
    model.add(Linear(output_dim=64))
    model.add(BatchNorm())
    model.add(Activation('relu'))

    # Add output layer
    model.add(Linear(output_dim=41))
    model.add(Activation('softmax'))

    # Build model
    optimizer = tf.train.AdamOptimizer(learning_rate=th.learning_rate)
    model.build(optimizer=optimizer)

    return model
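The four convolutional blocks in Example 12 are identical; a loop expresses the same structure more compactly (a behavior-preserving sketch):

for _ in range(4):
    model.add(Conv2D(32, (4, 10), padding='same'))
    model.add(BatchNorm())
    model.add(Activation('relu'))
    model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))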
Example #13
def ka_convnet(mark):
    model = Classifier(mark=mark)
    model.add(Input(sample_shape=config.sample_shape))  # 'config' is module-level in the original source

    strength = 1e-5

    def ConvLayer(filters, bn=False):
        model.add(
            Conv2D(filters=filters,
                   kernel_size=5,
                   padding='same',
                   kernel_regularizer=regularizers.L2(strength=strength)))
        if bn:
            model.add(BatchNorm())
        model.add(Activation.ReLU())

    # Define structure
    ConvLayer(32)
    model.add(Dropout(0.5))
    ConvLayer(32, False)
    model.add(Dropout(0.5))
    model.add(MaxPool2D(2, 2, 'same'))
    ConvLayer(64, True)
    model.add(Dropout(0.5))
    model.add(MaxPool2D(2, 2, 'same'))

    model.add(Flatten())
    model.add(Linear(128))
    model.add(Activation.ReLU())
    # model.add(Dropout(0.5))
    model.add(Linear(10))

    # Build model
    model.build(optimizer=tf.train.AdamOptimizer(learning_rate=1e-4))

    return model
Example #14
def deep_conv(mark):
    # Instantiate a classifier
    model = Classifier(mark=mark)
    model.add(Input(sample_shape=config.sample_shape))

    def ConvBNReLU(filters, strength=1.0, bn=True):
        model.add(
            Conv2D(filters=filters,
                   kernel_size=5,
                   padding='same',
                   kernel_regularizer=regularizers.L2(strength=strength)))

        if bn:
            model.add(BatchNorm())

        model.add(Activation('relu'))

    # Conv layers
    reg = 1e-5
    ConvBNReLU(32, reg)
    model.add(Dropout(0.5))
    ConvBNReLU(32, reg)
    model.add(MaxPool2D(2, 2, 'same'))

    ConvBNReLU(64, reg)
    model.add(Dropout(0.5))
    ConvBNReLU(64, reg)
    model.add(MaxPool2D(2, 2, 'same'))

    ConvBNReLU(128, reg)

    # FC layers
    model.add(Flatten())
    model.add(Linear(256))
    # model.add(BatchNorm())
    model.add(Activation('relu'))

    model.add(Linear(256))
    # model.add(BatchNorm())
    model.add(Activation('relu'))

    model.add(Linear(config.y_dim))

    # Build model
    model.build(optimizer=tf.train.AdamOptimizer(learning_rate=1e-4))

    return model
Example #15
def multinput_mlp(th):
    assert isinstance(th, Config)
    model = Classifier(mark=th.mark)

    def data_dim(sample_rate=16000, duration=2, n_mfcc=50):
        audio_length = sample_rate * duration
        dim = (n_mfcc, 1 + int(np.floor(audio_length / 512)), 1)
        return dim

    dim = data_dim()
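    # With the defaults above: 16000 Hz * 2 s = 32000 samples and a hop of 512
    # give 1 + floor(32000 / 512) = 63 frames, i.e. a 50 x 63 x 1 mfcc input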

    # Add hidden layers
    subnet = model.add(inter_type=model.CONCAT)

    subsubnet = subnet.add()
    subsubnet.add(Input(sample_shape=[32000, 1]))
    subsubnet.add(Linear(output_dim=512))
    subsubnet.add(Activation('relu'))
    subsubnet.add(Linear(output_dim=256))
    subsubnet.add(Activation('relu'))

    subsubnet = subnet.add()
    subsubnet.add(Input(sample_shape=[dim[0], dim[1], 1], name='mfcc'))
    subsubnet.add(Conv2D(32, (4, 10), padding='same'))
    subsubnet.add(BatchNorm())
    subsubnet.add(Activation('relu'))
    subsubnet.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
    subsubnet.add(Dropout(0.8))

    # subsubnet.add(Conv2D(32, (4, 10), padding='same'))
    # subsubnet.add(BatchNorm())
    # subsubnet.add(Activation('relu'))
    # subsubnet.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
    # subsubnet.add(Dropout(0.8))

    subsubnet.add(Conv2D(32, (4, 10), padding='same'))
    subsubnet.add(BatchNorm())
    subsubnet.add(Activation('relu'))
    subsubnet.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
    subsubnet.add(Dropout(0.7))

    subsubnet.add(Flatten())

    model.add(Linear(output_dim=128))
    model.add(BatchNorm())
    model.add(Activation('relu'))

    model.add(Linear(output_dim=64))
    model.add(BatchNorm())
    model.add(Activation('relu'))

    model.add(Linear(output_dim=64))
    model.add(BatchNorm())
    model.add(Activation('relu'))

    # Add output layer
    model.add(Linear(output_dim=41))
    model.add(Activation('softmax'))

    # Build model
    optimizer = tf.train.AdamOptimizer(learning_rate=th.learning_rate)
    model.build(optimizer=optimizer)

    return model
Example #16
def res_00(th):
    assert isinstance(th, Config)
    model = Classifier(mark=th.mark)

    def data_dim(sample_rate=16000, duration=2, n_mfcc=50):
        audio_length = sample_rate * duration
        dim = (n_mfcc, 1 + int(np.floor(audio_length / 512)), 1)
        return dim

    dim = data_dim()

    # Add hidden layers
    subnet = model.add(inter_type=model.CONCAT)
    # the net to process raw data
    subsubnet = subnet.add()
    # subsubnet.add(Input(sample_shape=[32000, 1], name='raw_data'))
    subsubnet.add(Input(sample_shape=[32000, 1]))
    subsubnet.add(Conv1D(filters=16, kernel_size=9, padding='valid'))
    subsubnet.add(BatchNorm())
    subsubnet.add(Activation('relu'))
    subsubnet.add(Conv1D(filters=16, kernel_size=9, padding='valid'))
    subsubnet.add(BatchNorm())
    subsubnet.add(Activation('relu'))
    subsubnet.add(MaxPool1D(pool_size=16, strides=16))
    subsubnet.add(Dropout(th.raw_keep_prob))

    subsubnet.add(Conv1D(filters=32, kernel_size=3, padding='valid'))
    subsubnet.add(Activation('relu'))
    subsubnet.add(Conv1D(filters=32, kernel_size=3, padding='valid'))
    subsubnet.add(Activation('relu'))
    subsubnet.add(MaxPool1D(pool_size=4, strides=4))
    subsubnet.add(Dropout(th.raw_keep_prob))

    subsubnet.add(Conv1D(filters=32, kernel_size=3, padding='valid'))
    subsubnet.add(Activation('relu'))
    subsubnet.add(Conv1D(filters=32, kernel_size=3, padding='valid'))
    subsubnet.add(Activation('relu'))
    subsubnet.add(MaxPool1D(pool_size=4, strides=4))

    subsubnet.add(Conv1D(filters=256, kernel_size=3, padding='valid'))
    subsubnet.add(BatchNorm())
    subsubnet.add(Activation('relu'))
    subsubnet.add(Conv1D(filters=256, kernel_size=3, padding='valid'))
    subsubnet.add(BatchNorm())
    subsubnet.add(Activation('relu'))
    subsubnet.add(GlobalMaxPooling1D())

    # the net to process mfcc features
    subsubnet = subnet.add()
    subsubnet.add(Input(sample_shape=[dim[0], dim[1], 1], name='mfcc'))
    subsubnet.add(Conv2D(32, (4, 10), padding='same'))
    subsubnet.add(BatchNorm())
    subsubnet.add(Activation('relu'))
    subsubnet.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
    subsubnet.add(Dropout(th.mfcc_keep_prob))

    net = subsubnet.add(ResidualNet())
    net.add(Conv2D(32, (4, 10), padding='same'))
    net.add(BatchNorm())
    net.add_shortcut()
    subsubnet.add(Activation('relu'))
    subsubnet.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
    subsubnet.add(Dropout(th.mfcc_keep_prob))
    #
    net = subsubnet.add(ResidualNet())
    net.add(Conv2D(32, (4, 10), padding='same'))
    net.add(BatchNorm())
    net.add_shortcut()
    subsubnet.add(Activation('relu'))
    subsubnet.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
    subsubnet.add(Dropout(th.mfcc_keep_prob))

    net = subsubnet.add(ResidualNet())
    net.add(Conv2D(32, (4, 10), padding='same'))
    net.add(BatchNorm())
    net.add_shortcut()
    subsubnet.add(Activation('relu'))
    subsubnet.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
    subsubnet.add(Dropout(th.mfcc_keep_prob))

    net = subsubnet.add(ResidualNet())
    net.add(Conv2D(32, (4, 10), padding='same'))
    net.add(BatchNorm())
    net.add_shortcut()
    subsubnet.add(Activation('relu'))
    subsubnet.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
    subsubnet.add(Dropout(th.mfcc_keep_prob))
    subsubnet.add(Flatten())

    subsubnet.add(Dropout(th.concat_keep_prob))
    model.add(Linear(output_dim=128))
    model.add(BatchNorm())
    model.add(Activation('relu'))
    #
    model.add(Linear(output_dim=64))
    model.add(BatchNorm())
    model.add(Activation('relu'))

    # Add output layer
    model.add(Linear(output_dim=41))
    model.add(Activation('softmax'))

    # Build model
    optimizer = tf.train.AdamOptimizer(learning_rate=th.learning_rate)
    model.build(optimizer=optimizer)

    return model
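The mfcc branch of Example 16 repeats one residual block four times; a helper built only from calls already shown above makes the repetition explicit (a sketch with identical structure):

def add_res_block(subsubnet, keep_prob):
    net = subsubnet.add(ResidualNet())
    net.add(Conv2D(32, (4, 10), padding='same'))
    net.add(BatchNorm())
    net.add_shortcut()
    subsubnet.add(Activation('relu'))
    subsubnet.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
    subsubnet.add(Dropout(keep_prob))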
Example #17
def conv_test(th):
    assert isinstance(th, Config)
    # Instantiate model
    th.mark = 'cnn_' + th.mark
    model = Classifier(mark=th.mark)

    # Add input layer
    model.add(Input(sample_shape=[32000, 1]))
    # Add hidden layers
    model.add(Conv1D(filters=16, kernel_size=9, padding='valid'))
    model.add(Activation('relu'))
    model.add(Conv1D(filters=16, kernel_size=9, padding='valid'))
    model.add(Activation('relu'))
    model.add(MaxPool1D(pool_size=16, strides=16))
    # model.add(Dropout(0.9))

    model.add(Conv1D(filters=32, kernel_size=3, padding='valid'))
    model.add(Activation('relu'))
    model.add(Conv1D(filters=32, kernel_size=3, padding='valid'))
    model.add(Activation('relu'))
    model.add(MaxPool1D(pool_size=4, strides=4))
    # model.add(Dropout(0.9))

    model.add(Conv1D(filters=32, kernel_size=3, padding='valid'))
    model.add(Activation('relu'))
    model.add(Conv1D(filters=32, kernel_size=3, padding='valid'))
    model.add(Activation('relu'))
    model.add(MaxPool1D(pool_size=4, strides=4))
    # model.add(Dropout(0.9))
    #
    model.add(Conv1D(filters=256, kernel_size=3, padding='valid'))
    model.add(Activation('relu'))
    model.add(Conv1D(filters=256, kernel_size=3, padding='valid'))
    model.add(Activation('relu'))
    model.add(GlobalMaxPooling1D())
    # model.add(Dropout(0.8))
    #
    model.add(Linear(output_dim=64))
    model.add(Activation('relu'))
    model.add(Linear(output_dim=1028))
    model.add(Activation('relu'))

    # Add output layer
    model.add(Linear(output_dim=41))
    model.add(Activation('softmax'))

    # Build model
    optimizer = tf.train.AdamOptimizer(learning_rate=th.learning_rate)
    model.build(optimizer=optimizer)

    return model
Example #18
def mlp(th):
    assert isinstance(th, Config)
    # Instantiate a model
    model = Classifier(mark=th.mark)

    # Add input layer
    model.add(Input(sample_shape=th.input_shape))
    model.add(Flatten())
    # Add hidden layers
    assert isinstance(th.fc_dims, list)
    for dim in th.fc_dims:
        model.add(Linear(output_dim=dim))
        model.add(BatchNormalization())
        model.add(Activation(th.actype1))

    # Add output layer
    model.add(Linear(output_dim=th.num_classes))

    # Build model
    optimizer = tf.train.AdamOptimizer(learning_rate=th.learning_rate)
    model.build(optimizer=optimizer)

    return model
Example #19
def mlp(th):
    assert isinstance(th, Config)
    # Instantiate a model
    model = Classifier(mark=th.mark)

    # Add input layer
    model.add(Input([32000]))
    # Add hidden layers
    for _ in range(th.num_blocks):
        model.add(Linear(output_dim=th.hidden_dim))
        model.add(BatchNorm())
        # model.add(BatchNormalization())
        model.add(Activation(th.actype1))
        # model.add(Dropout(0.9))
    # Add output layer
    model.add(Linear(output_dim=th.num_classes))
    model.add(Activation('softmax'))

    # Build model
    optimizer = tf.train.AdamOptimizer(learning_rate=th.learning_rate)
    model.build(optimizer=optimizer)

    return model