Example #1
def vanilla(mark, bn=False):  # NOTE: the bn flag is accepted but unused in this snippet
    z_dim = 100
    model = VAE(z_dim=z_dim,
                mark=mark,
                classes=0,
                sample_shape=[784],
                output_shape=[28, 28, 1])

    # Define encoder
    model.Q.add(Linear(output_dim=128))
    model.Q.add(Activation.ReLU())

    fork = Fork(name='mu_sigma')
    fork.add('mu', Linear(output_dim=z_dim))
    fork.add('sigma', Linear(output_dim=z_dim))

    model.Q.add(fork)

    # Define decoder
    model.P.add(Linear(output_dim=128))
    model.P.add(Activation.ReLU())
    model.P.add(Linear(output_dim=784))
    model.P.add(Activation('sigmoid'))

    # Build model
    model.build()

    return model
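A minimal call-site sketch for the builder above; the mark string is an arbitrary placeholder, and this assumes vanilla and its tframe dependencies are importable from the surrounding module:

# Hypothetical usage of the VAE builder defined above.
vae = vanilla(mark='vae_demo')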
Example #2
def test_00(memory, learning_rate=0.001):
    # Configurations
    mark = 'test'
    D = memory

    # Initiate model
    model = NeuralNet(memory, mark=mark)
    nn = model.nn
    assert isinstance(nn, Predictor)

    # Add layers
    nn.add(Input([D]))

    nn.add(Linear(output_dim=2 * D))
    nn.add(Activation('relu'))
    nn.add(Linear(output_dim=2 * D))
    nn.add(Activation('relu'))
    nn.add(Linear(output_dim=2 * D))
    nn.add(Polynomial(order=3))
    nn.add(Linear(output_dim=1))

    # Build model
    model.default_build(learning_rate)

    # Return model
    return model
Example #3
def add_res_block():
    # Closure helper: `nn`, `D` and `activation` are free variables supplied
    # by an enclosing model builder (a context sketch follows below).
    net = nn.add(ResidualNet())
    net.add(Linear(output_dim=D))
    net.add(Activation(activation))
    net.add(Linear(output_dim=D))
    net.add_shortcut()
    net.add(Activation(activation))
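This fragment only runs inside a builder that defines those free variables. A minimal sketch of such a context, modeled on Examples #7 and #9 below; all names and values here are assumptions:

# Sketch of an enclosing scope for add_res_block() (assumed setup).
D = 16                                 # memory depth, reused as hidden dim
model = NeuralNet(D, mark='res_demo')
nn = model.nn
activation = 'relu'
nn.add(Input([D]))
add_res_block()                        # appends one residual block
nn.add(Linear(output_dim=1))
model.default_build(0.001)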
Example #4
def vn_00(memory_depth, mark, degree=None, homo_str=0.0):
    D = memory_depth
    hidden_dims = [[40] * 4, [40] * 5]

    if degree is None: degree = len(hidden_dims) + 1
    elif degree < 2: raise ValueError('!! Degree must be greater than 1')

    activation = lambda: Activation('relu')
    learning_rate = 0.001
    reg = None

    # Initiate model
    model = NeuralNet(D, mark, degree=degree)

    for order in range(2, degree + 1):
        dims = hidden_dims[order - 2]
        for dim in dims:
            model.nn.T[order].add(
                Linear(dim, weight_regularizer='l2', strength=reg))
            model.nn.T[order].add(activation())
        model.nn.T[order].add(Linear(1, weight_regularizer='l2', strength=reg))

    # Build model
    model.nn.build(loss='euclid',
                   metric='ratio',
                   metric_name='Err%',
                   homo_strength=homo_str,
                   optimizer=tf.train.AdamOptimizer(learning_rate))
    return model


# endregion : Volterra Networks
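A hypothetical call site for vn_00; the argument values are illustrative placeholders:

# Degree-3 Volterra network over a depth-16 memory.
model = vn_00(memory_depth=16, mark='vn_demo', degree=3, homo_str=0.1)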
Example #5
def mlp_00(memory_depth, mark):
    D = memory_depth
    hidden_dims = [10, 10, 10]

    activation = lambda: Activation('relu')
    learning_rate = 0.001
    reg = 0.00

    # Initiate model
    model = NeuralNet(memory_depth, mark)
    model.nn.add(Input([D]))

    for dim in hidden_dims:
        model.nn.add(
            Linear(output_dim=dim, weight_regularizer='l2', strength=reg))
        model.nn.add(activation())

    model.nn.add(Linear(output_dim=1, weight_regularizer='l2', strength=reg))

    # Build model
    model.nn.build(loss='euclid',
                   metric='ratio',
                   metric_name='Err%',
                   optimizer=tf.train.AdamOptimizer(learning_rate))

    return model
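A hypothetical call site for mlp_00 above, with placeholder values:

model = mlp_00(memory_depth=16, mark='mlp_demo')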
Example #6
def fc_lstm(th):
  assert isinstance(th, Config)
  th.mark = 'fc_lstm_' + th.mark
  # Initiate a model
  model = Classifier(mark=th.mark, net_type=Recurrent)

  # Add input layer
  model.add(Input(sample_shape=th.input_shape))

  # Add fc layers
  for dim in th.fc_dims:
    checker.check_positive_integer(dim)
    model.add(Linear(output_dim=dim))
    # model.add(BatchNorm())
    model.add(Activation('relu'))

  # Add lstm cells
  for dim in th.rc_dims:
    model.add(BasicLSTMCell(state_size=dim))

  # Add output layer
  # model.add(Linear(output_dim=th.output_dim))

  # Build model
  optimizer = tf.train.AdamOptimizer(th.learning_rate)
  model.build(optimizer)

  return model
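A hedged sketch of the Config object fc_lstm expects, populating only the attributes the builder actually reads; the bare Config() constructor call and all concrete values are assumptions:

# Illustrative configuration for fc_lstm (attribute names taken from the
# builder above; values are placeholders).
th = Config()
th.mark = 'demo'
th.input_shape = [39]      # per-step feature dimension (assumed)
th.fc_dims = [128, 128]    # fully-connected front-end
th.rc_dims = [64]          # one LSTM cell with 64 state units
th.learning_rate = 1e-3
model = fc_lstm(th)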
Example #7
def bres_net_res0(th, activation='relu'):
    assert isinstance(th, NlsHub)
    # Initiate a neural net model
    th.mark = '{}-{}'.format(th.mark, 'res')
    model = NeuralNet(th.memory_depth, mark=th.mark, nn_class=BResNet)
    nn = model.nn
    assert isinstance(nn, BResNet)
    nn.strict_residual = False

    # Add layers
    nn.add(Input([th.memory_depth]))
    nn.add(Linear(output_dim=th.hidden_dim))
    nn.add(Activation(activation))
    branch = nn.add_branch()
    branch.add(Linear(output_dim=1))

    def add_res_block():
        net = nn.add(ResidualNet())
        net.add(Linear(output_dim=th.hidden_dim))
        net.add(Activation(activation))
        net.add_shortcut()
        branch = nn.add_branch()
        branch.add(Linear(output_dim=1))

    for _ in range(th.num_blocks - 1):
        add_res_block()
    nn.add(Linear(output_dim=1))

    # Build
    model.default_build(th.learning_rate)

    return model
Example #8
def mlp_00(mark,
           memory_depth,
           layer_dim,
           layer_num,
           learning_rate,
           activation='relu'):

    # Initiate a predictor
    model = NeuralNet(memory_depth, mark=mark)
    nn = model.nn
    assert isinstance(nn, Predictor)

    # Add layers
    nn.add(Input([memory_depth]))
    for i in range(layer_num):
        nn.add(Linear(output_dim=layer_dim))
        nn.add(Activation(activation))
    nn.add(Linear(output_dim=1))

    # Build model
    model.default_build(learning_rate)

    # Return model
    return model
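A hypothetical call site for this parameterized MLP builder:

# Three hidden ReLU layers of width 32 over a depth-8 memory.
model = mlp_00('mlp_demo', memory_depth=8, layer_dim=32, layer_num=3,
               learning_rate=1e-3)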
Example #9
def res_00(th, activation='relu'):
    assert isinstance(th, NlsHub)
    model = NeuralNet(th.memory_depth, mark=th.mark, nn_class=Predictor)
    nn = model.nn
    assert isinstance(nn, Predictor)

    # Add blocks
    nn.add(Input([th.memory_depth]))
    nn.add(
        Linear(output_dim=th.hidden_dim,
               weight_regularizer=th.regularizer,
               strength=th.reg_strength))
    nn.add(Activation(activation))

    def add_res_block():
        net = nn.add(ResidualNet())
        net.add(
            Linear(output_dim=th.hidden_dim,
                   weight_regularizer=th.regularizer,
                   strength=th.reg_strength))
        net.add(Activation(activation))
        net.add_shortcut()

    for _ in range(th.num_blocks):
        add_res_block()
    nn.add(Linear(output_dim=1))

    # Build model
    model.default_build(th.learning_rate)

    # Return model
    return model
Example #10
def _output_and_build(model, th):
    assert isinstance(model, Classifier)
    assert isinstance(th, Config)

    if th.optimizer == 'adam':
        optimizer = tf.train.AdamOptimizer(th.learning_rate)
    elif th.optimizer == 'nesterov':
        th.momentum = 0.9
        optimizer = tf.train.MomentumOptimizer(th.learning_rate,
                                               th.momentum,
                                               use_nesterov=True)
    elif th.optimizer == 'sgd':
        optimizer = tf.train.GradientDescentOptimizer(th.learning_rate)
    else:
        raise ValueError(
            "!! In this task, th.optimizer must be one of 'adam', 'nesterov' or 'sgd'")

    # Add output layer
    model.add(Linear(output_dim=th.output_dim))
    model.add(Activation('softmax'))

    model.build(optimizer=th.get_optimizer(optimizer),
                metric=['loss', 'seq_acc'],
                batch_metric='seq_acc',
                eval_metric='seq_acc',
                last_only=True)
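A hedged sketch of driving the optimizer dispatch above; th and model are assumed to be set up as in the other examples:

th.optimizer = 'nesterov'        # one of 'adam', 'nesterov', 'sgd'
_output_and_build(model, th)     # also sets th.momentum = 0.9 on this branch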
Example #11
def init_vn(mark, homo_str):
  D = NN_MEM_DEPTH
  hidden_dims = HIDDEN_DIMS

  degree = NN_DEGREE
  if degree is None: degree = len(hidden_dims) + 1
  elif degree < 2: raise ValueError('!! Degree must be greater than 1')

  activation = lambda: Activation('relu')
  learning_rate = 0.001
  reg = None

  # Initiate model
  model = NeuralNet(D, mark, degree=degree, orders=NN_ORDERS)

  for order in range(NN_MAX_VOL_ORD + 1, degree + 1):
    if order not in NN_ORDERS: continue
    dims = hidden_dims[order - NN_MAX_VOL_ORD - 1]
    for dim in dims:
      model.nn.add(order, Linear(dim, weight_regularizer='l2', strength=reg))
      model.nn.add(order, activation())
    model.nn.add(order, Linear(1, weight_regularizer='l2', strength=reg))

  # Build model
  model.nn.build(loss='euclid', metric='ratio', metric_name='Err %',
                 homo_strength=homo_str,
                 optimizer=tf.train.AdamOptimizer(learning_rate))
  return model
Example #12
def add_res_block():
    # Closure helper: `nn`, `th` and `activation` come from an enclosing
    # builder; unlike Example #3, each block also taps a one-unit output
    # branch (see Example #7 for this helper in context).
    net = nn.add(ResidualNet())
    net.add(Linear(output_dim=th.hidden_dim))
    net.add(Activation(activation))
    net.add_shortcut()
    branch = nn.add_branch()
    branch.add(Linear(output_dim=1))
Example #13
def multinput(th):
    assert isinstance(th, Config)
    model = Classifier(mark=th.mark)

    # Add hidden layers
    assert isinstance(th.fc_dims, list)
    subnet = model.add(inter_type=model.CONCAT)
    for dims in th.fc_dims:
        subsubnet = subnet.add()
        # Add input layer
        subsubnet.add(Input(sample_shape=th.input_shape))
        subsubnet.add(Flatten())
        assert isinstance(dims, list)

        for dim in dims:
            subsubnet.add(Linear(output_dim=dim))
            if core.use_bn: subsubnet.add(BatchNormalization())
            subsubnet.add(Activation(th.actype1))

    # Add output layer
    model.add(Linear(output_dim=th.num_classes))

    # Build model
    optimizer = tf.train.AdamOptimizer(learning_rate=th.learning_rate)
    model.build(optimizer=optimizer)

    return model
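Note the expected shape of th.fc_dims here: a list of lists, one inner list per parallel input branch (the builder also reads a module-level core.use_bn flag). A hypothetical configuration fragment:

# Two parallel branches (128->64 and 64->64), concatenated via CONCAT.
th.fc_dims = [[128, 64], [64, 64]]
model = multinput(th)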
Example #14
def multinput(th):
    assert isinstance(th, Config)
    model = Classifier(mark=th.mark)

    # Add hidden layers
    assert isinstance(th.fc_dims, list)
    subnet = model.add(inter_type=model.CONCAT)
    for dims in th.fc_dims:
        subsubnet = subnet.add()
        # Add input layer
        subsubnet.add(Input(sample_shape=th.input_shape))
        subsubnet.add(Flatten())
        assert isinstance(dims, list)

        for dim in dims:
            subsubnet.add(Linear(output_dim=dim))
            # if cf10_core.use_bn: subsubnet.add(BatchNormalization())
            subsubnet.add(Activation(th.actype1))

    # Add output layer
    model.add(Linear(output_dim=th.num_classes))

    # Build model
    model.build(metric=['accuracy', 'loss'],
                batch_metric='accuracy',
                eval_metric='accuracy')

    return model
Example #15
def mlp_res00(mark,
              memory_depth,
              branch_num,
              hidden_dim,
              learning_rate,
              activation,
              identity_init=True):
    # Initiate a neural net
    model = NeuralNet(memory_depth,
                      mark=mark,
                      bamboo=True,
                      identity_initial=identity_init)

    nn = model.nn
    assert isinstance(nn, Bamboo)

    # Add layers
    nn.add(Input([memory_depth]))

    for _ in range(branch_num):
        nn.add(Linear(output_dim=hidden_dim))
        nn.add(Activation(activation))
        branch = nn.add_branch()
        branch.add(Linear(output_dim=1))

    resnet = nn.add(ResidualNet())
    resnet.add(Linear(output_dim=hidden_dim))
    resnet.add(Activation(activation))
    resnet.add(Linear(output_dim=hidden_dim))
    resnet.add_shortcut()
    resnet.add(Activation(activation))

    nn.add(Linear(output_dim=hidden_dim))
    nn.add(Activation(activation))
    nn.add(Linear(output_dim=1))

    # Build model
    model.default_build(learning_rate)

    # Return model
    return model
Example #16
def deep_conv(mark):
    # Initiate predictor
    model = Classifier(mark=mark)
    model.add(Input(sample_shape=config.sample_shape))

    def ConvBNReLU(filters, strength=1.0, bn=True):
        model.add(
            Conv2D(filters=filters,
                   kernel_size=5,
                   padding='same',
                   kernel_regularizer=regularizers.L2(strength=strength)))

        if bn:
            model.add(BatchNorm())

        model.add(Activation('relu'))

    # Conv layers
    reg = 1e-5
    ConvBNReLU(32, reg)
    model.add(Dropout(0.5))
    ConvBNReLU(32, reg)
    model.add(MaxPool2D(2, 2, 'same'))

    ConvBNReLU(64, reg)
    model.add(Dropout(0.5))
    ConvBNReLU(64, reg)
    model.add(MaxPool2D(2, 2, 'same'))

    ConvBNReLU(128, reg)

    # FC layers
    model.add(Flatten())
    model.add(Linear(256))
    # model.add(BatchNorm())
    model.add(Activation('relu'))

    model.add(Linear(256))
    # model.add(BatchNorm())
    model.add(Activation('relu'))

    model.add(Linear(config.y_dim))

    # Build model
    model.build(optimizer=tf.train.AdamOptimizer(learning_rate=1e-4))

    return model
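A hypothetical call site; the mark is a placeholder:

model = deep_conv('conv_demo')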
Example #17
def mlp02(mark,
          memory_depth,
          branch_num,
          hidden_dim,
          learning_rate,
          activation,
          identity_init=False):
    # Initiate a neural net
    model = NeuralNet(memory_depth,
                      mark=mark,
                      bamboo_broad=True,
                      identity_initial=identity_init)

    nn = model.nn
    assert isinstance(nn, Bamboo_Broad)

    # Add layers
    nn.add(Input([memory_depth]))

    branch = nn.add_branch()
    branch.add(Linear(output_dim=hidden_dim))
    branch.add(Activation(activation))
    branch.add(Linear(output_dim=1))

    for _ in range(branch_num - 1):
        branch = nn.add_branch()
        branch.add(
            Linear(output_dim=hidden_dim,
                   weight_initializer=tf.zeros_initializer(),
                   bias_initializer=tf.zeros_initializer()))
        branch.add(Activation(activation))
        branch.add(
            Linear(output_dim=1,
                   weight_initializer=tf.zeros_initializer(),
                   bias_initializer=tf.zeros_initializer()))

    # Build model
    model.default_build(learning_rate)

    # Return model
    return model
Example #18
def _add_fc_relu_layers(nn, hidden_dims, activation='relu', strength=0.0):
    assert isinstance(nn, Predictor)
    assert isinstance(hidden_dims, (tuple, list))

    for dim in hidden_dims:
        nn.add(
            Linear(output_dim=dim, weight_regularizer='l2', strength=strength))
        nn.add(Activation(activation))
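A hedged usage sketch for the helper above, assuming an nn Predictor built as in the other examples:

# Append two 64-unit fc+ReLU layers with light L2 regularization.
_add_fc_relu_layers(nn, [64, 64], activation='relu', strength=1e-4)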
Example #19
def add_res_block():
    # Closure helper: `nn`, `th` and `activation` come from an enclosing
    # builder (see Example #9 for this helper in context).
    net = nn.add(ResidualNet())
    net.add(
        Linear(output_dim=th.hidden_dim,
               weight_regularizer=th.regularizer,
               strength=th.reg_strength))
    net.add(Activation(activation))
    net.add_shortcut()
Example #20
def conv_bn_relu(filters, twod=True, bn=True):
    # Closure helper: `subsubnet` is a free variable from an enclosing builder.
    if twod:
        subsubnet.add(
            Conv2D(filters=filters, kernel_size=(4, 10), padding='same'))
    else:
        subsubnet.add(
            Conv1D(filters=filters, kernel_size=9, padding='valid'))
    if bn: subsubnet.add(BatchNorm())
    subsubnet.add(Activation('relu'))
Example #21
def ConvLayer(filters, bn=False):
    # Closure helper: `model` and `strength` are free variables from an
    # enclosing builder.
    model.add(
        Conv2D(filters=filters,
               kernel_size=5,
               padding='same',
               kernel_regularizer=regularizers.L2(strength=strength)))
    if bn:
        model.add(BatchNorm())
    model.add(Activation.ReLU())
Example #22
def finalize(th, model, add_output_layer=True):
  assert isinstance(th, Config) and isinstance(model, Classifier)
  # Add output layer
  if add_output_layer:
    model.add(Dense(num_neurons=th.num_classes))
    model.add(Activation('softmax'))
  # Build model
  model.build(th.get_optimizer(), metric=['accuracy', 'loss'],
              batch_metric='accuracy', eval_metric='accuracy')
  return model
Example #23
def output_and_build(model, th):
    assert isinstance(model, Classifier) and isinstance(th, Config)
    # Add output dropout if necessary
    # The Dropout layer here appears to take a keep probability, hence
    # the 1 - th.output_dropout.
    if th.output_dropout > 0: model.add(Dropout(1 - th.output_dropout))
    # Add output layer
    model.add(Dense(num_neurons=th.output_dim))
    model.add(Activation('softmax'))

    # Build model
    model.build(loss=th.loss_string, metric=['loss', 'f1'], batch_metric='f1')
Example #24
def output_and_build(model, th):
    assert isinstance(model, Classifier)
    assert isinstance(th, Config)
    # Add output layer
    model.add(Dense(num_neurons=th.output_dim))
    model.add(Activation('softmax'))

    model.build(metric='gen_acc',
                batch_metric='gen_acc',
                val_targets='val_targets')
Example #25
def output_and_build(model, th):
    assert isinstance(model, Classifier)
    assert isinstance(th, Config)
    # Add dropout if necessary
    if th.output_dropout > 0: model.add(Dropout(1 - th.output_dropout))
    # Add output layer
    model.add(Dense(num_neurons=th.output_dim))
    model.add(Activation('softmax'))

    model.build(loss='cross_entropy', metric='bpc', batch_metric='bpc')
Example #26
def ConvBNReLU(filters, strength=1.0, bn=True):
    # Closure helper: `model` is a free variable from an enclosing builder
    # (see Example #16 for this helper in context).
    model.add(
        Conv2D(filters=filters,
               kernel_size=5,
               padding='same',
               kernel_regularizer=regularizers.L2(strength=strength)))

    if bn:
        model.add(BatchNorm())

    model.add(Activation('relu'))
Example #27
def mlp00(mark):
    # Define model
    model = TDPlayer(mark=mark)

    model.add(Input(sample_shape=[15, 15]))
    model.add(Flatten())

    model.add(Linear(225))
    model.add(Activation.ReLU())

    model.add(Linear(225))
    model.add(Activation.ReLU())

    model.add(Linear(1))
    model.add(Activation('sigmoid'))

    # Build model
    model.build()

    return model
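A hypothetical call site; the mark is a placeholder:

player = mlp00('td_demo')   # value net over a flattened 15x15 board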
Example #28
def finalize(th, model):
    assert isinstance(th, Config) and isinstance(model, Classifier)
    # Add output layer
    model.add(Dense(num_neurons=th.num_classes, prune_frac=0.05))
    # model.add(Dense(num_neurons=th.num_classes))
    model.add(Activation('softmax'))
    # Build model
    model.build(metric=['accuracy', 'loss'],
                batch_metric='accuracy',
                eval_metric='accuracy')
    return model
Example #29
def output_and_build(model, th):
  assert isinstance(model, Classifier)
  assert isinstance(th, Config)
  # Add output layer
  model.add(Linear(
    output_dim=th.output_dim,
    use_bias=th.bias_out_units,
  ))
  model.add(Activation('softmax'))

  model.build(metric='accuracy', batch_metric='accuracy', last_only=True)
Example #30
def output_and_build(model, th):
    assert isinstance(model, Classifier)
    assert isinstance(th, Config)
    # Add dropout if necessary
    if th.output_dropout > 0: model.add(Dropout(1 - th.output_dropout))
    # Add output layer
    model.add(Dense(num_neurons=th.output_dim))
    model.add(Activation('softmax'))

    model.build(last_only=True,
                metric=['accuracy', 'loss'],
                batch_metric='accuracy',
                eval_metric='accuracy')