Example no. 1
    def __init__(self,
                 z_dim=None,
                 sample_shape=None,
                 output_shape=None,
                 mark=None,
                 classes=0):
        # Call parent's constructor
        Model.__init__(self, mark)

        self._targets = None
        self._conditional = classes > 0
        self._classes = classes
        if self._conditional:
            with self._graph.as_default():
                self._targets = tf.placeholder(dtype=tf.float32,
                                               shape=[None, classes],
                                               name='one_hot_labels')

        # Define generator and discriminator
        self.Generator = Net(pedia.Generator)
        self.Discriminator = Net(pedia.Discriminator)
        # Alias
        self.G = self.Generator
        self.D = self.Discriminator

        # If z_dim/sample_shape is provided, define the input for
        #   generator/discriminator accordingly
        if z_dim is not None:
            self.G.add(Input(sample_shape=[None, z_dim], name='z'))
        if sample_shape is not None:
            if not isinstance(sample_shape, (list, tuple)):
                raise TypeError('sample shape must be a list or a tuple')
            self.D.add(
                Input(sample_shape=[None] + list(sample_shape),
                      name='samples'))

        self._z_dim = z_dim
        self._sample_shape = sample_shape
        self._output_shape = output_shape
        self._sample_num = None

        # Private tensors and ops
        self._G, self._outputs = None, None
        self._Dr, self._Df = None, None
        self._logits_Dr, self._logits_Df = None, None
        self._loss_G, self._loss_D = None, None
        self._loss_Dr, self._loss_Df = None, None
        self._train_step_G, self._train_step_D = None, None
        self._merged_summary_G, self._merged_summary_D = None, None
Example no. 2
def mlp_00(learning_rate=0.001, memory_depth=80):
    """
  Performance on WH:
    [0] depth = 80
  """
    # Configuration
    hidden_dims = [2 * memory_depth] * 4
    strength = 0
    activation = 'lrelu'

    mark = 'mlp_D{}_{}_{}'.format(memory_depth, hidden_dims, activation)

    # Initiate a predictor
    model = NeuralNet(memory_depth, mark=mark)
    nn = model.nn
    assert isinstance(nn, Predictor)

    # Add layers
    nn.add(Input([memory_depth]))
    lc._add_fc_relu_layers(nn, hidden_dims, activation, strength=strength)
    nn.add(Linear(output_dim=1, weight_regularizer='l2', strength=strength))

    # Build model
    # optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    optimizer = tf.train.AdamOptimizer(learning_rate)
    nn.build(loss='euclid',
             metric='rms_ratio',
             metric_name='RMS(err)%',
             optimizer=optimizer)

    # Return model
    return model
Example no. 3
def svn_01(memory_depth, mark, hidden_dim, order1, learning_rate=0.001):

    strength = 0
    # Initiate a predictor
    model = NeuralNet(memory_depth, mark=mark)
    nn = model.nn
    assert isinstance(nn, Predictor)

    # Add layers
    nn.add(Input([memory_depth]))
    nn.add(Linear(output_dim=hidden_dim))
    nn.add(Polynomial(order=order1))
    nn.add(
        Linear(output_dim=1,
               weight_regularizer='l2',
               strength=strength,
               use_bias=False))

    # Build model
    nn.build(loss='euclid',
             metric='rms_ratio',
             metric_name='RMS(err)%',
             optimizer=tf.train.AdamOptimizer(learning_rate))

    # Return model
    return model
Example no. 4
def mlp_00(memory_depth, mark):
    D = memory_depth
    hidden_dims = [10, 10, 10]

    activation = lambda: Activation('relu')
    learning_rate = 0.001
    reg = 0.00

    # Initiate model
    model = NeuralNet(memory_depth, mark)
    model.nn.add(Input([D]))

    for dim in hidden_dims:
        model.nn.add(
            Linear(output_dim=dim, weight_regularizer='l2', strength=reg))
        model.nn.add(activation())

    model.nn.add(Linear(output_dim=1, weight_regularizer='l2', strength=reg))

    # Build model
    model.nn.build(loss='euclid',
                   metric='ratio',
                   metric_name='Err%',
                   optimizer=tf.train.AdamOptimizer(learning_rate))

    return model
Example no. 5
def mlp_00(mark,
           memory_depth,
           layer_dim,
           layer_num,
           learning_rate,
           activation='relu'):

    # Initiate a predictor
    model = NeuralNet(memory_depth, mark=mark)
    nn = model.nn
    assert isinstance(nn, Predictor)

    # Add layers
    nn.add(Input([memory_depth]))
    for i in range(layer_num):
        nn.add(Linear(output_dim=layer_dim))
        nn.add(Activation(activation))
    nn.add(Linear(output_dim=1))

    # Build model
    model.default_build(learning_rate)

    # Return model
    return model
Example no. 6
def test_00(memory, learning_rate=0.001):
    # Configurations
    mark = 'test'
    D = memory

    # Initiate model
    model = NeuralNet(memory, mark=mark)
    nn = model.nn
    assert isinstance(nn, Predictor)

    # Add layers
    nn.add(Input([memory]))

    nn.add(Linear(output_dim=2 * D))
    nn.add(Activation('relu'))
    nn.add(Linear(output_dim=2 * D))
    nn.add(Activation('relu'))
    nn.add(Linear(output_dim=2 * D))
    nn.add(Polynomial(order=3))
    nn.add(Linear(output_dim=1))

    # Build model
    model.default_build(learning_rate)

    # Return model
    return model
Example no. 7
    def __init__(self,
                 degree,
                 depth,
                 mark=None,
                 max_volterra_order=3,
                 **kwargs):
        # Check parameters
        if degree < 1: raise ValueError('!! Degree must be a positive integer')
        if depth < 1: raise ValueError('!! Depth must be a positive integer')

        # Call parent's constructor
        Model.__init__(self, mark)

        # Initialize fields
        self.degree = degree
        self.depth = depth
        self._max_volterra_order = min(max_volterra_order, degree)
        self.T = {}
        self._input = Input([depth], name='input')
        self._output = None
        self._target = None
        self._alpha = 1.1
        self._outputs = {}

        # Initialize operators in each degree
        orders = kwargs.get('orders', None)
        if orders is None: orders = list(range(1, self.degree + 1))
        self.orders = orders
        self._init_T()
Example no. 8
def res_00(th, activation='relu'):
    assert isinstance(th, NlsHub)
    model = NeuralNet(th.memory_depth, mark=th.mark, nn_class=Predictor)
    nn = model.nn
    assert isinstance(nn, Predictor)

    # Add blocks
    nn.add(Input([th.memory_depth]))
    nn.add(
        Linear(output_dim=th.hidden_dim,
               weight_regularizer=th.regularizer,
               strength=th.reg_strength))
    nn.add(Activation(activation))

    def add_res_block():
        net = nn.add(ResidualNet())
        net.add(
            Linear(output_dim=th.hidden_dim,
                   weight_regularizer=th.regularizer,
                   strength=th.reg_strength))
        net.add(Activation(activation))
        net.add_shortcut()

    for _ in range(th.num_blocks):
        add_res_block()
    nn.add(Linear(output_dim=1))

    # Build model
    model.default_build(th.learning_rate)

    # Return model
    return model
Example no. 9
def bres_net_res0(th, activation='relu'):
    assert isinstance(th, NlsHub)
    # Initiate a neural net model
    th.mark = '{}-{}'.format(th.mark, 'res')
    model = NeuralNet(th.memory_depth, mark=th.mark, nn_class=BResNet)
    nn = model.nn
    assert isinstance(nn, BResNet)
    nn.strict_residual = False

    # Add layers
    nn.add(Input([th.memory_depth]))
    nn.add(Linear(output_dim=th.hidden_dim))
    nn.add(Activation(activation))
    branch = nn.add_branch()
    branch.add(Linear(output_dim=1))

    def add_res_block():
        net = nn.add(ResidualNet())
        net.add(Linear(output_dim=th.hidden_dim))
        net.add(Activation(activation))
        net.add_shortcut()
        branch = nn.add_branch()
        branch.add(Linear(output_dim=1))

    for _ in range(th.num_blocks - 1):
        add_res_block()
    nn.add(Linear(output_dim=1))

    # Build
    model.default_build(th.learning_rate)

    return model
Example no. 10
def fc_lstm(th):
  assert isinstance(th, Config)
  th.mark = 'fc_lstm_' + th.mark
  # Initiate a model
  model = Classifier(mark=th.mark, net_type=Recurrent)

  # Add input layer
  model.add(Input(sample_shape=th.input_shape))

  # Add fc layers
  for dim in th.fc_dims:
    checker.check_positive_integer(dim)
    model.add(Linear(output_dim=dim))
    # model.add(BatchNorm())
    model.add(Activation('relu'))

  # Add lstm cells
  for dim in th.rc_dims:
    model.add(BasicLSTMCell(state_size=dim))

  # Add output layer
  # model.add(Linear(output_dim=th.output_dim))

  # Build model
  optimizer = tf.train.AdamOptimizer(th.learning_rate)
  model.build(optimizer)

  return model
Example no. 11
def multinput(th):
    assert isinstance(th, Config)
    model = Classifier(mark=th.mark)

    # Add hidden layers
    assert isinstance(th.fc_dims, list)
    subnet = model.add(inter_type=model.CONCAT)
    for dims in th.fc_dims:
        subsubnet = subnet.add()
        # Add input layer
        subsubnet.add(Input(sample_shape=th.input_shape))
        subsubnet.add(Flatten())
        assert isinstance(dims, list)

        for dim in dims:
            subsubnet.add(Linear(output_dim=dim))
            if core.use_bn: subsubnet.add(BatchNormalization())
            subsubnet.add(Activation(th.actype1))

    # Add output layer
    model.add(Linear(output_dim=th.num_classes))

    # Build model
    optimizer = tf.train.AdamOptimizer(learning_rate=th.learning_rate)
    model.build(optimizer=optimizer)

    return model
Example no. 12
def get_container(th, flatten=False):
    assert isinstance(th, Config)
    model = Classifier(mark=th.mark)
    model.add(Input(sample_shape=th.input_shape))
    if th.centralize_data: model.add(Normalize(mu=th.data_mean, sigma=255.))
    if flatten: model.add(Flatten())
    return model
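
The helper above only assembles the shared input pipeline; callers keep stacking layers on the returned Classifier and then build it. A minimal continuation sketch, assuming th is a populated Config and the tframe-style classes used in the other examples are imported in the same module (the output layer and optimizer shown here are illustrative):

# Hypothetical continuation: add an output layer and build the classifier.
model = get_container(th, flatten=True)
model.add(Linear(output_dim=th.num_classes))
model.build(optimizer=tf.train.AdamOptimizer(learning_rate=th.learning_rate))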
Example no. 13
def multinput(th):
    assert isinstance(th, Config)
    model = Classifier(mark=th.mark)

    # Add hidden layers
    assert isinstance(th.fc_dims, list)
    subnet = model.add(inter_type=model.CONCAT)
    for dims in th.fc_dims:
        subsubnet = subnet.add()
        # Add input layer
        subsubnet.add(Input(sample_shape=th.input_shape))
        subsubnet.add(Flatten())
        assert isinstance(dims, list)

        for dim in dims:
            subsubnet.add(Linear(output_dim=dim))
            # if cf10_core.use_bn: subsubnet.add(BatchNormalization())
            subsubnet.add(Activation(th.actype1))

    # Add output layer
    model.add(Linear(output_dim=th.num_classes))

    # Build model
    model.build(metric=['accuracy', 'loss'],
                batch_metric='accuracy',
                eval_metric='accuracy')

    return model
Example no. 14
    def __init__(self,
                 z_dim=None,
                 sample_shape=None,
                 output_shape=None,
                 mark=None,
                 classes=0):
        # Call parent's constructor
        Model.__init__(self, mark)

        # Fields
        self._output_shape = output_shape

        # Define encoder and decoder
        self.Encoder = Net(pedia.Encoder)
        self.Decoder = Net(pedia.Decoder)

        self.Q = self.Encoder
        self.P = self.Decoder

        # If z_dim/sample_shape is provided, define the input for
        #   decoder/encoder accordingly
        if z_dim is not None:
            self.P.add(Input(sample_shape=[None, z_dim], name='z'))
        if sample_shape is not None:
            if not isinstance(sample_shape, (list, tuple)):
                raise TypeError('sample shape must be a list or a tuple')
            self.Q.add(
                Input(sample_shape=[None] + list(sample_shape),
                      name='samples'))

        # Placeholders
        self._sample_num = None
        self._classes = classes
        self._conditional = classes > 0
        if self._conditional:
            self._targets = tf.placeholder(dtype=tf.float32,
                                           shape=[None, classes],
                                           name='one_hot_labels')

        self._P, self._outputs = None, None

        # ...
        pass
Example no. 15
def conv_2d_test(th):
    assert isinstance(th, Config)
    # Initiate model
    th.mark = 'cnn_2d' + th.mark

    def data_dim(sample_rate=44100, duration=2, n_mfcc=40):
        audio_length = sample_rate * duration
        dim = (n_mfcc, 1 + int(np.floor(audio_length / 512)), 1)
        return dim

    dim = data_dim()

    model = Classifier(mark=th.mark)

    # Add input layer
    model.add(Input(sample_shape=[dim[0], dim[1], 1]))
    # Add hidden layers
    model.add(Conv2D(32, (4, 10), padding='same'))
    model.add(BatchNorm())
    model.add(Activation('relu'))
    model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
    # model.add(Dropout(0.7))

    model.add(Conv2D(32, (4, 10), padding='same'))
    model.add(BatchNorm())
    model.add(Activation('relu'))
    model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
    # model.add(Dropout(0.7))

    model.add(Conv2D(32, (4, 10), padding='same'))
    model.add(BatchNorm())
    model.add(Activation('relu'))
    model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
    # model.add(Dropout(0.7))

    model.add(Conv2D(32, (4, 10), padding='same'))
    model.add(BatchNorm())
    model.add(Activation('relu'))
    model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
    # model.add(Dropout(0.7))

    model.add(Flatten())
    model.add(Linear(output_dim=64))
    model.add(BatchNorm())
    model.add(Activation('relu'))

    # Add output layer
    model.add(Linear(output_dim=41))
    model.add(Activation('softmax'))

    # Build model
    optimizer = tf.train.AdamOptimizer(learning_rate=th.learning_rate)
    model.build(optimizer=optimizer)

    return model
Example no. 16
def typical(th, cell):
    assert isinstance(th, Config)
    # Initiate a model
    model = Classifier(mark=th.mark, net_type=Recurrent)
    # Add layers
    model.add(Input(sample_shape=th.input_shape))
    # Add hidden layers
    model.add(cell)
    # Build model and return
    output_and_build(model, th)
    return model
Example no. 17
def get_container(th, flatten=False):
    assert isinstance(th, Config)
    model = Classifier(mark=th.mark)
    model.add(Input(sample_shape=th.input_shape))
    model.add(Normalize(sigma=255.))
    if th.centralize_data: model.add(Normalize(mu=th.data_mean))
    if flatten:
        model.add(Flatten())
        # Register extractor and researcher
        model.register_extractor(mn_du.MNIST.connection_heat_map_extractor)
        monitor.register_grad_researcher(mn_du.MNIST.flatten_researcher)
    return model
Example no. 18
def typical(th, cells):
  assert isinstance(th, Config)
  # Initiate a model
  model = Predictor(mark=th.mark, net_type=Recurrent)
  # Add layers
  model.add(Input(sample_shape=th.input_shape))
  # Add hidden layers
  if not isinstance(cells, (list, tuple)): cells = [cells]
  for cell in cells: model.add(cell)
  # Build model and return
  output_and_build(model, th)
  return model
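
This variant of typical accepts either a single cell or a list of cells. A hedged call sketch, assuming th is a populated Config and BasicLSTMCell is the recurrent cell class used elsewhere in these examples (state sizes are illustrative):

# Single cell and stacked cells are both normalized to a list internally.
model_single = typical(th, BasicLSTMCell(state_size=64))
model_stacked = typical(th, [BasicLSTMCell(state_size=64),
                             BasicLSTMCell(state_size=32)])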
Example no. 19
def conv_test(th):
    assert isinstance(th, Config)
    # Initiate model
    th.mark = 'cnn' + th.mark
    model = Classifier(mark=th.mark)

    # Add input layer
    model.add(Input(sample_shape=[32000, 1]))
    # Add hidden layers
    model.add(Conv1D(filters=16, kernel_size=9, padding='valid'))
    model.add(Activation('relu'))
    model.add(Conv1D(filters=16, kernel_size=9, padding='valid'))
    model.add(Activation('relu'))
    model.add(MaxPool1D(pool_size=16, strides=16))
    # model.add(Dropout(0.9))

    model.add(Conv1D(filters=32, kernel_size=3, padding='valid'))
    model.add(Activation('relu'))
    model.add(Conv1D(filters=32, kernel_size=3, padding='valid'))
    model.add(Activation('relu'))
    model.add(MaxPool1D(pool_size=4, strides=4))
    # model.add(Dropout(0.9))

    model.add(Conv1D(filters=32, kernel_size=3, padding='valid'))
    model.add(Activation('relu'))
    model.add(Conv1D(filters=32, kernel_size=3, padding='valid'))
    model.add(Activation('relu'))
    model.add(MaxPool1D(pool_size=4, strides=4))
    # model.add(Dropout(0.9))
    #
    model.add(Conv1D(filters=256, kernel_size=3, padding='valid'))
    model.add(Activation('relu'))
    model.add(Conv1D(filters=256, kernel_size=3, padding='valid'))
    model.add(Activation('relu'))
    model.add(GlobalMaxPooling1D())
    # model.add(Dropout(0.8))
    #
    model.add(Linear(output_dim=64))
    model.add(Activation('relu'))
    model.add(Linear(output_dim=1028))
    model.add(Activation('relu'))

    # Add output layer
    model.add(Linear(output_dim=41))
    model.add(Activation('softmax'))

    # Build model
    optimizer = tf.train.AdamOptimizer(learning_rate=th.learning_rate)
    model.build(optimizer=optimizer)

    return model
Example no. 20
def vanilla_RNN(mark):
    batch_size = 3
    num_steps = 8
    model = Predictor(mark, net_type=Recurrent)

    # Add functions
    model.add(Input(sample_shape=[1]))
    model.add(BasicRNNCell(state_size=10, inner_struct='concat'))
    model.add(Linear(output_dim=1))

    # Build model
    model._build(loss='euclid', metric='ratio', metric_name='Err %')

    # Return model
    return model
Example no. 21
def res_00(memory,
           blocks,
           order1,
           order2,
           activation='relu',
           learning_rate=0.001):
    # Configurations
    mark = 'res'
    D = memory

    # Initiate model
    model = NeuralNet(memory, mark=mark)
    nn = model.nn
    assert isinstance(nn, Predictor)

    # Add layers
    nn.add(Input([D]))

    def add_res_block():
        net = nn.add(ResidualNet())
        net.add(Linear(output_dim=D))
        net.add(Activation(activation))
        net.add(Linear(output_dim=D))
        net.add_shortcut()
        net.add(Activation(activation))

    def add_res_block_poly():
        net = nn.add(ResidualNet())
        net.add(Linear(output_dim=D))
        net.add(Polynomial(order=order1))
        net.add(Linear(output_dim=D))
        net.add_shortcut()
        net.add(Polynomial(order=order2))

    if activation == 'poly':
        for _ in range(blocks):
            add_res_block_poly()
    else:
        for _ in range(blocks):
            add_res_block()

    nn.add(Linear(output_dim=1))

    # Build model
    model.default_build(learning_rate)

    # Return model
    return model
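
Note that res_00 switches block type on the activation string: 'poly' selects residual blocks built from Polynomial layers of the given orders, while any other value selects plain Linear/Activation blocks. A usage sketch with illustrative parameter values (the tframe imports are assumed to be available):

# 'poly' -> Polynomial residual blocks; anything else -> Linear/Activation blocks.
model_poly = res_00(memory=40, blocks=2, order1=2, order2=3, activation='poly')
model_relu = res_00(memory=40, blocks=2, order1=2, order2=3, activation='relu')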
Example no. 22
def mlp02(mark,
          memory_depth,
          branch_num,
          hidden_dim,
          learning_rate,
          activation,
          identity_init=False):
    # Initiate a neural net
    if identity_init:
        model = NeuralNet(memory_depth,
                          mark=mark,
                          bamboo_broad=True,
                          identity_initial=True)
    else:
        model = NeuralNet(memory_depth,
                          mark=mark,
                          bamboo_broad=True,
                          identity_initial=False)

    nn = model.nn
    assert isinstance(nn, Bamboo_Broad)

    # Add layers
    nn.add(Input([memory_depth]))

    branch = nn.add_branch()
    branch.add(Linear(output_dim=hidden_dim))
    branch.add(Activation(activation))
    branch.add(Linear(output_dim=1))

    for _ in range(branch_num - 1):
        branch = nn.add_branch()
        branch.add(
            Linear(output_dim=hidden_dim,
                   weight_initializer=tf.zeros_initializer(),
                   bias_initializer=tf.zeros_initializer()))
        branch.add(Activation(activation))
        branch.add(
            Linear(output_dim=1,
                   weight_initializer=tf.zeros_initializer(),
                   bias_initializer=tf.zeros_initializer()))

    # Build model
    model.default_build(learning_rate)

    # Return model
    return model
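
In mlp02 only the first branch gets normally initialized weights; every later branch starts from zero-initialized Linear layers, and identity_init merely toggles identity_initial on the underlying NeuralNet. A call sketch with illustrative values (the Bamboo_Broad net class and the tframe imports are assumed to come from the surrounding module):

# Hypothetical invocation; argument values are for illustration only.
model = mlp02(mark='mlp02_demo', memory_depth=40, branch_num=3,
              hidden_dim=80, learning_rate=0.001, activation='relu',
              identity_init=True)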
Example no. 23
def deep_conv(mark):
    # Initiate predictor
    model = Classifier(mark=mark)
    model.add(Input(sample_shape=config.sample_shape))

    def ConvBNReLU(filters, strength=1.0, bn=True):
        model.add(
            Conv2D(filters=filters,
                   kernel_size=5,
                   padding='same',
                   kernel_regularizer=regularizers.L2(strength=strength)))

        if bn:
            model.add(BatchNorm())

        model.add(Activation('relu'))

    # Conv layers
    reg = 1e-5
    ConvBNReLU(32, reg)
    model.add(Dropout(0.5))
    ConvBNReLU(32, reg)
    model.add(MaxPool2D(2, 2, 'same'))

    ConvBNReLU(64, reg)
    model.add(Dropout(0.5))
    ConvBNReLU(64, reg)
    model.add(MaxPool2D(2, 2, 'same'))

    ConvBNReLU(128, reg)

    # FC layers
    model.add(Flatten())
    model.add(Linear(256))
    # model.add(BatchNorm())
    model.add(Activation('relu'))

    model.add(Linear(256))
    # model.add(BatchNorm())
    model.add(Activation('relu'))

    model.add(Linear(config.y_dim))

    # Build model
    model.build(optimizer=tf.train.AdamOptimizer(learning_rate=1e-4))

    return model
Example no. 24
def mlp_res00(mark,
              memory_depth,
              branch_num,
              hidden_dim,
              learning_rate,
              activation,
              identity_init=True):
    # Initiate a neural net
    if identity_init:
        model = NeuralNet(memory_depth,
                          mark=mark,
                          bamboo=True,
                          identity_initial=True)
    else:
        model = NeuralNet(memory_depth,
                          mark=mark,
                          bamboo=True,
                          identity_initial=False)

    nn = model.nn
    assert isinstance(nn, Bamboo)

    # Add layers
    nn.add(Input([memory_depth]))

    for _ in range(branch_num):
        nn.add(Linear(output_dim=hidden_dim))
        nn.add(Activation(activation))
        branch = nn.add_branch()
        branch.add(Linear(output_dim=1))

    resnet = nn.add(ResidualNet())
    resnet.add(Linear(output_dim=hidden_dim))
    resnet.add(Activation(activation))
    resnet.add(Linear(output_dim=hidden_dim))
    resnet.add_shortcut()
    resnet.add(Activation(activation))

    nn.add(Linear(output_dim=hidden_dim))
    nn.add(Activation(activation))
    nn.add(Linear(output_dim=1))

    # Build model
    model.default_build(learning_rate)

    # Return model
    return model
Example no. 25
def pet(memory, hidden_dim, order, learning_rate, mark='pet'):
    # Initiate a predictor
    model = NeuralNet(memory_depth=memory, mark=mark)
    nn = model.nn
    assert isinstance(nn, Predictor)

    # Add layers
    nn.add(Input([memory]))
    nn.add(Linear(output_dim=hidden_dim, use_bias=False))
    nn.add(inter_type=pedia.sum)
    for i in range(1, order + 1):
        nn.add_to_last_net(Homogeneous(order=i))

    # Build model
    model.default_build(learning_rate=learning_rate)

    return model
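
The pet builder routes the hidden Linear layer into a sum-merged sub-net that stacks Homogeneous(order=i) operators for i = 1..order, so the output is the sum of the homogeneous terms of each order. A call sketch with illustrative values (the tframe imports are assumed):

# Hypothetical invocation; values are for illustration only.
model = pet(memory=40, hidden_dim=10, order=3, learning_rate=0.001)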
Example no. 26
def rnn0(th):
    assert isinstance(th, NlsHub)
    # Initiate a neural net model
    nn_class = lambda mark: Predictor(mark=mark, net_type=Recurrent)
    model = NeuralNet(th.memory_depth, mark=th.mark, nn_class=nn_class)
    nn = model.nn
    assert isinstance(nn, Predictor)

    # Add layers
    nn.add(Input(sample_shape=[th.memory_depth]))
    for _ in range(th.num_blocks):
        nn.add(BasicRNNCell(state_size=th.hidden_dim))
    nn.add(Linear(output_dim=1))

    # Build
    model.default_build(th.learning_rate)

    return model
Example no. 27
def lstm(th):
    assert isinstance(th, Config)
    # Initiate model
    th.mark = 'lstm_' + th.mark
    model = Predictor(mark=th.mark, net_type=Recurrent)

    # Add input layer
    model.add(Input(sample_shape=[th.memory_depth]))
    # Add hidden layers
    for _ in range(th.num_blocks):
        model.add(BasicLSTMCell(th.hidden_dim, with_peepholes=False))
    # Add output layer
    model.add(Linear(output_dim=1))

    # Build model
    optimizer = tf.train.AdamOptimizer(learning_rate=th.learning_rate)
    model.build_as_regressor(optimizer)

    return model
Example no. 28
def tlp(memory_depth, hidden_dim, mark='tlp'):
    # Hyper-parameters
    learning_rate = 0.001

    # Initiate a predictor
    model = NeuralNet(memory_depth, mark=mark)
    nn = model.nn
    assert isinstance(nn, Predictor)

    # Add layers
    nn.add(Input([memory_depth]))
    nn.add(Linear(output_dim=hidden_dim))
    nn.add(Activation('sigmoid'))
    nn.add(Linear(output_dim=1, use_bias=False))

    # Build model
    model.default_build(learning_rate=learning_rate)

    return model
Example no. 29
def mlp00(mark):
    # Define model
    model = TDPlayer(mark=mark)

    model.add(Input(sample_shape=[15, 15]))
    model.add(Flatten())

    model.add(Linear(225))
    model.add(Activation.ReLU())

    model.add(Linear(225))
    model.add(Activation.ReLU())

    model.add(Linear(1))
    model.add(Activation('sigmoid'))

    # Build model
    model.build()

    return model
Example no. 30
def vanilla(mark):
    model = Classifier(mark=mark)
    model.add(Input(sample_shape=[784]))

    def fc_bn_relu(bn=True):
        model.add(Linear(100))
        model.add(Activation('relu'))
        if bn:
            model.add(BatchNorm())

    fc_bn_relu()
    fc_bn_relu()

    model.add(Linear(10))

    # Build model
    model.build(loss='cross_entropy',
                optimizer=tf.train.GradientDescentOptimizer(0.01))

    return model