Example 1
def model(th):
    assert isinstance(th, m.Config)
    layers = [Conveyor(length=th.conveyor_length)]
    # Bilinear part
    bl_config = m.bl_config()
    for d1, d2 in bl_config[:-1]:
        layers.append(Bilinear(d1, d2, 'relu', max_norm=th.max_norm))
        if th.dropout > 0: layers.append(Dropout(1 - th.dropout))

    d1, d2 = bl_config[-1]
    layers.append(Bilinear(d1, d2, 'tanh', max_norm=th.max_norm))
    layers.append(Flatten())

    # GAM-RHN part
    layers.append(
        GamRHN(
            gam_config=th.gam_config,
            head_size=th.head_size,
            state_size=th.state_size,
            num_layers=th.num_layers,
            kernel=th.hyper_kernel,
            gam_dropout=th.gam_dropout,
            rhn_dropout=th.rhn_dropout,
        ))
    return m.typical(th, layers)
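
Note: Conveyor, Bilinear, GamRHN and the m helper module are components of the surrounding (tframe-style) codebase, not standard TensorFlow. For readers without that codebase, here is a minimal sketch of the same assembly pattern in plain tf.keras, with Dense layers standing in for the bilinear blocks (an assumption for illustration only). Note also that Dropout(1 - th.dropout) above suggests the codebase's Dropout takes a keep probability, whereas the Keras layer takes a drop rate:

import tensorflow as tf

def build_sketch(block_dims, dropout=0.1):
    # Sketch only: stack blocks from a config list, with dropout between
    # blocks, 'tanh' on the last block instead of 'relu', then flatten.
    layers = []
    for dim in block_dims[:-1]:
        layers.append(tf.keras.layers.Dense(dim, activation='relu'))
        if dropout > 0:
            # Keras Dropout takes the *drop* rate, not a keep probability.
            layers.append(tf.keras.layers.Dropout(dropout))
    layers.append(tf.keras.layers.Dense(block_dims[-1], activation='tanh'))
    layers.append(tf.keras.layers.Flatten())
    return tf.keras.Sequential(layers)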
Example 2
def output_and_build(model, th):
    assert isinstance(model, Classifier)
    assert isinstance(th, Config)
    # Add dropout if necessary
    if th.output_dropout > 0: model.add(Dropout(1 - th.output_dropout))
    # Add output layer
    model.add(Dense(num_neurons=th.output_dim))
    model.add(Activation('softmax'))

    model.build(loss='cross_entropy', metric='bpc', batch_metric='bpc')
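
Examples 2, 3 and 5 are variants of one output-head pattern: optional dropout, a Dense layer followed by softmax, and a build call that differs only in loss and metrics ('bpc' here, 'f1' in Example 3, 'accuracy' in Example 5). A rough tf.keras equivalent of the shared pattern, assuming standard Keras APIs; output_and_compile and its parameters are illustrative names, not from the source:

import tensorflow as tf

def output_and_compile(model, output_dim, output_dropout=0.0):
    # Sketch of the output head: [Dropout] -> Dense -> softmax -> compile.
    if output_dropout > 0:
        # Keras Dropout takes the drop rate; the snippets above pass
        # 1 - output_dropout, which looks like a keep probability.
        model.add(tf.keras.layers.Dropout(output_dropout))
    model.add(tf.keras.layers.Dense(output_dim, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model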
Example 3
def output_and_build(model, th):
    assert isinstance(model, Classifier) and isinstance(th, Config)
    # Add output dropout if necessary
    if th.output_dropout > 0: model.add(Dropout(1 - th.output_dropout))
    # Add output layer
    model.add(Dense(num_neurons=th.output_dim))
    model.add(Activation('softmax'))

    # Build model
    model.build(loss=th.loss_string, metric=['loss', 'f1'], batch_metric='f1')
Example 4
def add_encoder_block(filters,
                      kernel_size=3,
                      add_pool=True,
                      drop_out=False):
    # `model` is captured from the enclosing scope; this is a nested helper.
    model.add(Conv2D(filters, kernel_size))
    model.add(Activation.ReLU())
    model.add(Conv2D(filters, kernel_size))
    output = model.add(Activation.ReLU())
    if drop_out: output = model.add(Dropout(0.5))
    if add_pool: model.add(MaxPool2D((2, 2), 2))
    return output
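
This is the standard U-Net encoder block: two 3x3 convolutions with ReLU, optional dropout, and a 2x2 max-pool that halves the spatial resolution. The pre-pool activation is returned so a decoder can later concatenate it as a skip connection (compare Example 7). A self-contained tf.keras functional-style sketch of the same block, with illustrative names:

import tensorflow as tf

def encoder_block(x, filters, kernel_size=3, add_pool=True, drop_out=False):
    # Two Conv-ReLU layers, as in the example above.
    x = tf.keras.layers.Conv2D(filters, kernel_size, padding='same',
                               activation='relu')(x)
    x = tf.keras.layers.Conv2D(filters, kernel_size, padding='same',
                               activation='relu')(x)
    if drop_out:
        x = tf.keras.layers.Dropout(0.5)(x)
    skip = x  # pre-pool tensor, kept for the decoder's skip connection
    if add_pool:
        x = tf.keras.layers.MaxPool2D((2, 2), strides=2)(x)
    return x, skip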
Example 5
def output_and_build(model, th):
    assert isinstance(model, Classifier)
    assert isinstance(th, Config)
    # Add dropout if necessary
    if th.output_dropout > 0: model.add(Dropout(1 - th.output_dropout))
    # Add output layer
    model.add(Dense(num_neurons=th.output_dim))
    model.add(Activation('softmax'))

    model.build(last_only=True,
                metric=['accuracy', 'loss'],
                batch_metric='accuracy',
                eval_metric='accuracy')
Example 6
def typical(th, cells):
    assert isinstance(th, Config)

    # Initiate a model
    model = Classifier(mark=th.mark, net_type=Recurrent)

    # Add layers
    model.add(Input(sample_shape=th.input_shape, dtype=tf.int32))
    model.add(Onehot(depth=th.num_classes))
    emb_init = tf.initializers.random_uniform(-1, 1)
    model.add(Dense(th.hidden_dim, use_bias=False,
                    weight_initializer=emb_init))

    if th.input_dropout > 0: model.add(Dropout(1 - th.input_dropout))
    # Add hidden layers
    if not isinstance(cells, (list, tuple)): cells = [cells]
    for cell in cells:
        model.add(cell)
    # Build model and return
    output_and_build(model, th)
    return model
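
typical assembles the whole recurrent classifier: integer input, one-hot encoding, a bias-free Dense acting as a trainable embedding with uniform init in [-1, 1], optional input dropout, the given recurrent cell(s), and finally the output_and_build head shown above. A rough tf.keras sketch of the same wiring, under the assumption that an Embedding layer is equivalent to one-hot followed by a bias-free Dense (function name is illustrative):

import tensorflow as tf

def typical_sketch(num_classes, hidden_dim, cells, input_dropout=0.0):
    # Sketch only: Embedding ~ one-hot + bias-free Dense, uniform init.
    model = tf.keras.Sequential()
    model.add(tf.keras.layers.Embedding(
        num_classes, hidden_dim,
        embeddings_initializer=tf.keras.initializers.RandomUniform(-1, 1)))
    if input_dropout > 0:
        model.add(tf.keras.layers.Dropout(input_dropout))  # drop rate
    if not isinstance(cells, (list, tuple)): cells = [cells]
    for cell in cells:
        # A character-level model predicts at every step, so each
        # recurrent layer returns full sequences.
        model.add(tf.keras.layers.RNN(cell, return_sequences=True))
    return model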
Example 7
  def _add_layers(self):

    # Construct left half
    layers_to_link = []
    for i, filters in enumerate(self.num_filter_list):
      # Add conv layer
      for _ in range(self.left_repeats):
        last_layer = self._add_conv(filters, kernel_size=3)
      # Add dropout layer if necessary
      if i + 1 in (self.net_height, self.net_height - 1) and self.dropout_rate:
        last_layer = self.add(Dropout(1 - self.dropout_rate))
      # Save to layer list for future linking if necessary
      if i + 1 != self.net_height:
        layers_to_link.append(last_layer)
        # Add pooling layer
        self.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))

    # Construct right half
    for filters, pre_layer in zip(
        reversed(self.num_filter_list[:-1]), reversed(layers_to_link)):
      # Up-sampling
      self.add(Deconv2D(
        filters, kernel_size=2, strides=2, activation=self.activation,
        kernel_initializer=self.kernel_initializer))
      # Add Conv layer
      # self._add_conv(filters, kernel_size=2)
      # Merge
      self.add(Concatenate(pre_layer))
      # Add Conv layers
      for _ in range(self.right_repeats): self._add_conv(filters, kernel_size=3)

    # Add output layer
    if self.num_classes == 2:
      self._add_conv(2, kernel_size=3)
      self._add_conv(1, kernel_size=1, use_activation=False)
      self.add(Activation('sigmoid'))
    else:
      self._add_conv(self.num_classes, kernel_size=3)
      self._add_conv(self.num_classes, kernel_size=1, use_activation=False)
      self.add(Activation('softmax'))
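
_add_layers builds a complete U-Net: the left half stacks conv blocks and records each pre-pool activation in layers_to_link; the right half upsamples with Deconv2D, concatenates the matching saved activation, and convolves again; the head is a 1x1 conv with sigmoid for two classes or softmax otherwise. A compact, self-contained tf.keras functional sketch of the same wiring (names and filter counts are illustrative, and each level uses a single conv rather than the configurable repeats above):

import tensorflow as tf

def unet_sketch(input_shape, filter_list=(64, 128, 256), num_classes=2):
    inputs = tf.keras.Input(shape=input_shape)
    x, skips = inputs, []
    # Left half: conv blocks; remember pre-pool tensors for skip links.
    for i, f in enumerate(filter_list):
        x = tf.keras.layers.Conv2D(f, 3, padding='same', activation='relu')(x)
        if i + 1 != len(filter_list):
            skips.append(x)
            x = tf.keras.layers.MaxPool2D(2, strides=2)(x)
    # Right half: upsample, concatenate the matching skip, conv again.
    for f, skip in zip(reversed(filter_list[:-1]), reversed(skips)):
        x = tf.keras.layers.Conv2DTranspose(f, 2, strides=2, padding='same',
                                            activation='relu')(x)
        x = tf.keras.layers.Concatenate()([x, skip])
        x = tf.keras.layers.Conv2D(f, 3, padding='same', activation='relu')(x)
    # Output head: sigmoid for binary segmentation, softmax otherwise.
    if num_classes == 2:
        x = tf.keras.layers.Conv2D(1, 1, activation='sigmoid')(x)
    else:
        x = tf.keras.layers.Conv2D(num_classes, 1, activation='softmax')(x)
    return tf.keras.Model(inputs, x)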