Example #1
def build_model(vocab_size, embedding_size):
    model = Sequential()
    model.add(Embedding(vocab_size, embedding_size))
    model.add(Bidirectional(LSTM(64)))       # bidirectional sequence encoder
    model.add(Dense(64, activation='relu'))
    model.add(Dense(2))  # two-class logits; no softmax, so from_logits=True below

    model.compile(optimizer='adam',
                  loss=SparseCategoricalCrossentropy(from_logits=True),
                  metrics=['accuracy'])

    return model
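
A minimal usage sketch for build_model, assuming integer-encoded token sequences and 0/1 integer labels (the vocabulary size and the random data below are hypothetical):

import numpy as np

# Hypothetical data: 100 sequences of 20 token ids, binary integer labels.
x = np.random.randint(0, 10000, size=(100, 20))
y = np.random.randint(0, 2, size=(100,))

model = build_model(vocab_size=10000, embedding_size=64)
model.fit(x, y, epochs=2, batch_size=32)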
Example #2
def create_detector_model(class_n, roip_fm_shape, class_indices_dict, seed=42):
    he_init = he_uniform(seed)
    gl_init = glorot_uniform(seed)
    input = Input(roip_fm_shape)
    output = Flatten(name='DetectorFlatten')(input)
    output = Dense(units=500,
                   kernel_initializer=he_init,
                   activation='relu',
                   name='DetectorDense1')(output)
    output = Dense(units=500,
                   kernel_initializer=he_init,
                   activation='relu',
                   name='DetectorDense2')(output)
    cls_logits = Dense(
        units=class_n + 1,  # Add background class
        kernel_initializer=gl_init,
        activation='linear',
        name='DetectorClsLogits')(output)
    cls = Activation('softmax', name='DetectorCls')(cls_logits)
    reg = Dense(units=4 * class_n,
                kernel_initializer=he_init,
                activation='linear',
                name='DetectorRegDense')(output)
    # The reshaped tensor is the actual regression output, so it carries the
    # 'DetectorReg' name that the loss and metric dicts below reference.
    reg = Reshape(target_shape=(-1, 4), name='DetectorReg')(reg)
    model = Model(inputs=[input], outputs=[cls_logits, cls, reg])
    # Since only deltas from respective class contribute to the loss
    reg_loss = on_index_wrapper(Huber(), class_indices_dict['not_sign'])
    # Computes the metric per batch - Keras can't reset the metric at batch end if it is wrapped
    reg_metric = on_index_wrapper(mean_absolute_error,
                                  class_indices_dict['not_sign'])
    model.compile(optimizer='adadelta',
                  loss={
                      # DetectorClsLogits outputs raw logits, so from_logits=True
                      'DetectorClsLogits': SparseCategoricalCrossentropy(from_logits=True),
                      'DetectorCls': None,
                      'DetectorReg': reg_loss
                  },
                  metrics={
                      'DetectorCls': SparseCategoricalAccuracy(),
                      'DetectorReg': reg_metric
                  })
    return model
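
on_index_wrapper is not defined in this excerpt. A minimal sketch of what the Loss-object case could look like, assuming each regression target packs four box deltas plus a class id so that boxes of the background ('not_sign') class can be masked out; the target layout and the signature are assumptions, not the original implementation:

import tensorflow as tf

def on_index_wrapper(loss_fn, ignore_index):
    # Assumed y_true layout: (batch, n_boxes, 5) - four box deltas
    # followed by the ground-truth class id per box.
    def wrapped(y_true, y_pred):
        class_ids = tf.cast(y_true[..., 4], tf.int32)
        deltas = y_true[..., :4]
        # A zero sample weight for the ignored (background) class
        # removes those boxes from the loss.
        mask = tf.cast(tf.not_equal(class_ids, ignore_index), tf.float32)
        return loss_fn(deltas, y_pred, sample_weight=mask)
    return wrapped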
Example #3
    def build_network(self, name="", learning_rate=0.001):
        """
        Input : self.width * self.height board (42 squares) + player

            https://keras.io/api/layers/activations/#relu-function
            https://keras.io/api/layers/activations/#softmax-function
            https://keras.io/api/losses/probabilistic_losses/#sparsecategoricalcrossentropy-class
            https://keras.io/api/optimizers/Nadam/
            https://keras.io/api/metrics/accuracy_metrics/#accuracy-class

        Output: 3 => softmax probabilities over 3 classes (the final Dense layer has 3 units)
        """
        print(f"Building model{(' ' + name) if name else ''}...")

        self.model = keras.Sequential(name=name or None)

        # Input layer
        self.model.add(
            Dense(self.input_shape[0], input_dim=self.input_shape[0]))

        # One or more large layers
        self.model.add(Dense(64, activation='relu'))
        self.model.add(Dense(256, activation='relu'))
        self.model.add(Dense(256, activation='relu'))
        # self.model.add(Dense(64, activation='relu'))

        # Smaller ending layer
        self.model.add(Dense(self.board_nodes, activation='relu'))
        # self.model.add(Dense(self.game.width, activation='relu'))

        # Output end layer
        self.model.add(Dense(3, activation='softmax'))

        self.model.compile(loss=SparseCategoricalCrossentropy(),
                           optimizer=Nadam(learning_rate=learning_rate),
                           metrics=["accuracy"])

        self.model.summary()
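
Since these models compile with SparseCategoricalCrossentropy(), the training labels are expected to be integer class ids (0, 1, or 2 here) rather than one-hot vectors; CategoricalCrossentropy is the one-hot counterpart. A minimal sketch of the difference (the probabilities are illustrative):

import numpy as np
from keras.losses import SparseCategoricalCrossentropy, CategoricalCrossentropy

probs = np.array([[0.7, 0.2, 0.1], [0.1, 0.8, 0.1]])

# Integer labels: one class id per sample.
sparse_labels = np.array([0, 1])
print(SparseCategoricalCrossentropy()(sparse_labels, probs).numpy())

# One-hot labels: the equivalent loss via CategoricalCrossentropy.
onehot_labels = np.array([[1., 0., 0.], [0., 1., 0.]])
print(CategoricalCrossentropy()(onehot_labels, probs).numpy())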
Example #4
    def build_network(self, name="", learning_rate=0.001):
        """
        Input : self.width * self.height board (7 * 6 squares)
        Output: 3 => softmax probabilities over 3 classes (the final Dense layer has 3 units)
        """
        print(f"Building model{(' ' + name) if name else ''}...")

        self.model = keras.Sequential(name=name or None)

        # Input layer
        self.model.add(
            keras.layers.Conv2D(8,
                                kernel_size=self.game.consecutive // 2,
                                activation='relu',
                                input_shape=self.input_shape,
                                data_format="channels_last"))
        # self.model.add(keras.layers.MaxPool2D(2, strides=1))

        # One or more large layers
        # self.model.add(keras.layers.Conv2D(64, kernel_size=self.game.consecutive // 2, activation='relu'))

        self.model.add(keras.layers.Flatten())
        # self.model.add(Dense(256, activation='relu'))
        # self.model.add(Dense(256, activation='relu'))
        # self.model.add(Dense(64, activation='relu'))

        # Smaller ending layer
        self.model.add(Dense(self.board_nodes, activation='relu'))

        # Output end layer
        self.model.add(Dense(3, activation='softmax'))

        self.model.compile(loss=SparseCategoricalCrossentropy(),
                           optimizer=Nadam(learning_rate=learning_rate),
                           metrics=["accuracy"])

        self.model.summary()
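
The Conv2D input here uses data_format="channels_last", so a raw 7x6 board needs an explicit channel axis. A small sketch of preparing such input, assuming a single-channel board (the shapes are illustrative):

import numpy as np

# Hypothetical batch of 32 raw boards, 7 columns x 6 rows.
boards = np.random.randint(0, 3, size=(32, 7, 6)).astype("float32")

# channels_last expects (batch, height, width, channels):
# add a trailing channel axis so the shape becomes (32, 7, 6, 1).
boards = boards[..., np.newaxis]
print(boards.shape)  # (32, 7, 6, 1)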
Example #5
    def build_network(self, name="", learning_rate=0.001):
        """
        Input : self.width * self.height board (42 squares) + player
        Output: 3 => softmax probabilities over 3 classes (the final Dense layer has 3 units)
        """
        print(f"Building model{(' ' + name) if name else ''}...")

        self.model = keras.Sequential(name=name or None)

        # Input layer
        self.model.add(
            Dense(self.input_shape[0], input_dim=self.input_shape[0]))

        # Larger hidden layer
        self.model.add(Dense(self.board_nodes * 2, activation='relu'))

        # Output end layer
        self.model.add(Dense(3, activation='softmax'))

        self.model.compile(loss=SparseCategoricalCrossentropy(),
                           optimizer=Nadam(learning_rate=learning_rate),
                           metrics=["accuracy"])

        self.model.summary()
Example #6
from keras.losses import SparseCategoricalCrossentropy
from keras.optimizers import Adam
from constants.trainingConstants import *
import utils
import trainingFunctions
from resnet_v1 import ResNet_v1

training_loss = None
if LOSS == 'SCCE':
    training_loss = SparseCategoricalCrossentropy()

training_opti = None
if OPTI == 'ADAM':
    training_opti = Adam()

model = ResNet_v1.build(width=IMG_WIDTH,
                        height=IMG_HEIGHT,
                        depth=IMG_DEPTH,
                        classes=NB_CLASSES,
                        stages=STAGES,
                        filters=FILTERS,
                        se=SE_MODULES)

model.compile(
    optimizer=training_opti,
    loss=training_loss,
    metrics=['accuracy'],
)

history = trainingFunctions.training_augmented(model, EPOCHS, SEED)
utils.plot_training_results(history, EPOCHS, model, save=True)
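
This script pulls its configuration from constants.trainingConstants via a star import. A minimal sketch of what that module might define, just so the snippet is self-contained (every value below is a hypothetical placeholder, not the original configuration):

# constants/trainingConstants.py (hypothetical placeholder values)
LOSS = 'SCCE'        # selects SparseCategoricalCrossentropy above
OPTI = 'ADAM'        # selects Adam above
IMG_WIDTH = 224
IMG_HEIGHT = 224
IMG_DEPTH = 3
NB_CLASSES = 10
STAGES = (3, 4, 6)   # residual stages; exact shape depends on ResNet_v1.build
FILTERS = (64, 128, 256, 512)
SE_MODULES = False   # whether to use squeeze-and-excitation blocks
EPOCHS = 50
SEED = 42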
Example #7
start_time = time.time()    # -------------------------------------------------┐
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()

train_images = train_images / 255.0
test_images = test_images / 255.0
preprocess_time = time.time() - start_time   # --------------------------------┘

start_time = time.time()    # -------------------------------------------------┐
model = Sequential()
model.add(Flatten(input_shape=(28, 28)))
for hidden in options.hidden:
    model.add(Dense(hidden, activation='relu'))
model.add(Dense(10))  # 10-class logits; paired with from_logits=True below

model.compile(optimizer='adam',
              loss=SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
startup_time = time.time() - start_time   # -----------------------------------┘

model.summary()
custom_logger = CustomLogger(options.log_path)

start_time = time.time()    # -------------------------------------------------┐
model.fit(train_images, train_labels,
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
          validation_data=(test_images, test_labels),
          callbacks=[custom_logger])
train_time = time.time() - start_time   # -------------------------------------┘
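
CustomLogger is not defined in this excerpt. A minimal sketch of a Keras callback that could fill that role, appending per-epoch metrics to a log file; the class body is an assumption, only the name and constructor argument come from the snippet:

import json
from keras.callbacks import Callback

class CustomLogger(Callback):
    """Hypothetical reconstruction: append each epoch's metrics as a JSON line."""

    def __init__(self, log_path):
        super().__init__()
        self.log_path = log_path

    def on_epoch_end(self, epoch, logs=None):
        with open(self.log_path, "a") as f:
            f.write(json.dumps({"epoch": epoch, **(logs or {})}) + "\n")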
Example #8
    def _assemble_network(self) -> Model:
        """
        Assembles the network as Keras Model.

        Every layer needs to be named correctly:
        Input layers must be named: "input_team[1|2]_<feature_name>".
        Intermediate layers must be named "head[1|2]_<layer_name>".
        Output layer must be named: "output".

        Head2 is essentially a mirror of the main head, with its layers
        frozen (trainable=False).

        A seed is set for each initializer to improve reproducibility.

        :return: Keras model.
        """
        head1_inputs = []
        head2_inputs = []

        for f in self._features:
            if f in FEATURES_TO_LENC:
                head1_inputs.append(
                    Input(batch_shape=(BATCH_SIZE, None, self._lenc_bitlen),
                          name=f"input_team1_{f}"))
                head2_inputs.append(
                    Input(batch_shape=(BATCH_SIZE, None, self._lenc_bitlen),
                          name=f"input_team2_{f}"))
            else:
                head1_inputs.append(
                    Input(batch_shape=(BATCH_SIZE, None, 1),
                          name=f"input_team1_{f}"))
                head2_inputs.append(
                    Input(batch_shape=(BATCH_SIZE, None, 1),
                          name=f"input_team2_{f}"))

        # Main head
        head1_input_concat = concatenate(inputs=head1_inputs,
                                         name="head1_input_concat")
        head1_rnn1 = LSTM(35,
                          dropout=self._dropout,
                          stateful=STATEFUL,
                          return_sequences=False,
                          kernel_regularizer=l2(0.01),
                          kernel_initializer=glorot_uniform(self._seed),
                          name="head1_rnn1")(head1_input_concat)
        head1_fc1 = Dense(15,
                          activation="elu",
                          kernel_regularizer=l2(0.01),
                          kernel_initializer=glorot_uniform(self._seed),
                          name="head1_fc1")(head1_rnn1)

        # Head2
        head2_input_concat = concatenate(inputs=head2_inputs,
                                         name="head2_input_concat")
        head2_rnn1 = LSTM(35,
                          dropout=self._dropout,
                          stateful=STATEFUL,
                          return_sequences=False,
                          kernel_regularizer=l2(0.01),
                          trainable=False,
                          kernel_initializer=glorot_uniform(self._seed),
                          name="head2_rnn1")(head2_input_concat)
        head2_fc1 = Dense(15,
                          activation="elu",
                          kernel_regularizer=l2(0.01),
                          trainable=False,
                          kernel_initializer=glorot_uniform(self._seed),
                          name="head2_fc1")(head2_rnn1)

        joint_concat = concatenate([head1_fc1, head2_fc1], name="joint_concat")
        output = Dense(2,
                       activation="softmax",
                       kernel_initializer=glorot_uniform(self._seed),
                       name="output")(joint_concat)

        model = Model(inputs=head1_inputs + head2_inputs,
                      outputs=output,
                      name=self._team_name)

        model.compile(optimizer=Adam(learning_rate=self._lr,
                                     clipvalue=0.5,
                                     epsilon=1e-7),
                      loss=SparseCategoricalCrossentropy(),
                      metrics=["acc"])

        return model
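
Since head2's layers are frozen (trainable=False), they presumably receive their weights from the trained head1 between training phases. A sketch of one way to do that mirroring with the standard Keras layer API; the sync step itself is an assumption, only the layer names come from the code above:

def sync_heads(model):
    # Copy the trained head1 weights onto the frozen head2 layers.
    for src, dst in (("head1_rnn1", "head2_rnn1"), ("head1_fc1", "head2_fc1")):
        model.get_layer(dst).set_weights(model.get_layer(src).get_weights())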
Example #9
model = keras.Sequential([
    # NOTE: the opening layers of this example are missing from the source;
    # only the tail of the first visible Conv2D survives, so its filter
    # count (384) is an assumption.
    keras.layers.Conv2D(filters=384,
                        kernel_size=(3, 3),
                        strides=(1, 1),
                        activation='relu',
                        padding="same"),
    keras.layers.BatchNormalization(),
    keras.layers.Conv2D(filters=384,
                        kernel_size=(3, 3),
                        strides=(1, 1),
                        activation='relu',
                        padding="same"),
    keras.layers.BatchNormalization(),
    keras.layers.Conv2D(filters=256,
                        kernel_size=(3, 3),
                        strides=(1, 1),
                        activation='relu',
                        padding="same"),
    keras.layers.BatchNormalization(),
    keras.layers.MaxPool2D(pool_size=(3, 3), strides=(2, 2)),
    keras.layers.Flatten(),
    keras.layers.Dense(4096, activation='relu'),
    keras.layers.Dropout(0.5),
    keras.layers.Dense(4096, activation='relu'),
    keras.layers.Dropout(0.5),
    keras.layers.Dense(10, activation='softmax')
])

model.compile(optimizer=Adam(),
              loss=SparseCategoricalCrossentropy(),
              metrics=['accuracy'])

history = model.fit(x=train_data, y=train_label, epochs=10)
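
model.fit returns a History object whose history dict maps each compiled metric to a per-epoch list; a short sketch for inspecting the run afterwards (the matplotlib plotting is illustrative):

import matplotlib.pyplot as plt

plt.plot(history.history['loss'], label='loss')
plt.plot(history.history['accuracy'], label='accuracy')
plt.xlabel('epoch')
plt.legend()
plt.show()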