Example #1
    def __init__(self, model_path, class_labels_json):
        # The raw JSON label map is stored first; read_class_labels_if_exists()
        # then replaces it with the parsed labels when the file exists.
        self.class_labels = class_labels_json
        self.height = 150
        self.width = 150
        self.model_path = model_path
        self.model = Sequential()
        self.class_labels = self.read_class_labels_if_exists()
Example #2
    def _make_layer(self,
                    name,
                    block,
                    out_channels,
                    num_blocks,
                    stride=1,
                    dilate=False):
        # Construct a stack of residual blocks.
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != out_channels * block.expansion:
            downsample = Sequential([
                layers.Conv2D(out_channels * block.expansion, (1, 1), stride),
                layers.BatchNormalization()
            ])

        self.inplanes = out_channels * block.expansion

        blocks = [
            block(out_channels=out_channels,
                  stride=stride,
                  downsample=downsample,
                  base_width=self.base_width,
                  dilation=previous_dilation)
        ]
        for _ in range(1, num_blocks):
            blocks.append(
                block(out_channels=out_channels,
                      base_width=self.base_width,
                      dilation=self.dilation))

        return Sequential(layers=blocks, name=name)
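The block class consumed by _make_layer is not shown here. Below is a minimal sketch of a compatible block, assuming the same tensorflow.keras imports as the snippet above; it is an illustration, not the project's actual block class.

import tensorflow as tf
from tensorflow.keras import layers

class BasicBlock(layers.Layer):
    expansion = 1  # _make_layer scales out_channels by this factor

    def __init__(self, out_channels, stride=1, downsample=None,
                 base_width=64, dilation=1, **kwargs):
        super().__init__(**kwargs)
        # base_width is accepted for interface compatibility; this basic
        # variant does not use it.
        self.conv1 = layers.Conv2D(out_channels, 3, stride, padding='same',
                                   dilation_rate=dilation, use_bias=False)
        self.bn1 = layers.BatchNormalization()
        self.conv2 = layers.Conv2D(out_channels, 3, 1, padding='same',
                                   use_bias=False)
        self.bn2 = layers.BatchNormalization()
        self.downsample = downsample  # projects the identity path when shapes change

    def call(self, x, training=False):
        identity = x if self.downsample is None else self.downsample(x, training=training)
        out = tf.nn.relu(self.bn1(self.conv1(x), training=training))
        out = self.bn2(self.conv2(out), training=training)
        return tf.nn.relu(out + identity)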
Example #3
def make_resnet(net):
    match = re.match(r'([a-z]*)(\d+)', net)
    net_type, n_layers = match.group(1), match.group(2)
    residual = (net_type == 'resnet')
    n = (int(n_layers) - 2) // 6
    print("Net: detected n = {} {} shortcuts".format(
        n, 'with' if residual else 'without'))

    Block = ConvBNReluResidualBlock if residual else ConvBNReluBlock
    _layers = [
        ConvBNRelu(strides=1, filters=16, input_shape=(32, 32, 3)),
        Block(strides=1, filters=16),
        [Block(strides=1, filters=16) for _ in range(n - 1)],
        Block(strides=2, filters=32),
        [Block(strides=1, filters=32) for _ in range(n - 1)],
        Block(strides=2, filters=64),
        [Block(strides=1, filters=64) for _ in range(n - 1)],
        GlobalAveragePooling2D(),
        Dense(10, 'softmax')
    ]

    # Flatten one level of nesting: plain layers pass through, sub-lists are
    # expanded (Keras layers are not iterable, so they raise TypeError here).
    layers = []
    for x in _layers:
        try:
            for l in x:
                layers.append(l)
        except TypeError:
            layers.append(x)

    model = Sequential(layers)
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['acc'])
    return model
Example #4
    def train(self):

        self.model = Sequential()
        self.model.add(Dense(self.n1, activation=tf.nn.relu, use_bias=self.bias, input_shape=(1,)))
        if self.n2 != 0:
            self.model.add(Dense(self.n2, activation=tf.nn.relu, use_bias=self.bias))
        self.model.add(Dense(1))
        self.model.compile(optimizer=self.optimizer, loss=self.loss, metrics=['mse'])

        self.keys, self.values = import_data(self.filename)
        self.model.fit(self.keys, self.values, epochs=self.epochs, batch_size=self.batch_size, verbose=self.verbose,
                       validation_split=self.validation_split)

        if not os.path.exists("models_tf/{}".format(self.identifier)):
            os.makedirs("models_tf/{}".format(self.identifier))
        self.model.save("models_tf/{}/super_layer.h5".format(self.identifier))

        converter = tf.lite.TFLiteConverter.from_keras_model(self.model)  # TF 2.0
        # converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]

        # The following enables further optimization of the weights; note that it causes a massive drop in performance.
        converter.optimizations = [tf.lite.Optimize.DEFAULT]

        sample = tf.cast(self.keys, tf.float32)
        sample = tf.data.Dataset.from_tensor_slices(sample).batch(1)
        def representative_data_gen():
            for input_value in sample.take(1300000):
                yield [input_value]
        converter.representative_dataset = representative_data_gen

        tflite_model = converter.convert()

        open("models_tf/{}/super_layer.tflite".format(self.identifier), "wb").write(tflite_model)
Example #5
    def create_model(self):
        model = Sequential()
        model.add(Dense(512, activation=relu, input_dim=self.state_size))
        model.add(Dense(512, activation=relu))
        model.add(Dense(self.action_size, activation=softmax))
        model.compile(optimizer=adam(), loss=categorical_crossentropy)
        return model
Example #6
    def train(self):
        batch_size = 64
        nb_epoch = 100
        if self.has_train:
            nb_epoch = nb_epoch - self.epoch
            print('new epoch', nb_epoch)
            self.model.fit(self.X_train,
                           self.y_train,
                           batch_size=batch_size,
                           epochs=nb_epoch,
                           callbacks=[self.checkpointer, self.csv_logger])
        else:
            # 1. define the network
            self.model = Sequential()
            self.model.add(Dense(1024, input_dim=41, activation='relu'))
            self.model.add(Dropout(0.01))
            self.model.add(Dense(1))
            self.model.add(Activation('sigmoid'))
            self.model.compile(loss='binary_crossentropy',
                               optimizer='adam',
                               metrics=['accuracy'])

            self.model.fit(self.X_train,
                           self.y_train,
                           batch_size=batch_size,
                           epochs=nb_epoch,
                           callbacks=[self.checkpointer, self.csv_logger])
            self.model.save("./dnn1layer_model.hdf5")
        score, acc = self.model.evaluate(self.X_test, self.y_test)
        print('Test score:', score)
        print('Test accuracy:', acc)
Example #7
    def __init__(self, channel, reduction=16):
        super().__init__()
        self.fc = Sequential()
        self.fc.add(layers.GlobalAveragePooling1D())
        self.fc.add(layers.Dense(channel // reduction, activation='relu'))
        self.fc.add(layers.Dense(channel))
        self.fc.add(layers.Activation('sigmoid'))
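This example defines only the squeeze-and-excitation gate; the call() that applies it is not shown. A hedged sketch of the usual gating step, assuming import tensorflow as tf and a (batch, steps, channel) input layout:

    def call(self, x):
        # self.fc(x) squeezes x to per-channel statistics and produces a
        # (batch, channel) gate in [0, 1]
        scale = self.fc(x)
        return x * tf.expand_dims(scale, axis=1)  # channel-wise reweighting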
Example #8
    def build_q_CNN(self):

        self.model = Sequential()

        self.model.add(
            Conv2D(32,
                   kernel_size=8,
                   activation='relu',
                   strides=4,
                   input_shape=self.input_shape))

        self.model.add(Conv2D(64, kernel_size=4, activation='relu', strides=2))

        self.model.add(Conv2D(64, kernel_size=3, activation='relu', strides=1))

        self.model.add(Flatten())

        self.model.add(Dense(512, activation='relu'))

        self.model.add(Dense(self.action_size, activation='linear'))

        self.model.compile(loss=self.loss_metric,
                           optimizer=Adam(lr=self.learning_rate,
                                          decay=self.learning_rate_decay))

        return self.model
Example #9
def make_resnet(net):
    n, residual = parse_net(net)
    print("Net: detected n = {} {} shortcuts".format(
        n, 'with' if residual else 'without'))

    Block = ConvBNReluResidualBlock if residual else ConvBNReluBlock
    layers = [
        ConvBNRelu(strides=1,
                   kernel_size=3,
                   filters=16,
                   input_shape=(32, 32, 3)),
        Block(strides=1, kernel_size=3, filters=16),
        [Block(strides=1, kernel_size=3, filters=16) for _ in range(n - 1)],
        Block(strides=2, kernel_size=3, filters=32),
        [Block(strides=1, kernel_size=3, filters=32) for _ in range(n - 1)],
        Block(strides=2, kernel_size=3, filters=64),
        [Block(strides=1, kernel_size=3, filters=64) for _ in range(n - 1)],
        GlobalAveragePooling2D(),
        Dense(10, 'softmax')
    ]
    layers = flatten_layers(layers)

    model = Sequential(layers)
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['acc'])
    return model
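parse_net and flatten_layers are referenced but not shown in Example #9. Below are minimal sketches consistent with how they are used above and with the inline versions in Example #3; they are assumptions, not the project's actual helpers.

import re

def parse_net(net):
    # e.g. 'resnet20' -> (3, True); 'plain20' -> (3, False)
    net_type, n_layers = re.match(r'([a-z]*)(\d+)', net).groups()
    return (int(n_layers) - 2) // 6, net_type == 'resnet'

def flatten_layers(nested):
    # Expand one level of sub-lists into a flat layer list.
    flat = []
    for x in nested:
        if isinstance(x, list):
            flat.extend(x)
        else:
            flat.append(x)
    return flat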
Example #10
    def _build_model(self):
        model = Sequential()
        model.add(Dense(24, input_dim=self.state_size, activation='relu'))
        model.add(Dense(24, activation='relu'))
        model.add(Dense(self.action_size, activation='linear'))
        model.compile(loss='mse', optimizer=Adam(lr=self.learning_rate))
        return model
Example #11
    def build_q_dense_from_json(self):
        self.model = Sequential()

        len_layers = len(self.layers)
        assert len_layers >= 2, 'You must have a network of at least 2 layers'

        for i, units in enumerate(self.layers):

            if i == 0:
                self.model.add(
                    Dense(units=units,
                          input_dim=self.observations_size,
                          activation='relu',
                          name='Input_state'))

            else:
                self.model.add(
                    Dense(units=units, activation='relu',
                          name='Hidden_layer_{}'.format(i)))  # unique name per layer; Keras rejects duplicates

        self.model.add(
            Dense(self.action_size,
                  activation='linear',
                  name='Output_Q_action'))

        self.model.compile(loss=self.loss_metric,
                           optimizer=Adam(lr=self.learning_rate,
                                          decay=self.learning_rate_decay))

        return self.model
Example #12
    def get_bidirectional_model(self, pre_embeddings, dp_rate=0.0, use_lstm=False):
        """
        follow the common model construction step shown in keras manual
        :param pre_embeddings:
        :param dp_rate: drop out rate
        :param use_lstm: utilize LSTM or GRU unit
        :return: the model
        """
        # The embedding part could use multiple channels, as in the original paper
        embedding_layer = Embedding(self.max_features,  # vocabulary size
                                    self.embedding_dims,  # word-vector dimension
                                    weights=[pre_embeddings],  # pretrained word vectors
                                    input_length=self.maxlen,  # maximum sentence length
                                    trainable=False  # whether to update the embeddings during training
                                    )
        model = Sequential()
        model.add(embedding_layer)

        if use_lstm:
            model.add(Bidirectional(LSTM(RNN_DIM, recurrent_dropout=dp_rate)))
        else:
            model.add(Bidirectional(GRU(RNN_DIM, recurrent_dropout=dp_rate)))

        # model.add(Dropout(dp_rate))
        model.add(Dense(self.class_num, activation=self.last_activation))

        return model
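A hedged usage sketch; clf (the owning classifier object) and emb_matrix are made-up names, and note that the method itself returns an uncompiled model:

model = clf.get_bidirectional_model(pre_embeddings=emb_matrix, dp_rate=0.2, use_lstm=True)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])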
Example #13
File: nn.py Project: zhxuan300/FATE
def build_nn_model(input_shape,
                   nn_define,
                   loss,
                   optimizer,
                   metrics,
                   is_supported_layer=has_builder,
                   default_layer=None) -> KerasNNModel:
    model = Sequential()
    is_first_layer = True
    for layer_config in nn_define:
        layer = layer_config.get("layer", default_layer)
        if layer and is_supported_layer(layer):
            del layer_config["layer"]
            if is_first_layer:
                layer_config["input_shape"] = input_shape
                is_first_layer = False
            builder = get_builder(layer)
            model.add(builder(**layer_config))

        else:
            raise ValueError(f"dnn not support layer {layer}")

    return from_keras_sequential_model(model=model,
                                       loss=loss,
                                       optimizer=optimizer,
                                       metrics=metrics)
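A sketch of the nn_define structure this builder consumes, inferred from the loop above; the layer names and parameters are illustrative, not taken from FATE's configuration files:

nn_define = [
    {"layer": "Dense", "units": 32, "activation": "relu"},
    {"layer": "Dense", "units": 1, "activation": "sigmoid"},
]
nn_model = build_nn_model(input_shape=(16,), nn_define=nn_define,
                          loss="binary_crossentropy", optimizer="adam",
                          metrics=["accuracy"])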
Example #14
def build_simple_model():
    model = Sequential()
    model.add(Flatten(input_shape=(lookback // step, float_data.shape[-1])))
    model.add(Dense(32, activation='relu'))
    model.add(Dense(1))
    model.compile(optimizer=RMSprop(), loss='mae')
    return model
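build_simple_model reads the free variables lookback, step and float_data from the enclosing script. As context only (these values mirror the usual Jena-weather tutorial setup, which is an assumption; requires numpy as np):

lookback, step = 720, 6  # 10 days of history, one sample per hour
float_data = np.zeros((100, 14), dtype='float32')  # placeholder feature matrix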
Example #15
class MyAlexNet(Model):
    def __init__(self):
        super(MyAlexNet, self).__init__()

        self.Layers = [
            layers.Conv2D(filters=48, kernel_size=[3, 3], padding='same', activation=nn.relu, kernel_regularizer=regularizers.l2(hps.lamda)),  # 64
            layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same'),

            layers.Conv2D(filters=128, kernel_size=[3, 3], padding='same', activation=nn.relu, kernel_regularizer=regularizers.l2(hps.lamda)),  # 192
            layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same'),

            layers.Conv2D(filters=192, kernel_size=[3, 3], padding='same', activation=nn.relu, kernel_regularizer=regularizers.l2(hps.lamda)),  # 384
            layers.Conv2D(filters=192, kernel_size=[3, 3], padding='same', activation=nn.relu, kernel_regularizer=regularizers.l2(hps.lamda)),  # 256
            layers.Conv2D(filters=128, kernel_size=[3, 3], padding='same', activation=nn.relu, kernel_regularizer=regularizers.l2(hps.lamda)),  # 256
            layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same'),
            layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same'),

            layers.Flatten(),

            layers.Dense(2048, activation=nn.relu, kernel_regularizer=regularizers.l2(hps.lamda)),     # 2048
            layers.Dense(2048, activation=nn.relu, kernel_regularizer=regularizers.l2(hps.lamda)),     # 2048
            layers.Dense(10, activation=nn.softmax, kernel_regularizer=regularizers.l2(hps.lamda)),
            # layers.Dense(10, activation=None),
        ]
        self.net = Sequential(self.Layers)
        self.net.build(input_shape=[None, 32, 32, 3])

    def call(self, inputs, training=None, mask=None):
        inputs = tf.reshape(inputs, [-1, 32, 32, 3])
        out = self.net(inputs)
        return out
Example #16
def downsample(units,
               input_shape=None,
               apply_batchnorm=True,
               layer_type='dense'):

    initializer = random_normal_initializer(0., 0.02)

    seq = Sequential()
    if layer_type == 'dense':
        seq.add(
            layers.Dense(units,
                         input_shape=[
                             input_shape,
                         ],
                         kernel_initializer=initializer,
                         use_bias=False))
    elif layer_type == 'conv':
        seq.add(
            layers.Conv2D(filters=units,
                          kernel_size=3,
                          strides=(2, 2),
                          padding='same',
                          input_shape=input_shape,
                          kernel_initializer=initializer,
                          use_bias=False))
    else:
        raise ValueError("layer_type must be 'dense' or 'conv'")
    if apply_batchnorm:
        seq.add(layers.BatchNormalization())

    seq.add(layers.LeakyReLU())
    return seq
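A hedged usage sketch of both branches (the shapes are illustrative, and random_normal_initializer is assumed to be tf.random_normal_initializer imported at module level):

dense_block = downsample(64, input_shape=784, layer_type='dense')        # Dense + BN + LeakyReLU
conv_block = downsample(64, input_shape=(64, 64, 3), layer_type='conv')  # strided Conv2D + BN + LeakyReLU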
Example #17
def model_builder():
    model = Sequential()
    model.add(Dense(256, input_dim=len(x_train.columns), kernel_initializer="random_normal", activation="relu"))
    model.add(Dense(256, kernel_initializer="random_normal", activation="relu"))
    model.add(Dense(1, kernel_initializer="random_normal", activation="relu"))
    model.compile(optimizer="adam", loss="mean_squared_error")
    return model
Example #18
    def train(self):
        batch_size = 64
        units = 100
        embedding_matrix = np.zeros((self.vocab_size, 100))
        for word, index in self.tk.word_index.items():
            embedding_vector = self.word2vec.get(word)
            if embedding_vector is not None:
                embedding_matrix[index] = embedding_vector

        self.model = Sequential()
        self.model.add(
            Embedding(self.vocab_size,
                      units,
                      weights=[embedding_matrix],
                      trainable=False))
        self.model.add(
            Bidirectional(LSTM(units, return_sequences=True, dropout=0.2)))
        self.model.add(Bidirectional(LSTM(units, dropout=0.2)))
        # NOTE: a sigmoid output is inconsistent with the sparse categorical
        # cross-entropy loss below; a softmax over output_size classes is the usual pairing.
        self.model.add(Dense(self.output_size, activation='sigmoid'))
        print(self.model.summary())
        self.model.compile(optimizer='adam',
                           loss='sparse_categorical_crossentropy',
                           metrics=['acc'])
        history = self.model.fit(self.X_train,
                                 self.y_train,
                                 epochs=100,
                                 batch_size=batch_size,
                                 verbose=1)
Example #19
def get_model(h1, h2, initializer="random_uniform"):
    model = Sequential()
    model.add(Dense(h1, activation="tanh", kernel_initializer=initializer))
    if h2 != 0:
        model.add(Dense(h2, activation="tanh", kernel_initializer=initializer))
    model.add(Dense(1, activation="tanh", kernel_initializer=initializer))
    model.compile(optimizer='adam', loss='mean_squared_error', metrics=['accuracy'])
    return model
Example #20
    def create_model(self) -> Sequential:
        model = Sequential()
        model.add(Dense(64, input_dim=4, activation='relu'))
        model.add(Dense(64, activation='relu'))
        model.add(Dense(2, activation='linear'))
        model.compile(loss="mse", optimizer=Adam(self.learning_rate))

        return model
Example #21
def DeepQNetwork(lr, num_actions, input_dims, fc1, fc2):
    q_net = Sequential()
    q_net.add(Dense(fc1, input_dim=input_dims, activation='relu'))
    q_net.add(Dense(fc2, activation='relu'))
    q_net.add(Dense(num_actions, activation=None))
    q_net.compile(optimizer=Adam(learning_rate=lr), loss='mse')

    return q_net
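A hedged usage sketch (the hyper-parameters and dummy state are illustrative; requires numpy as np):

q_net = DeepQNetwork(lr=1e-3, num_actions=4, input_dims=8, fc1=256, fc2=256)
q_values = q_net.predict(np.zeros((1, 8), dtype=np.float32))  # one Q-value per action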
Example #22
    def createModel(self):
        """Create and compile the keras model. See layers-18pct.cfg and 
           layers-params-18pct.cfg for the network model, 
           and https://code.google.com/archive/p/cuda-convnet/wikis/LayerParams.wiki 
           for documentation on the layer format.
        """
        self.model = Sequential()
        self.model.add(
            keras.layers.Conv2D(filters=32,
                                kernel_size=5,
                                strides=(1, 1),
                                padding='same',
                                input_shape=(32, 32, 3),
                                data_format="channels_last",
                                dilation_rate=(1, 1),
                                activation=tf.nn.relu))
        self.model.add(
            keras.layers.MaxPooling2D(pool_size=(3, 3),
                                      strides=(2, 2),
                                      padding='same'))
        self.model.add(
            keras.layers.BatchNormalization(
                axis=-1,  # channels_last input, so normalize over the channel axis
                momentum=0.99,
                epsilon=0.001,
            ))
        self.model.add(
            keras.layers.Conv2D(filters=32,
                                kernel_size=5,
                                strides=(1, 1),
                                padding='same',
                                dilation_rate=(1, 1),
                                activation=tf.nn.relu))
        self.model.add(
            keras.layers.AveragePooling2D(pool_size=(3, 3),
                                          strides=(2, 2),
                                          padding='same'))
        self.model.add(
            keras.layers.BatchNormalization(axis=-1,
                                            momentum=0.99,
                                            epsilon=0.001))
        self.model.add(
            keras.layers.Conv2D(filters=32,
                                kernel_size=5,
                                strides=(1, 1),
                                padding='same',
                                dilation_rate=(1, 1),
                                activation=tf.nn.relu))
        self.model.add(
            keras.layers.AveragePooling2D(pool_size=(3, 3),
                                          strides=(2, 2),
                                          padding='same'))
        self.model.add(keras.layers.Flatten())
        self.model.add(keras.layers.Dense(10, activation=tf.nn.softmax))

        self.model.compile(optimizer=keras.optimizers.Adam(),
                           loss='sparse_categorical_crossentropy',
                           metrics=['accuracy'])
Example #23
def build_simple_rnn_model(max_features=10000):
    model = Sequential()
    model.add(Embedding(max_features, 32))
    model.add(SimpleRNN(32))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(optimizer='rmsprop',
                  loss='binary_crossentropy',
                  metrics=['acc'])
    return model
Example #24
    def __init__(self):
        # Here we'll create the brain of our agent: a deep neural network built with Keras.
        self.model = Sequential()
        self.q_targets = []
        self.model.add(kl.Dense(10, input_dim=27))
        self.model.add(kl.Activation('relu'))
        self.model.add(kl.Dense(4))  # input_dim is inferred from the previous layer
        self.model.add(kl.Activation('tanh'))
        self.model.compile(optimizer="adam", loss='MSE', metrics=['accuracy'])
Example #25
    def __init__(self):
        """Initialize the neural-network layers we will use."""
        super(myModel, self).__init__()
        self.rows = None
        self.cols = None
        self.kernel = (3, 3)
        self.init = RandomNormal(stddev=0.01)
        self.model = Sequential()
Example #26
    def __init__(self,
                 discount_factor,
                 name='DQNetwork',
                 model=None,
                 state_size=None,
                 action_size=None,
                 hidden_sizes=None,
                 hidden_activation_functions=None,
                 output_activation_function=None):
        self.name = name
        self.discount_factor = discount_factor
        if model is not None:
            self.model = load_model(model)
        else:
            try:
                assert state_size is not None
                assert action_size is not None
                assert output_activation_function is not None
            except AssertionError:
                print(
                    "Network needs to know at least state_size, action_size, and have an output_activation_function"
                )
                exit(1)
            try:
                if hidden_activation_functions is not None or hidden_sizes is not None:
                    assert len(hidden_activation_functions) == len(
                        hidden_sizes)
            except AssertionError:
                if len(hidden_activation_functions) > len(hidden_sizes):
                    print(
                        "Too many hidden activation functions; must match len(hidden_sizes)"
                    )
                else:
                    print(
                        "Too few hidden activation functions; must match len(hidden_sizes)"
                    )
                exit(1)
            except TypeError:
                print(
                    "Either hidden_activation_functions or hidden_sizes is None; Make both None or neither."
                )

            self.model = Sequential()
            self.model.add(InputLayer(input_shape=(state_size, )))
            if hidden_activation_functions is not None and hidden_sizes is not None:
                hidden_inputs = np.roll(hidden_sizes, 1)
                hidden_inputs[0] = state_size
                for s, i, a in zip(hidden_sizes, hidden_inputs,
                                   hidden_activation_functions):
                    self.model.add(Dense(s, input_dim=i, activation=a))
                    self.model.add(Dropout(0.1))
            self.model.add(
                Dense(action_size,
                      input_dim=state_size,
                      activation=output_activation_function))
            self.model.compile(loss='mse', optimizer='adam', metrics=['mse'])
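The np.roll trick above pairs each hidden layer with its input width. A worked example of what it computes, assuming hidden_sizes=[64, 32] and state_size=8:

hidden_sizes = [64, 32]
hidden_inputs = np.roll(hidden_sizes, 1)  # -> [32, 64]
hidden_inputs[0] = 8                      # -> [8, 64]
# zip(...) then yields (64, 8) and (32, 64): each layer's input_dim is the
# previous layer's width (Keras would infer this anyway after the InputLayer).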
Example #27
class Discriminator(Model):
    def __init__(self, num_layer: int, num_cat: int):
        super().__init__()

        self.num_layer = num_layer
        self.num_cat = num_cat

    def build(self, input_shape):
        kernel_size = 5
        stride_size = 2

        width = input_shape[2]
        height = input_shape[1]
        num_channel = input_shape[3]
        next_channel = num_channel * 32

        kernel_init = keras.initializers.RandomNormal(stddev=0.02)
        bias_init = keras.initializers.zeros()

        self.conv2ds = Sequential()

        for i in range(self.num_layer - 1):
            conv = Conv2D(next_channel,
                          kernel_size,
                          stride_size,
                          padding="same",
                          activation=tf.nn.leaky_relu,
                          kernel_initializer=kernel_init,
                          bias_initializer=bias_init,
                          input_shape=(height, width, num_channel))
            self.conv2ds.add(conv)

            width = conv.output_shape[2]
            height = conv.output_shape[1]
            num_channel = next_channel
            next_channel *= stride_size

        conv = Conv2D(1 + self.num_cat, (height, width), (height, width),
                      padding="valid",
                      kernel_initializer=kernel_init,
                      bias_initializer=bias_init)
        self.conv2ds.add(conv)

        self.flatten = Flatten()

    def call(self, inputs: keras.layers.Input, **kwargs):
        output = inputs

        output = self.conv2ds(output)
        output = self.flatten(output)

        classify_result: tf.Tensor = output[:, :1]
        cat_result: tf.Tensor = output[:, 1:]

        return classify_result, cat_result
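A hedged usage sketch of the two-headed output (batch and image shapes are illustrative; assumes import tensorflow as tf):

disc = Discriminator(num_layer=3, num_cat=10)
real_logit, cat_logits = disc(tf.zeros((8, 32, 32, 3)))
# real_logit: (8, 1) real/fake score; cat_logits: (8, 10) category logits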
Example #28
    def __init__(
        self,
        dim: int,
        layer_dims: List[Union[float, int]] = [0.5, 0.5, 1, 1, 2, 2, 4],
    ):
        super().__init__()
        dims = [int(dim * f) for f in layer_dims]
        layers = [self._make_layers(dim, i) for i, dim in enumerate(dims)]
        layers += [ConvBlock(dim * 4, 4, 2, False)]
        self.layer = Sequential(layers)
        self.head = Sequential([Flatten(), Dense(1)])
Example #29
    def __init__(self, session=None, dir_loc=None):
        # Initialize the session and TensorBoard directories
        self.session = tf.Session() if session is None else session
        self.dir_loc = './logs' if dir_loc is None else dir_loc
        self._writer = None

        # Initialize the feature extractor
        self.extractor = Sequential()

        # Set an indication for when the model is built
        self.built = False
Example #30
    def __init__(self,
                 learning_rate: float = 0.0001,
                 hidden_layers_count: int = 0,
                 neurons_per_hidden_layer: int = 0):
        self.model = Sequential()

        for i in range(hidden_layers_count):
            self.model.add(Dense(neurons_per_hidden_layer, activation=tanh))

        # NOTE: softmax over a single unit always outputs 1; 'sigmoid' or a
        # linear activation would be the usual choice here.
        self.model.add(Dense(1, activation=softmax, use_bias=True))
        self.model.compile(loss=mse, optimizer=Adam(lr=learning_rate))
Example #31
    def __init__(self, game: GridGame):
        super().__init__(game)
        # game params
        self.board_height = game.board_height
        self.board_width = game.board_width
        example_board = game.create_board()
        self.action_size = len(game.get_valid_moves(example_board))
        self.epochs_completed = 0
        self.epochs_to_train = 100
        args = Namespace(lr=0.001,
                         dropout=0.3,
                         epochs=10,
                         batch_size=64,
                         num_channels=512)
        self.checkpoint_name = 'random weights'
        self.args = args

        num_channels = 512
        kernel_size = [3, 3]
        dropout = 0.3
        model = Sequential()
        # regularizer = regularizers.l2(0.00006)
        regularizer = regularizers.l2(0.0001)
        model.add(Conv2D(num_channels,
                         kernel_size,
                         padding='same',
                         activation='relu',
                         input_shape=(self.board_height, self.board_width, 1),
                         activity_regularizer=regularizer))
        model.add(Conv2D(num_channels,
                         kernel_size,
                         padding='same',
                         activation='relu',
                         activity_regularizer=regularizer))
        model.add(Conv2D(num_channels,
                         kernel_size,
                         activation='relu',
                         activity_regularizer=regularizer))
        model.add(Conv2D(num_channels,
                         kernel_size,
                         activation='relu',
                         activity_regularizer=regularizer))
        model.add(Dropout(dropout))
        # NOTE: two stacked Dropout(0.3) layers compound to an effective rate
        # of about 0.51; a single dropout may have been intended.
        model.add(Dropout(dropout))
        model.add(Flatten())
        model.add(Dense(self.action_size + 1))
        model.compile('adam', 'mean_squared_error')
        self.model = model