Example #1
import glob
import os
from typing import Tuple

from tensorflow.keras import Model
from tensorflow.keras.callbacks import History, ModelCheckpoint, TensorBoard
from tensorflow.keras.losses import categorical_crossentropy
from tensorflow.keras.optimizers import Adam


def compile_and_train(model: Model,
                      num_epochs: int) -> Tuple[History, str]:
    model.compile(loss=categorical_crossentropy,
                  optimizer=Adam(),
                  metrics=['acc'])
    # Save the best weights (by training loss) once per epoch; `period` is
    # deprecated in favor of `save_freq`, so only the latter is passed.
    filepath = 'weights/' + model.name + '.hdf5'
    checkpoint = ModelCheckpoint(filepath,
                                 monitor='loss',
                                 verbose=0,
                                 save_weights_only=True,
                                 save_best_only=True,
                                 mode='auto',
                                 save_freq='epoch')
    # The `batch_size` argument was removed from the TensorBoard callback in TF 2.x.
    tensor_board = TensorBoard(log_dir='logs/', histogram_freq=0)
    # x_train and y_train are assumed to be defined at module level.
    history = model.fit(x=x_train,
                        y=y_train,
                        batch_size=32,
                        epochs=num_epochs,
                        verbose=1,
                        callbacks=[checkpoint, tensor_board],
                        validation_split=0.2)
    weight_files = glob.glob(os.path.join(os.getcwd(), 'weights/*'))
    weight_file = max(weight_files, key=os.path.getctime)  # most recent file
    return history, weight_file
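A minimal usage sketch for this helper; `build_model` is an assumption (not part of the example above), and the `weights/` directory must exist before `ModelCheckpoint` can write into it:

# Hypothetical usage; build_model() is an assumed stand-in for any Keras model factory.
os.makedirs('weights', exist_ok=True)  # ModelCheckpoint does not create the directory
model = build_model()
history, best_weights = compile_and_train(model, num_epochs=10)
print('best weights saved to:', best_weights)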
Example #2
    def test_stateful_metrics(self):
        with self.cached_session():
            np.random.seed(1334)

            class BinaryTruePositives(layers.Layer):
                """Stateful Metric to count the total true positives over all batches.

                Assumes predictions and targets of shape `(samples, 1)`.

                Arguments:
                    threshold: Float, lower limit on prediction value that
                        counts as a positive class prediction.
                    name: String, name for the metric.
                """
                def __init__(self, name='true_positives', **kwargs):
                    super(BinaryTruePositives, self).__init__(name=name,
                                                              **kwargs)
                    self.true_positives = K.variable(value=0, dtype='int32')
                    self.stateful = True

                def reset_states(self):
                    K.set_value(self.true_positives, 0)

                def __call__(self, y_true, y_pred):
                    """Computes the number of true positives in a batch.

                    Args:
                        y_true: Tensor, batch_wise labels
                        y_pred: Tensor, batch_wise predictions

                    Returns:
                        The total number of true positives seen this epoch at
                            the completion of the batch.
                    """
                    y_true = math_ops.cast(y_true, 'int32')
                    y_pred = math_ops.cast(math_ops.round(y_pred), 'int32')
                    correct_preds = math_ops.cast(
                        math_ops.equal(y_pred, y_true), 'int32')
                    true_pos = math_ops.cast(
                        math_ops.reduce_sum(correct_preds * y_true), 'int32')
                    current_true_pos = self.true_positives * 1
                    self.add_update(state_ops.assign_add(
                        self.true_positives, true_pos),
                                    inputs=[y_true, y_pred])
                    return current_true_pos + true_pos

            metric_fn = BinaryTruePositives()
            config = metrics.serialize(metric_fn)
            metric_fn = metrics.deserialize(
                config,
                custom_objects={'BinaryTruePositives': BinaryTruePositives})

            # Test on simple model
            inputs = layers.Input(shape=(2, ))
            outputs = layers.Dense(1, activation='sigmoid')(inputs)
            model = Model(inputs, outputs)
            model.compile(optimizer='sgd',
                          loss='binary_crossentropy',
                          metrics=['acc', metric_fn])

            # Test fit, evaluate
            samples = 100
            x = np.random.random((samples, 2))
            y = np.random.randint(2, size=(samples, 1))
            val_samples = 10
            val_x = np.random.random((val_samples, 2))
            val_y = np.random.randint(2, size=(val_samples, 1))

            history = model.fit(x,
                                y,
                                epochs=1,
                                batch_size=10,
                                validation_data=(val_x, val_y))
            outs = model.evaluate(x, y, batch_size=10)
            preds = model.predict(x)

            def ref_true_pos(y_true, y_pred):
                return np.sum(np.logical_and(y_pred > 0.5, y_true == 1))

            # Test correctness (e.g. updates should have been run)
            self.assertAllClose(outs[2], ref_true_pos(y, preds), atol=1e-5)

            # Test correctness of the validation metric computation
            val_preds = model.predict(val_x)
            val_outs = model.evaluate(val_x, val_y, batch_size=10)
            self.assertAllClose(val_outs[2],
                                ref_true_pos(val_y, val_preds),
                                atol=1e-5)
            self.assertAllClose(val_outs[2],
                                history.history['val_true_positives'][-1],
                                atol=1e-5)

            # Test with generators
            gen = [(np.array([x0]), np.array([y0])) for x0, y0 in zip(x, y)]
            val_gen = [(np.array([x0]), np.array([y0]))
                       for x0, y0 in zip(val_x, val_y)]
            history = model.fit_generator(iter(gen),
                                          epochs=1,
                                          steps_per_epoch=samples,
                                          validation_data=iter(val_gen),
                                          validation_steps=val_samples)
            outs = model.evaluate_generator(iter(gen), steps=samples)
            preds = model.predict_generator(iter(gen), steps=samples)

            # Test correctness of the metric results
            self.assertAllClose(outs[2], ref_true_pos(y, preds), atol=1e-5)

            # Test correctness of the validation metric computation
            val_preds = model.predict_generator(iter(val_gen),
                                                steps=val_samples)
            val_outs = model.evaluate_generator(iter(val_gen),
                                                steps=val_samples)
            self.assertAllClose(val_outs[2],
                                ref_true_pos(val_y, val_preds),
                                atol=1e-5)
            self.assertAllClose(val_outs[2],
                                history.history['val_true_positives'][-1],
                                atol=1e-5)
Example #3
  def test_stateful_metrics(self):
    with self.test_session():
      np.random.seed(1334)

      class BinaryTruePositives(layers.Layer):
        """Stateful Metric to count the total true positives over all batches.

        Assumes predictions and targets of shape `(samples, 1)`.

        Arguments:
            threshold: Float, lower limit on prediction value that counts as a
                positive class prediction.
            name: String, name for the metric.
        """

        def __init__(self, name='true_positives', **kwargs):
          super(BinaryTruePositives, self).__init__(name=name, **kwargs)
          self.true_positives = K.variable(value=0, dtype='int32')
          self.stateful = True

        def reset_states(self):
          K.set_value(self.true_positives, 0)

        def __call__(self, y_true, y_pred):
          """Computes the number of true positives in a batch.

          Args:
              y_true: Tensor, batch_wise labels
              y_pred: Tensor, batch_wise predictions

          Returns:
              The total number of true positives seen this epoch at the
                  completion of the batch.
          """
          y_true = math_ops.cast(y_true, 'int32')
          y_pred = math_ops.cast(math_ops.round(y_pred), 'int32')
          correct_preds = math_ops.cast(math_ops.equal(y_pred, y_true), 'int32')
          true_pos = math_ops.cast(
              math_ops.reduce_sum(correct_preds * y_true), 'int32')
          current_true_pos = self.true_positives * 1
          self.add_update(
              state_ops.assign_add(self.true_positives, true_pos),
              inputs=[y_true, y_pred])
          return current_true_pos + true_pos

      metric_fn = BinaryTruePositives()
      config = metrics.serialize(metric_fn)
      metric_fn = metrics.deserialize(
          config, custom_objects={'BinaryTruePositives': BinaryTruePositives})

      # Test on simple model
      inputs = layers.Input(shape=(2,))
      outputs = layers.Dense(1, activation='sigmoid')(inputs)
      model = Model(inputs, outputs)
      model.compile(optimizer='sgd',
                    loss='binary_crossentropy',
                    metrics=['acc', metric_fn])

      # Test fit, evaluate
      samples = 100
      x = np.random.random((samples, 2))
      y = np.random.randint(2, size=(samples, 1))
      val_samples = 10
      val_x = np.random.random((val_samples, 2))
      val_y = np.random.randint(2, size=(val_samples, 1))

      history = model.fit(x, y,
                          epochs=1,
                          batch_size=10,
                          validation_data=(val_x, val_y))
      outs = model.evaluate(x, y, batch_size=10)
      preds = model.predict(x)

      def ref_true_pos(y_true, y_pred):
        return np.sum(np.logical_and(y_pred > 0.5, y_true == 1))

      # Test correctness (e.g. updates should have been run)
      self.assertAllClose(outs[2], ref_true_pos(y, preds), atol=1e-5)

      # Test correctness of the validation metric computation
      val_preds = model.predict(val_x)
      val_outs = model.evaluate(val_x, val_y, batch_size=10)
      self.assertAllClose(
          val_outs[2], ref_true_pos(val_y, val_preds), atol=1e-5)
      self.assertAllClose(
          val_outs[2], history.history['val_true_positives'][-1], atol=1e-5)

      # Test with generators
      gen = [(np.array([x0]), np.array([y0])) for x0, y0 in zip(x, y)]
      val_gen = [(np.array([x0]), np.array([y0]))
                 for x0, y0 in zip(val_x, val_y)]
      history = model.fit_generator(iter(gen),
                                    epochs=1,
                                    steps_per_epoch=samples,
                                    validation_data=iter(val_gen),
                                    validation_steps=val_samples)
      outs = model.evaluate_generator(iter(gen), steps=samples)
      preds = model.predict_generator(iter(gen), steps=samples)

      # Test correctness of the metric results
      self.assertAllClose(outs[2], ref_true_pos(y, preds), atol=1e-5)

      # Test correctness of the validation metric computation
      val_preds = model.predict_generator(iter(val_gen), steps=val_samples)
      val_outs = model.evaluate_generator(iter(val_gen), steps=val_samples)
      self.assertAllClose(
          val_outs[2], ref_true_pos(val_y, val_preds), atol=1e-5)
      self.assertAllClose(
          val_outs[2], history.history['val_true_positives'][-1], atol=1e-5)
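Examples #2 and #3 both use the legacy stateful-metric pattern (a `Layer` subclass with `stateful = True`). For reference, a minimal sketch of the same counter written against the modern `tf.keras.metrics.Metric` API (TF 2.x); the code below is an assumption for illustration, not part of the test above:

import tensorflow as tf

class BinaryTruePositivesV2(tf.keras.metrics.Metric):
    """Counts true positives across batches; Keras resets the state between epochs."""

    def __init__(self, name='true_positives', **kwargs):
        super().__init__(name=name, **kwargs)
        self.true_positives = self.add_weight(name='tp', initializer='zeros')

    def update_state(self, y_true, y_pred, sample_weight=None):
        y_true = tf.cast(y_true, tf.bool)
        y_pred = tf.cast(tf.round(y_pred), tf.bool)
        matches = tf.logical_and(y_true, y_pred)
        self.true_positives.assign_add(
            tf.reduce_sum(tf.cast(matches, self.dtype)))

    def result(self):
        return self.true_positives

    def reset_state(self):
        self.true_positives.assign(0.0)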
Example #4
import time

from keras.layers import (Add, Conv2D, Conv2DTranspose, Dense, Input,
                          MaxPooling2D)
from keras.models import Model
from keras.optimizers import Adam

import build_data  # project-local helper for downloading/loading KITTI data

# DATA_DIRECTORY, TRAINING_DATA_DIRECTORY, IMAGE_SHAPE, LEARNING_RATE,
# BATCH_SIZE and EPOCHS are module-level constants in the source project.


def run(model):
    # Download the KITTI dataset if it is not already present
    build_data.maybe_download_training_img(DATA_DIRECTORY)

    x, y = build_data.get_data(TRAINING_DATA_DIRECTORY, IMAGE_SHAPE)

    if model is None:
        inputs = Input(shape=(IMAGE_SHAPE[0], IMAGE_SHAPE[1], 3))

        # Block 1
        block1_conv1 = Conv2D(64, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block1_conv1')(inputs)
        block1_conv2 = Conv2D(64, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block1_conv2')(block1_conv1)
        block1_pool = MaxPooling2D((2, 2), strides=(2, 2),
                                   name='block1_pool')(block1_conv2)

        # Block 2
        block2_conv1 = Conv2D(128, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block2_conv1')(block1_pool)
        block2_conv2 = Conv2D(128, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block2_conv2')(block2_conv1)
        block2_pool = MaxPooling2D((2, 2), strides=(2, 2),
                                   name='block2_pool')(block2_conv2)

        # Block 3
        block3_conv1 = Conv2D(256, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block3_conv1')(block2_pool)
        block3_conv2 = Conv2D(256, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block3_conv2')(block3_conv1)
        block3_conv3 = Conv2D(256, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block3_conv3')(block3_conv2)
        block3_pool = MaxPooling2D((2, 2), strides=(2, 2),
                                   name='block3_pool')(block3_conv3)

        # Block 4
        block4_conv1 = Conv2D(512, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block4_conv1')(block3_pool)
        block4_conv2 = Conv2D(512, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block4_conv2')(block4_conv1)
        block4_conv3 = Conv2D(512, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block4_conv3')(block4_conv2)
        block4_pool = MaxPooling2D((2, 2), strides=(2, 2),
                                   name='block4_pool')(block4_conv3)

        # Block 5
        block5_conv1 = Conv2D(512, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block5_conv1')(block4_pool)
        block5_conv2 = Conv2D(512, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block5_conv2')(block5_conv1)
        block5_conv3 = Conv2D(512, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block5_conv3')(block5_conv2)
        block5_pool = MaxPooling2D((2, 2), strides=(2, 2),
                                   name='block5_pool')(block5_conv3)

        pool5_conv1x1 = Conv2D(2, (1, 1), activation='relu',
                               padding='same')(block5_pool)
        upsample_1 = Conv2DTranspose(2,
                                     kernel_size=(4, 4),
                                     strides=(2, 2),
                                     padding="same")(pool5_conv1x1)

        pool4_conv1x1 = Conv2D(2, (1, 1), activation='relu',
                               padding='same')(block4_pool)
        add_1 = Add()([upsample_1, pool4_conv1x1])

        upsample_2 = Conv2DTranspose(2,
                                     kernel_size=(4, 4),
                                     strides=(2, 2),
                                     padding="same")(add_1)
        pool3_conv1x1 = Conv2D(2, (1, 1), activation='relu',
                               padding='same')(block3_pool)
        add_2 = Add()([upsample_2, pool3_conv1x1])

        upsample_3 = Conv2DTranspose(2,
                                     kernel_size=(16, 16),
                                     strides=(8, 8),
                                     padding="same")(add_2)
        # Dense applies to the last axis, so this is a per-pixel softmax over 2 classes
        output = Dense(2, activation='softmax')(upsample_3)

        model = Model(inputs, output, name='multinet_seg')

        adam = Adam(lr=LEARNING_RATE)
        model.compile(loss='categorical_crossentropy',
                      optimizer=adam,
                      metrics=['accuracy'])

    model.fit(x, y, batch_size=BATCH_SIZE, epochs=EPOCHS)
    model.save('trained_model/trained_model' + str(time.time()) + '.h5')
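The decoder arithmetic works out because the three transposed convolutions (strides 2, 2 and 8) exactly undo the five 2×2 poolings of the VGG-style encoder (2·2·8 = 32 = 2^5). A quick sketch of the spatial sizes, assuming a hypothetical 160×576 input (the real IMAGE_SHAPE is defined in the source project):

# Hypothetical shape walk-through; 160x576 is an assumption.
h, w = 160, 576
for _ in range(5):            # five 2x2 max-poolings: 32x reduction
    h, w = h // 2, w // 2     # -> block5_pool is 5x18
h, w = h * 2, w * 2           # upsample_1 -> 10x36, matches block4_pool
h, w = h * 2, w * 2           # upsample_2 -> 20x72, matches block3_pool
h, w = h * 8, w * 8           # upsample_3 -> 160x576, input resolution
assert (h, w) == (160, 576)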
Example #5
import jieba
import matplotlib.pyplot as plt
from keras.layers import Dense, Dropout, Embedding, Input, LSTM
from keras.models import Model
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from keras.utils import to_categorical


def data_pre(data):
    # Build the labels: class i for every document in data[i]
    label = [i for i in range(len(data)) for _ in data[i]]
    label = to_categorical(label)
    # Word segmentation
    context = []
    for i in data:
        for j in i:
            context.append(jieba.lcut(j))

    # Build the vocabulary
    tokenizer = Tokenizer(num_words=20000)
    tokenizer.fit_on_texts(context)

    train_tags_title = tokenizer.texts_to_sequences(context)
    train_tags_title_preprocessed = pad_sequences(train_tags_title,
                                                  maxlen=45,
                                                  padding='post')

    # Pretrained word vectors (left disabled in the original)
    # embedding_matrix = np.zeros((278028, 30), dtype=np.float32)
    # f = open('wiki.zh.text.vector', encoding='utf-8')
    # f = f.readlines()
    # for text in f:
    #     text = text.split()
    #     if text[0] in context:
    #         embedding_matrix[context[text[0]]] = text[1:]

    # Model
    x_1 = Input(shape=(45, ))  # input: padded sequences of length 45
    # input_dim must be the vocabulary size (num_words=20000), not the
    # sequence length; Embedding maps indices to dense vectors and can only
    # be used as the first layer.
    embed_1 = Embedding(input_dim=20000, output_dim=45)(x_1)
    L_1 = LSTM(64)(embed_1)  # 64 is the dimensionality of the output space
    L_1 = Dropout(0.5)(L_1)  # fights overfitting; 0.5 is the fraction of inputs to drop
    L_1 = Dense(9, activation='softmax')(L_1)  # 9 output classes
    model_one = Model(x_1, L_1)  # x_1 is the input, L_1 the output
    model_one.compile(loss='categorical_crossentropy',
                      optimizer='rmsprop',
                      metrics=['acc'])
    history = model_one.fit(train_tags_title_preprocessed,
                            label,
                            batch_size=512,
                            epochs=20,
                            validation_split=0.1,
                            shuffle=True)
    # Plot accuracy history
    plt.plot(history.history['acc'])
    plt.plot(history.history['val_acc'])
    plt.title('model acc')
    plt.ylabel('accuracy')
    plt.xlabel('epoch')
    plt.legend(['train', 'val'], loc='upper left')
    plt.show()
    # Plot loss history
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('model loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train', 'val'], loc='upper left')
    plt.show()
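A minimal sketch of encoding a new title for prediction, assuming `tokenizer` and `model_one` are returned from the function rather than kept local as above:

# Hypothetical inference; assumes data_pre returns (model_one, tokenizer).
import jieba
from keras.preprocessing.sequence import pad_sequences

seq = tokenizer.texts_to_sequences([jieba.lcut('这是一个新的标题')])
padded = pad_sequences(seq, maxlen=45, padding='post')  # shape (1, 45)
probs = model_one.predict(padded)                       # shape (1, 9)
print(probs.argmax(axis=1))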
Example #6
from keras.applications.vgg16 import VGG16
from keras.layers import Dense, Dropout, Flatten
from keras.models import Model, Sequential
from keras.optimizers import Adam
from keras.utils import np_utils

# NUM_CLASSES, IMAGE_SIZE and the (X_train, y_train, X_test, y_test) arrays
# are assumed to be defined earlier in the script.
y_train = np_utils.to_categorical(y_train, NUM_CLASSES)
y_test = np_utils.to_categorical(y_test, NUM_CLASSES)

X_train = X_train.astype("float") / 255.0
X_test = X_test.astype("float") / 255.0

# Define the model
model = VGG16(weights='imagenet',
              include_top=False,
              input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3))

top_model = Sequential()
top_model.add(Flatten(input_shape=model.output_shape[1:]))
top_model.add(Dense(256, activation='relu'))
top_model.add(Dropout(0.5))
top_model.add(Dense(NUM_CLASSES, activation='softmax'))

model = Model(inputs=model.input, outputs=top_model(model.output))

# Freeze layers up to block4_pool; block5 and the new head stay trainable
for layer in model.layers[:15]:
    layer.trainable = False

opt = Adam(lr=0.0001)
model.compile(loss="categorical_crossentropy",
              optimizer=opt,
              metrics=['accuracy'])
model.fit(X_train, y_train, batch_size=16, epochs=5)
results = model.evaluate(X_test, y_test, batch_size=16)
print("test loss, test acc:", results)
model.save("./AnimalJudgmentModel.h5")
Example #7
File: chessModel.py Project: egusev/Zebra
# Imports assumed for this excerpt (the source file may use plain `keras`):
from tensorflow.keras import Input, Model
from tensorflow.keras.layers import (Activation, Add, BatchNormalization,
                                     Conv2D, Dense, Flatten)
from tensorflow.keras.regularizers import l2


class ChessModel:
    """
    The model which can be trained to take observations of a game of chess
    and return value and policy predictions. Inspired by https://github.com/Zeta36/chess-alpha-zero/blob/master/src/chess_zero/agent/model_chess.py

    Attributes:
        :ivar Config config: configuration to use
        :ivar Model model: the Keras model to use for predictions
    """
    def __init__(self, config):
        self.config = config
        self.model = None  # type: Model
        self.digest = None
        self.api = None

    def build(self):
        """
        Builds the full Keras model and stores it in self.model.
        """
        mc = self.config
        in_x = x = Input((12, 8, 8))

        # (batch, channels, height, width)
        x = Conv2D(filters=mc.cnn_filter_num,
                   kernel_size=mc.cnn_first_filter_size,
                   padding="same",
                   data_format="channels_first",
                   use_bias=False,
                   kernel_regularizer=l2(mc.l2_reg),
                   name="input_conv-" + str(mc.cnn_first_filter_size) + "-" +
                   str(mc.cnn_filter_num))(x)
        x = BatchNormalization(axis=1, name="input_batchnorm")(x)
        x = Activation("relu", name="input_relu")(x)

        for i in range(mc.res_layer_num):
            x = self._build_residual_block(x, i + 1)

        res_out = x

        # for policy output
        x = Conv2D(filters=2,
                   kernel_size=1,
                   data_format="channels_first",
                   use_bias=False,
                   kernel_regularizer=l2(mc.l2_reg),
                   name="policy_conv-1-2")(res_out)
        x = BatchNormalization(axis=1, name="policy_batchnorm")(x)
        x = Activation("relu", name="policy_relu")(x)
        x = Flatten(name="policy_flatten")(x)

        policy_out = Dense(self.config.n_labels,
                           kernel_regularizer=l2(mc.l2_reg),
                           activation="softmax",
                           name="policy_out")(x)

        # for value output
        x = Conv2D(filters=4,
                   kernel_size=1,
                   data_format="channels_first",
                   use_bias=False,
                   kernel_regularizer=l2(mc.l2_reg),
                   name="value_conv-1-4")(res_out)
        x = BatchNormalization(axis=1, name="value_batchnorm")(x)
        x = Activation("relu", name="value_relu")(x)
        x = Flatten(name="value_flatten")(x)
        x = Dense(mc.value_fc_size,
                  kernel_regularizer=l2(mc.l2_reg),
                  activation="relu",
                  name="value_dense")(x)
        value_out = Dense(1,
                          kernel_regularizer=l2(mc.l2_reg),
                          activation="tanh",
                          name="value_out")(x)

        self.model = Model(in_x, [policy_out, value_out], name="chess_model")

    def _build_residual_block(self, x, index):
        # mc = self.config.model
        mc = self.config

        in_x = x
        res_name = "res" + str(index)
        x = Conv2D(filters=mc.cnn_filter_num,
                   kernel_size=mc.cnn_filter_size,
                   padding="same",
                   data_format="channels_first",
                   use_bias=False,
                   kernel_regularizer=l2(mc.l2_reg),
                   name=res_name + "_conv1-" + str(mc.cnn_filter_size) + "-" +
                   str(mc.cnn_filter_num))(x)
        x = BatchNormalization(axis=1, name=res_name + "_batchnorm1")(x)
        x = Activation("relu", name=res_name + "_relu1")(x)
        x = Conv2D(filters=mc.cnn_filter_num,
                   kernel_size=mc.cnn_filter_size,
                   padding="same",
                   data_format="channels_first",
                   use_bias=False,
                   kernel_regularizer=l2(mc.l2_reg),
                   name=res_name + "_conv2-" + str(mc.cnn_filter_size) + "-" +
                   str(mc.cnn_filter_num))(x)
        x = BatchNormalization(axis=1,
                               name="res" + str(index) + "_batchnorm2")(x)
        x = Add(name=res_name + "_add")([in_x, x])
        x = Activation("relu", name=res_name + "_relu2")(x)
        return x

    def compile(self, optimizer, loss, metrics, loss_weights=None):
        self.model.compile(optimizer=optimizer,
                           loss=loss,
                           metrics=metrics,
                           loss_weights=loss_weights)
        return self.model

    def fit(self,
            dataset,
            y=None,
            validation_data=None,
            batch_size=None,
            epochs=10,
            shuffle=True,
            val_split=None,
            callbacks=None):
        self.model.fit(x=dataset,
                       y=y,
                       batch_size=batch_size,
                       epochs=epochs,
                       shuffle=shuffle,
                       validation_split=val_split,
                       validation_data=validation_data,
                       callbacks=callbacks)
        return self.model

    def predict(self,
                x,
                batch_size=None,
                steps=None,
                callbacks=None,
                max_queue_size=10,
                workers=1,
                use_multiprocessing=False):
        value = self.model.predict(x=x,
                                   batch_size=batch_size,
                                   steps=steps,
                                   callbacks=callbacks,
                                   max_queue_size=max_queue_size,
                                   workers=workers,
                                   use_multiprocessing=use_multiprocessing)
        return value

    def summary(self):
        self.model.summary()
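A hypothetical usage of the class; the `DummyConfig` values below are assumptions that mirror the attributes referenced in `build` (in chess-alpha-zero, `n_labels` is the size of the move-encoding space):

# All values below are assumptions for illustration.
class DummyConfig:
    cnn_filter_num = 64
    cnn_first_filter_size = 5
    cnn_filter_size = 3
    res_layer_num = 3
    l2_reg = 1e-4
    value_fc_size = 64
    n_labels = 1968  # assumed move-vocabulary size

chess = ChessModel(DummyConfig())
chess.build()
chess.compile(optimizer='adam',
              loss=['categorical_crossentropy', 'mean_squared_error'],
              metrics=[])  # one loss per output: policy_out, value_out
chess.summary()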
Example #8
    def run(self, ctx, exa, train: bool):
        session_config = tf.ConfigProto(allow_soft_placement=True,
                                        log_device_placement=False)
        session = tf.Session(config=session_config)
        tf.keras.backend.set_session(session)

        config = self.read_config(exa)
        batch_size = config["batch_size"]
        epochs = config["epochs"]
        steps_per_epoch = ctx.size() // batch_size
        use_cache = config["use_cache"]
        load_path = None
        if "model_load_bucketfs_path" in config:
            load_path = config["model_load_bucketfs_path"]
        save_url = None
        if "model_save_bucketfs_url" in config:
            save_url = config["model_save_bucketfs_url"]
        save_path = config["model_temporary_save_path"]
        dataset = DatasetUtils().create_generator_dataset(
            ctx, epochs, batch_size, use_cache, exa.meta.input_columns)

        with tf.device(config["device"]):
            input_columns, keras_inputs, preprocessed_keras_inputs = \
                ColumnEncoder().generate_inputs(
                    exa.meta.input_columns, config["columns"])
            table_network = self.create_table_network(
                preprocessed_keras_inputs)
            output_columns, keras_outputs, losses, loss_weights, output_metrics = \
                ColumnEncoder().generate_outputs(
                    exa.meta.input_columns, table_network, config["columns"])
            session.run(tf.tables_initializer())

            dataset = DatasetUtils().create_dataset(dataset, input_columns,
                                                    output_columns, batch_size,
                                                    use_cache)

            session.run(tf.global_variables_initializer())
            session.run(tf.local_variables_initializer())

            dataset_iterator = dataset.make_initializable_iterator()
            session.run(dataset_iterator.initializer)

            saver = tf.train.Saver(max_to_keep=1, save_relative_paths=True)
            print("load_path", load_path, flush=True)
            if load_path is not None and load_path != "":
                initial_epoch = Utils().restore_model_and_get_inital_epoch(
                    session, saver, load_path + "/checkpoints/tmp/save")
            else:
                initial_epoch = 0
            callbacks = Utils().create_callbacks(session, saver, save_path)

            model = Model(inputs=keras_inputs, outputs=keras_outputs)
            profile = config["profile"]
            profile_model_options = Utils().add_profiler(
                callbacks, profile, session, save_path)
            print(output_metrics, flush=True)
            model.compile(optimizer='rmsprop',
                          loss=losses,
                          loss_weights=loss_weights,
                          metrics=output_metrics,
                          **profile_model_options)
            print(model.summary(), flush=True)

            if train:
                print("Starting training", flush=True)
                history = model.fit(dataset_iterator,
                                    steps_per_epoch=steps_per_epoch,
                                    epochs=initial_epoch + epochs,
                                    verbose=2,
                                    callbacks=callbacks,
                                    initial_epoch=initial_epoch)
                ctx.emit(str(history.history))
                print("save_url", save_url, flush=True)
                if save_url != "" and save_url is not None:
                    tarfile = f"/tmp/save"
                    os.makedirs(tarfile, exist_ok=True)
                    self.tar_save(save_path, tarfile)
                    self.upload_save(save_url, tarfile)

            else:
                print("Starting prediction", flush=True)
                for i in range(steps_per_epoch):
                    print(f"Predicting Batch {i}/steps_per_epoch", flush=True)
                    output = model.predict(dataset_iterator, steps=1)
                    ctx.emit(output)
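For reference, a hypothetical config mapping with the keys this method reads; the real values come from `self.read_config(exa)` in the source project:

# All keys below are read in the method above; the values are assumptions.
config = {
    "batch_size": 100,
    "epochs": 2,
    "use_cache": False,
    "device": "/cpu:0",
    "columns": {},  # per-column encoder spec (project-specific)
    "profile": False,
    "model_temporary_save_path": "/tmp/model",
    # optional: "model_load_bucketfs_path", "model_save_bucketfs_url"
}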
Example #9
# Imports as assumed for Example #7 apply here as well.
class ChessModel:
    """
    The model which can be trained to take observations of a game of chess
    and return value predictions (this variant builds only the value head).

    Attributes:
        :ivar Config config: configuration to use
        :ivar Model model: the Keras model to use for predictions
        :ivar digest: basically just a hash of the file containing the weights being used by this model
        :ivar ChessModelAPI api: the api to use to listen for and then return this model's predictions (on a pipe).
    """
    def __init__(self, config):
        self.config = config
        self.model = None  # type: Model
        self.digest = None
        self.api = None

    def build(self):
        """
        Builds the full Keras model and stores it in self.model.
        """
        mc = self.config
        in_x = x = Input((12, 8, 8))

        # (batch, channels, height, width)
        x = Conv2D(filters=mc.cnn_filter_num,
                   kernel_size=mc.cnn_first_filter_size,
                   padding="same",
                   data_format="channels_first",
                   use_bias=False,
                   kernel_regularizer=l2(mc.l2_reg),
                   name="input_conv-" + str(mc.cnn_first_filter_size) + "-" +
                   str(mc.cnn_filter_num))(x)
        x = BatchNormalization(axis=1, name="input_batchnorm")(x)
        x = Activation("relu", name="input_relu")(x)

        for i in range(mc.res_layer_num):
            x = self._build_residual_block(x, i + 1)

        res_out = x

        # for value output
        x = Conv2D(filters=4,
                   kernel_size=1,
                   data_format="channels_first",
                   use_bias=False,
                   kernel_regularizer=l2(mc.l2_reg),
                   name="value_conv-1-4")(res_out)
        x = BatchNormalization(axis=1, name="value_batchnorm")(x)
        x = Activation("relu", name="value_relu")(x)
        x = Flatten(name="value_flatten")(x)
        x = Dense(mc.value_fc_size,
                  kernel_regularizer=l2(mc.l2_reg),
                  activation="relu",
                  name="value_dense")(x)
        value_out = Dense(1,
                          kernel_regularizer=l2(mc.l2_reg),
                          activation="tanh",
                          name="value_out")(x)

        self.model = Model(in_x, [value_out], name="chess_model")

    def _build_residual_block(self, x, index):
        # mc = self.config.model
        mc = self.config

        in_x = x
        res_name = "res" + str(index)
        x = Conv2D(filters=mc.cnn_filter_num,
                   kernel_size=mc.cnn_filter_size,
                   padding="same",
                   data_format="channels_first",
                   use_bias=False,
                   kernel_regularizer=l2(mc.l2_reg),
                   name=res_name + "_conv1-" + str(mc.cnn_filter_size) + "-" +
                   str(mc.cnn_filter_num))(x)
        x = BatchNormalization(axis=1, name=res_name + "_batchnorm1")(x)
        x = Activation("relu", name=res_name + "_relu1")(x)
        x = Conv2D(filters=mc.cnn_filter_num,
                   kernel_size=mc.cnn_filter_size,
                   padding="same",
                   data_format="channels_first",
                   use_bias=False,
                   kernel_regularizer=l2(mc.l2_reg),
                   name=res_name + "_conv2-" + str(mc.cnn_filter_size) + "-" +
                   str(mc.cnn_filter_num))(x)
        x = BatchNormalization(axis=1,
                               name="res" + str(index) + "_batchnorm2")(x)
        x = Add(name=res_name + "_add")([in_x, x])
        x = Activation("relu", name=res_name + "_relu2")(x)
        return x

    def compile(self, optimizer, loss, metrics):
        self.model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
        return self.model

    def fit(self, dataset, batch_size, epochs, shuffle, validation_split,
            validation_data, callbacks):
        self.model.fit(x=dataset,
                       batch_size=batch_size,
                       epochs=epochs,
                       shuffle=shuffle,
                       validation_split=validation_split,
                       validation_data=validation_data,
                       callbacks=callbacks)
        return self.model

    def predict(self,
                x,
                batch_size=None,
                steps=None,
                callbacks=None,
                max_queue_size=10,
                workers=1,
                use_multiprocessing=False):
        value = self.model.predict(x=x,
                                   batch_size=batch_size,
                                   steps=steps,
                                   callbacks=callbacks,
                                   max_queue_size=max_queue_size,
                                   workers=workers,
                                   use_multiprocessing=use_multiprocessing)
        return value