Code Example #1
def build_transformer_model_for_pretraining():
    """构建训练模型,通用于TPU/GPU
    注意全程要用keras标准的层写法,一些比较灵活的“移花接木”式的
    写法可能会在TPU上训练失败。此外,要注意的是TPU并非支持所有
    tensorflow算子,尤其不支持动态(变长)算子,因此编写相应运算
    时要格外留意。
    """
    bert, train_model, loss = build_transformer_model_with_mlm()

    # Optimizer
    optimizer = extend_with_weight_decay(Adam)
    if which_optimizer == 'lamb':
        optimizer = extend_with_layer_adaptation(optimizer)
    optimizer = extend_with_piecewise_linear_lr(optimizer)
    optimizer_params = {
        'learning_rate': learning_rate,
        'lr_schedule': lr_schedule,
        'weight_decay_rate': weight_decay_rate,
        'exclude_from_weight_decay': exclude_from_weight_decay,
        'bias_correction': False,
    }
    if grad_accum_steps > 1:
        optimizer = extend_with_gradient_accumulation(optimizer)
        optimizer_params['grad_accum_steps'] = grad_accum_steps
    optimizer = optimizer(**optimizer_params)

    # Compile the model
    train_model.compile(loss=loss, optimizer=optimizer)

    # If weights are passed in, load them. Note: they must be loaded at this point to avoid errors.
    if checkpoint_path is not None:
        bert.load_weights_from_checkpoint(checkpoint_path)

    return train_model
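
Note: as the docstring warns, TPUs cannot handle dynamic (variable-length) shapes, so inputs are normally padded to a fixed length before being fed to the model. A minimal sketch of such a padding step follows; the helper name, sequence length and example ids are assumptions for illustration, not part of the original script.

import numpy as np

def pad_to_fixed_length(sequences, seq_len=512, pad_id=0):
    """Pad or truncate each id sequence to a fixed length (hypothetical helper)."""
    batch = np.full((len(sequences), seq_len), pad_id, dtype='int32')
    for i, ids in enumerate(sequences):
        ids = list(ids)[:seq_len]
        batch[i, :len(ids)] = ids
    return batch

# e.g. pad_to_fixed_length([[101, 2769, 102], [101, 102]], seq_len=8)
# returns an array of shape (2, 8) with zeros in the padded positions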
Code Example #2
File: model.py  Project: 4AI/AGN
    def build(self):
        bert_model, _ = load_bert(
            config_path=os.path.join(self.config['pretrained_model_dir'],
                                     'bert_config.json'),
            checkpoint_path=os.path.join(self.config['pretrained_model_dir'],
                                         'bert_model.ckpt'),
        )
        # mask of non-padding tokens, shape (B, L, 1)
        text_mask = L.Lambda(
            lambda x: K.cast(K.expand_dims(K.greater(x, 0), 2), K.floatx()))(
                bert_model.input[0])
        # GI
        gi_in = L.Input(name="gi",
                        shape=(self.config["max_len"], ),
                        dtype="float32")
        gi = gi_in

        # AGN
        X = bert_model.output
        gi = L.Dense(self.config['max_len'], activation='tanh')(gi)  # (B, L)
        gi = L.Lambda(lambda x: K.expand_dims(x, 2))(gi)  # (B, L, 1)
        X, attn_weight = AGN(epsilon=self.config['epsilon'])([X, gi])
        # mask out padded positions so they cannot win the max pooling below
        X = L.Lambda(lambda x: x[0] - 1e10 * (1.0 - x[1]))([X, text_mask])
        # global max pooling over the sequence dimension
        output = L.Lambda(lambda x: K.max(x, 1))(X)
        #output = L.Dense(128, activation='relu')(output)
        output = L.Dropout(self.config.get('dropout', 0.2))(output)
        output = L.Dense(self.config['output_size'],
                         activation='softmax')(output)
        self.model = keras.Model(inputs=(*bert_model.input, gi_in),
                                 outputs=output)
        self.attn_model = keras.Model(inputs=(*bert_model.input, gi_in),
                                      outputs=attn_weight)

        optimizer = extend_with_weight_decay(Adam)
        optimizer = extend_with_piecewise_linear_lr(optimizer)
        optimizer_params = {
            'learning_rate': self.config['learning_rate'],
            'lr_schedule': {
                self.config['steps_per_epoch'] * 2: 1,
                self.config['steps_per_epoch'] * 3: 0.2,
                self.config['steps_per_epoch'] * self.config['epochs']: 0.1
            },
            'weight_decay_rate': 0.01,
            'exclude_from_weight_decay': ['Norm', 'bias'],
            'bias_correction': False,
        }

        self.model.compile(
            loss='sparse_categorical_crossentropy',
            optimizer=optimizer(**optimizer_params),
        )
        self.model.summary()

        if self.config.get('apply_fgm', True):
            print('apply fgm')
            fgm(self.model, 'Embedding-Token',
                self.config.get('fgm_epsilon', 0.2))
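
Note: a minimal sketch of calling the two models compiled above. The object name agn (an instance of the class whose build() method is shown), the assumption that the underlying BERT model takes token ids and segment ids as its two inputs, and the max_len value are all illustrative, not taken from the project.

import numpy as np

max_len = 128  # assumed value of config["max_len"]
token_ids = np.zeros((1, max_len), dtype='int32')    # BERT token ids (padded)
segment_ids = np.zeros((1, max_len), dtype='int32')  # BERT segment ids
gi = np.zeros((1, max_len), dtype='float32')         # global information feature

probs = agn.model.predict([token_ids, segment_ids, gi])      # (1, output_size)
attn = agn.attn_model.predict([token_ids, segment_ids, gi])  # AGN attention weights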
Code Example #3
def get_suggested_optimizer(init_lr=5e-5, total_steps=None):
    lr_schedule = {1000: 1, 10000: 0.01}
    if total_steps is not None:
        lr_schedule = {total_steps // 10: 1, total_steps: 0.1}
    optimizer = extend_with_weight_decay(Adam)
    optimizer = extend_with_piecewise_linear_lr(optimizer)
    optimizer_params = {
        'learning_rate': init_lr,
        'lr_schedule': lr_schedule,
        'weight_decay_rate': 0.01,
        'exclude_from_weight_decay': ['Norm', 'bias'],
        'bias_correction': False,
    }
    optimizer = optimizer(**optimizer_params)
    return optimizer
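
Note: a minimal sketch of wiring the suggested optimizer into a downstream classifier. The checkpoint paths, the number of classes, the total step count and the classification head are placeholders/assumptions; only get_suggested_optimizer itself comes from the snippet above.

from bert4keras.backend import keras
from bert4keras.models import build_transformer_model

config_path = 'bert_config.json'     # placeholder: path to a BERT config
checkpoint_path = 'bert_model.ckpt'  # placeholder: path to a BERT checkpoint

bert = build_transformer_model(config_path, checkpoint_path)
cls_vector = keras.layers.Lambda(lambda x: x[:, 0])(bert.output)  # [CLS] vector
output = keras.layers.Dense(2, activation='softmax')(cls_vector)  # assumed 2 classes
model = keras.models.Model(bert.inputs, output)

optimizer = get_suggested_optimizer(init_lr=2e-5, total_steps=10000)
model.compile(loss='sparse_categorical_crossentropy', optimizer=optimizer)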
Code Example #4
def build_train_bert_model():
    """构建训练模型,通用于TPU/GPU
    注意全程要用keras标准的层写法,一些比较灵活的“移花接木”式的
    写法可能会在TPU上训练失败。此外,要注意的是TPU并非支持所有
    tensorflow算子,尤其不支持动态(变长)算子,因此编写相应运算
    时要格外留意。
    """
    bert = build_bert_model(config_path,
                            with_mlm='linear',
                            application='lm',
                            return_keras_model=False)
    token_ids = bert.model.input[0]
    proba = bert.model.output

    def lm_loss(inputs):
        """计算loss的函数,需要封装为一个层
        """
        y_true, y_pred, mask = inputs
        y_true = y_true[:, 1:]
        y_pred = y_pred[:, :-1]
        mask = mask[:, 1:]
        loss = K.sparse_categorical_crossentropy(y_true,
                                                 y_pred,
                                                 from_logits=True)
        loss = K.sum(loss * mask) / (K.sum(mask) + K.epsilon())
        return loss

    def lm_acc(inputs):
        """计算准确率的函数,需要封装为一个层
        """
        y_true, y_pred, mask = inputs
        y_true = K.cast(y_true, K.floatx())
        y_true = y_true[:, 1:]
        y_pred = y_pred[:, :-1]
        mask = mask[:, 1:]
        acc = keras.metrics.sparse_categorical_accuracy(y_true, y_pred)
        acc = K.sum(acc * mask) / (K.sum(mask) + K.epsilon())
        return acc

    mask = bert.model.get_layer('Sequence-Mask').output
    loss = Lambda(lm_loss, name='lm_loss')([token_ids, proba, mask])
    acc = Lambda(lm_acc, name='lm_acc')([token_ids, proba, mask])

    train_model = Model(bert.model.inputs, [loss, acc])

    # Optimizer
    optimizer = extend_with_weight_decay(Adam)
    if which_optimizer == 'lamb':
        optimizer = extend_with_layer_adaptation(optimizer)
    optimizer = extend_with_piecewise_linear_lr(optimizer)
    optimizer_params = {
        'learning_rate': learning_rate,
        'lr_schedule': lr_schedule,
        'weight_decay_rate': weight_decay_rate,
        'exclude_from_weight_decay': exclude_from_weight_decay,
        'bias_correction': False,
    }
    if grad_accum_steps > 1:
        optimizer = extend_with_gradient_accumulation(optimizer)
        optimizer_params['grad_accum_steps'] = grad_accum_steps
    optimizer = optimizer(**optimizer_params)

    # Compile the model
    train_model.compile(
        loss={
            'lm_loss': lambda y_true, y_pred: y_pred,
            'lm_acc': lambda y_true, y_pred: K.stop_gradient(y_pred),
        },
        optimizer=optimizer,
    )

    # If weights are passed in, load them. Note: they must be loaded at this point to avoid errors.
    if checkpoint_path is not None:
        bert.load_weights_from_checkpoint(checkpoint_path)

    return train_model
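
Note: the slicing in lm_loss / lm_acc is the standard next-token shift: the prediction at position t is scored against the token at position t+1. A tiny NumPy illustration of the alignment, with made-up ids:

import numpy as np

token_ids = np.array([[101, 2769, 1963, 102]])  # made-up ids: [CLS] w1 w2 [SEP]
y_true = token_ids[:, 1:]  # targets are the following tokens: 2769, 1963, 102
# y_pred has shape (batch, seq_len, vocab); dropping its last step with
# y_pred[:, :-1] lines it up with y_true, and mask[:, 1:] keeps only
# positions that actually have a next token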
Code Example #5
File: train.py  Project: charlesXu86/Chatbot_S2S
model = build_transformer_model(
    config_path,
    checkpoint_path,
    model='nezha',
    application='lm',
    keep_tokens=keep_tokens,  # keep only the tokens in keep_tokens, trimming the original vocabulary
    compound_tokens=compound_tokens,  # extra words to add to the vocabulary
)

output = CrossEntropy(1)([model.inputs[0], model.outputs[0]])

model = Model(model.inputs, output)
model.summary()

AdamW = extend_with_weight_decay(Adam, 'AdamW')
AdamWG = extend_with_gradient_accumulation(AdamW, 'AdamWG')
optimizer = AdamWG(learning_rate=2e-5,
                   weight_decay_rate=0.01,
                   exclude_from_weight_decay=['Norm', 'bias'],
                   grad_accum_steps=16)
model.compile(optimizer=optimizer)


class ChatBot(AutoRegressiveDecoder):
    """基于随机采样对话机器人
    """
    @AutoRegressiveDecoder.wraps(default_rtype='probas')
    def predict(self, inputs, output_ids, states):
        token_ids, segment_ids = inputs
        token_ids = np.concatenate([token_ids, output_ids], 1)
Code Example #6
File: train.py  Project: zhurui-xiaozhuzaizai/WoBERT
model = build_transformer_model(
    config_path,
    checkpoint_path,
    with_mlm='linear',
    keep_tokens=keep_tokens,  # keep only the tokens in keep_tokens, trimming the original vocabulary
    compound_tokens=compound_tokens,  # added words, initialized from the average of their character embeddings
)

# Model used for training
y_in = keras.layers.Input(shape=(None, ))
outputs = CrossEntropy(1)([y_in, model.output])

train_model = keras.models.Model(model.inputs + [y_in], outputs)

AdamW = extend_with_weight_decay(Adam, name='AdamW')
AdamWG = extend_with_gradient_accumulation(AdamW, name='AdamWG')
optimizer = AdamWG(
    learning_rate=5e-6,
    weight_decay_rate=0.01,
    exclude_from_weight_decay=['Norm', 'bias'],
    grad_accum_steps=16,
)
train_model.compile(optimizer=optimizer)
train_model.summary()


class Evaluator(keras.callbacks.Callback):
    """训练回调
    """
    def on_epoch_end(self, epoch, logs=None):
Code Example #7
File: pretraining.py  Project: will-wiki/bert4keras
def build_train_bert_model():
    """构建训练模型,通用于TPU/GPU
    注意全程要用keras标准的层写法,一些比较灵活的“移花接木”式的
    写法可能会在TPU上训练失败。此外,要注意的是TPU并非支持所有
    tensorflow算子,尤其不支持动态(变长)算子,因此编写相应运算
    时要格外留意。
    """
    bert = build_bert_model(config_path, with_mlm='linear', return_keras_model=False)
    bert_model = bert.model
    proba = bert_model.output

    # Auxiliary inputs
    token_ids = Input(shape=(None, ), dtype='int64', name='token_ids')  # target token ids
    is_masked = Input(shape=(None, ), dtype='bool', name='is_masked')  # mask flags

    def mlm_loss(inputs):
        """计算loss的函数,需要封装为一个层
        """
        y_true, y_pred, is_masked = inputs
        is_masked = K.cast(is_masked, K.floatx())
        loss = K.sparse_categorical_crossentropy(y_true, y_pred, from_logits=True)
        loss = K.sum(loss * is_masked) / (K.sum(is_masked) + K.epsilon())
        return loss

    def mlm_acc(inputs):
        """计算准确率的函数,需要封装为一个层
        """
        y_true, y_pred, is_masked = inputs
        is_masked = K.cast(is_masked, K.floatx())
        y_true = K.cast(y_true, K.floatx())
        acc = keras.metrics.sparse_categorical_accuracy(y_true, y_pred)
        acc = K.sum(acc * is_masked) / (K.sum(is_masked) + K.epsilon())
        return acc

    loss = Lambda(mlm_loss, name='mlm_loss')([token_ids, proba, is_masked])
    acc = Lambda(mlm_acc, name='mlm_acc')([token_ids, proba, is_masked])

    train_model = Model(bert_model.inputs + [token_ids, is_masked], [loss, acc])

    # Optimizer
    optimizer = extend_with_weight_decay(Adam)
    if which_optimizer == 'lamb':
        optimizer = extend_with_layer_adaptation(optimizer)
    optimizer = extend_with_piecewise_linear_lr(optimizer)
    optimizer_params = {
        'learning_rate': learning_rate,
        'lr_schedule': lr_schedule,
        'weight_decay_rate': weight_decay_rate,
        'exclude_from_weight_decay': exclude_from_weight_decay,
        'bias_correction': False,
    }
    if grad_accum_steps > 1:
        optimizer = extend_with_gradient_accumulation(optimizer)
        optimizer_params['grad_accum_steps'] = grad_accum_steps
    optimizer = optimizer(**optimizer_params)

    # Compile the model
    train_model.compile(
        loss={
            'mlm_loss': lambda y_true, y_pred: y_pred,
            'mlm_acc': lambda y_true, y_pred: K.stop_gradient(y_pred),
        },
        optimizer=optimizer,
    )

    # If weights are passed in, load them. Note: they must be loaded at this point to avoid errors.
    if checkpoint_path is not None:
        bert.load_weights_from_checkpoint(checkpoint_path)

    return train_model