# Example 1
# Replace the classification head with a ScaleDense layer that inherits the
# predecessor's softmax weights but trains them with a 5x learning-rate boost.
dense = ScaleDense(
    units=num_classes,
    activation='softmax',
    lr_multiplier=5,
    weights=predecessor_model.layers[-1].get_weights(),
)
output = dense(output)

# Assemble the successor model on top of the predecessor's inputs and compile it.
predecessor_3_model = Model(predecessor_model.inputs, output)
predecessor_3_model.compile(
    optimizer=Adam(1e-5),  # a sufficiently small learning rate
    loss='sparse_categorical_crossentropy',
    metrics=['sparse_categorical_accuracy'],
)
predecessor_3_model.summary()

if __name__ == '__main__':
    epochs = 5

    # Stage 1: train the predecessor, checkpointing its best weights.
    predecessor_evaluator = Evaluator('best_predecessor.weights')
    predecessor_model.fit_generator(
        train_generator.generator(),
        steps_per_epoch=len(train_generator),
        epochs=epochs,
        callbacks=[predecessor_evaluator],
    )

    # Stage 2: warm-start from the best predecessor checkpoint, then train
    # the extended model with its own checkpoint file.
    predecessor_model.load_weights('best_predecessor.weights')
    predecessor_3_evaluator = Evaluator('best_predecessor_3.weights')
    predecessor_3_model.fit_generator(
        train_generator.generator(),
        steps_per_epoch=len(train_generator),
        epochs=10,
        callbacks=[predecessor_3_evaluator],
    )
# Example 2
        trans = K.eval(CRF.trans)
        wordseg.trans = trans
        print(trans)
        acc = evaluate(val_data)

        if acc > self.best_acc:
            self.best_acc = acc
            model.save_weights('./best_model.weights')
        print('acc is: {:.3f}, best acc is :{:.4f}'.format(acc, self.best_acc))

    def on_train_end(self, logs=None):
        # After training finishes, restore the best checkpoint saved during
        # the epochs and run the external evaluation on the test files.
        # NOTE(review): uses the module-level `model`, not `self.model` —
        # presumably the same object; confirm against the surrounding file.
        model.load_weights('./best_model.weights')
        public_evaluate(test_path, test_result_path, test_score_path)


# Wrap Adam with gradient accumulation and compile against the CRF losses.
AccumAdam = extend_with_gradient_accumulation(Adam)
opt = AccumAdam(learning_rate=lr)
model.compile(
    loss=CRF.sparse_loss,
    optimizer=opt,
    metrics=[CRF.sparse_accuracy],
)

if __name__ == '__main__':
    # Script mode: train from scratch, checkpointing the best weights
    # through the Evaluator callback.  (Fixes the `train_genarator` typo.)
    evaluator = Evaluator()
    train_generator = data_generator(train_data, batch_size)
    model.fit_generator(
        train_generator.generator(),
        steps_per_epoch=len(train_generator),
        epochs=epochs,
        callbacks=[evaluator],
    )
else:
    # Module mode: just restore the best checkpoint for inference.
    model.load_weights('./best_model.weights')
        total += len(y_true)
        right += (y_true == y_pred).sum()
    print(total, right)
    return right / total


class Evaluator(keras.callbacks.Callback):
    """Per-epoch validation callback that checkpoints the best weights.

    After every epoch it runs the module-level ``evaluate()`` helper and,
    whenever validation accuracy improves, saves the model's weights to
    ``save_path``.
    """

    def __init__(self, save_path='best_model.weights'):
        # Initialize base Callback state (model/params hooks) before our own;
        # the original omitted this, which is fragile across Keras versions.
        super().__init__()
        self.best_val_acc = 0.      # best validation accuracy seen so far
        self.save_path = save_path  # where the best weights are written

    def on_epoch_end(self, epoch, logs=None):
        # `evaluate` is a module-level helper defined elsewhere in this file.
        val_acc = evaluate()
        if val_acc > self.best_val_acc:
            self.best_val_acc = val_acc
            self.model.save_weights(self.save_path)

        print('current acc :{}, best val acc: {}'.format(
            val_acc, self.best_val_acc))


if __name__ == '__main__':
    # Script mode: train with per-epoch evaluation/checkpointing.
    evaluator = Evaluator()
    train_model.fit_generator(
        train_generator.generator(),
        steps_per_epoch=len(train_generator),
        epochs=5,
        callbacks=[evaluator],
    )

else:
    # Module mode: only restore the best checkpoint for inference.
    classifier.load_weights('best_model.weights')
    total, right = 0., 0.
    for x_true, y_true in tqdm(data):
        y_pred = model.predict(x_true).argmax(axis=1)
        y_true = y_true[:, 0]
        total += len(y_true)
        right += (y_true == y_pred).sum()

    return right / total


class Evaluator(keras.callbacks.Callback):
    """Checkpoint callback: saves weights whenever validation accuracy improves.

    Runs the module-level ``evaluate(val_generator)`` helper after each epoch
    and writes the weights to ``best_baseline.weights`` on improvement.
    """

    def __init__(self):
        # Initialize base Callback state; the original omitted this, which is
        # fragile across Keras versions.
        super().__init__()
        self.best_acc = 0.  # best validation accuracy seen so far

    def on_epoch_end(self, epoch, logs=None):
        acc = evaluate(val_generator)
        if acc > self.best_acc:
            self.best_acc = acc
            self.model.save_weights('best_baseline.weights')
        print('acc: {}, best acc: {}'.format(acc, self.best_acc))


if __name__ == '__main__':
    # Script mode: train with per-epoch evaluation/checkpointing.
    evaluator = Evaluator()
    model.fit_generator(
        train_generator.generator(),
        steps_per_epoch=len(train_generator),
        epochs=epochs,
        callbacks=[evaluator],
    )
else:
    # Module mode: only restore the best checkpoint for inference.
    model.load_weights('best_baseline.weights')
# Example 5
        right += (y_true == y_pred).sum()
    return right / total


class Evaluator(keras.callbacks.Callback):
    """Checkpoint callback parameterized by the target weights filename.

    Runs ``evaluate(valid_generator, self.model)`` after each epoch and saves
    the model's weights to ``savename`` whenever validation accuracy improves.
    """

    def __init__(self, savename):
        # Initialize base Callback state; the original omitted this, which is
        # fragile across Keras versions.
        super().__init__()
        self.best_val_acc = 0.   # best validation accuracy seen so far
        self.savename = savename # checkpoint file path

    def on_epoch_end(self, epoch, logs=None):
        val_acc = evaluate(valid_generator, self.model)
        if val_acc > self.best_val_acc:
            self.best_val_acc = val_acc
            self.model.save_weights(self.savename)
        print(
            u'val_acc: %.5f, best_val_acc: %.5f\n' %
            (val_acc, self.best_val_acc)
        )


if __name__ == '__main__':
    # Script mode: compile for fine-tuning and train with checkpointing.
    evaluator = Evaluator('best_clf.weights')
    model.compile(
        loss='sparse_categorical_crossentropy',
        optimizer=Adam(1e-5),
        metrics=['acc'],
    )
    model.fit_generator(
        train_sim_generator.generator(),
        steps_per_epoch=len(train_sim_generator) * 2,
        epochs=5,
        callbacks=[evaluator],
    )
else:
    # Module mode: only restore the best checkpoint for inference.
    model.load_weights('best_clf.weights')
                'answers': [{
                    'text': new_answer
                }]
            })
    paragraphs = []
    for context, qas in paras.items():
        paragraphs.append({'context': context, 'qas': qas})

    data = {'data': [{'paragraphs': paragraphs}]}

    with open(file_name, 'w') as f:
        json.dump(data, f)


if __name__ == '__main__':
    # Script mode: fine-tune on train+val with per-epoch checkpointing first.
    train_generator = data_generator(train_data + val_data, batch_size)
    evaluator = Evaluator()
    model.fit_generator(
        train_generator.generator(),
        steps_per_epoch=len(train_generator),
        epochs=epochs,
        callbacks=[evaluator],
    )

# Both modes end identically: restore the trained weights and generate the
# question/answer data file (tail de-duplicated out of the if/else branches).
model.load_weights('question_answer_generation.weights')
file_name = '/home/mingming.xu/datasets/NLP/qa/dureader_robust-data/train_qa_generator.json'
generate_new_data(file_name)
        if acc > self.best_acc:
            self.best_acc = acc
            self.model.save_weights('best_pet_model.weights')
        print('acc :{}, best acc:{}'.format(acc, self.best_acc))


def write_to_file(path):
    """Predict over the test set and write tab-separated result rows to `path`.

    Each output line is ``<field0>\t<field2>\t<prediction>`` for the
    corresponding entry of the module-level ``test_data``.
    """
    predictions = []
    for batch_x, _ in tqdm(test_generator):
        predictions.extend(predict(batch_x))

    # Pair each test sample with its prediction and render one row per sample.
    rows = [
        '\t'.join([sample[0], sample[2], str(p)])
        for sample, p in zip(test_data, predictions)
    ]

    with open(path, 'w') as f:
        f.writelines(row + '\n' for row in rows)


if __name__ == '__main__':
    # Train with per-epoch evaluation/checkpointing.
    evaluator = Evaluator()
    train_model.fit_generator(
        train_generator.generator(),
        steps_per_epoch=len(train_generator),
        epochs=10,
        callbacks=[evaluator],
    )

    # Reload the best checkpoint before writing the submission file.
    train_model.load_weights('best_pet_model.weights')
    write_to_file('submission.tsv')