def predict(model_path, label, x):
    """Predict with the saved Keras model for *label* on raw texts *x*.

    Parameters
    ----------
    model_path : path to the persisted model artefacts, forwarded to read_model.
    label : label/task identifier, forwarded to read_model.
    x : iterable of raw text strings.

    Returns
    -------
    The raw output of Keras ``model.predict`` on the tokenized, padded input.
    """
    config, model, tokenizer = read_model(model_path, label)
    # NOTE(review): the original also built a KerasClassifier(build_fn=...,
    # epochs=..., batch_size=...) wrapper and assigned the loaded model to it,
    # but the wrapper was never used — prediction went straight through
    # model.predict. That dead construction has been removed.
    x = [tokenize(text) for text in x]
    # Texts -> integer sequences, padded/truncated to the length the model
    # was trained with.
    x = pad_sequences(tokenizer.texts_to_sequences(x),
                      padding=config['padding'],
                      maxlen=config['max_len'])
    return model.predict(x)
def load_pipeline_keras() -> Pipeline:
    """Load a Keras classification Pipeline from disk.

    Rebuilds a KerasClassifier wrapper around the model persisted at
    ``config.CLASSIFICATION_MODEL_PATH`` and returns it wrapped in a
    scikit-learn Pipeline.
    """
    def build_model():
        # build_fn is only invoked if the wrapper is (re)fit; for inference
        # we attach the already-loaded model below.
        return load_model(config.CLASSIFICATION_MODEL_PATH)

    classifier = KerasClassifier(
        build_fn=build_model,
        epochs=30,
        batch_size=200,
        validation_split=0.05,
        verbose=2,
        callbacks=m.clc_callbacks_list,
    )
    # Attach the trained model so predict() works without a fit() call.
    classifier.model = build_model()
    return Pipeline([('lstm_model_classification', classifier)])
def load_pipeline_keras():
    """Load the persisted CNN Pipeline (preprocessing + KerasClassifier) from disk."""
    dataset = joblib.load(config.PIPELINE_PATH)

    def build_model():
        # Only called if the wrapper is re-fit; inference uses the model
        # attached below.
        return load_model(config.MODEL_PATH)

    classifier = KerasClassifier(
        build_fn=build_model,
        batch_size=config.BATCH_SIZE,
        # BUG FIX: validation_split must be a float in [0, 1); the original
        # value 10 would make Keras raise at fit time. 0.1 holds out 10%.
        validation_split=0.1,
        epochs=config.EPOCHS,
        verbose=2,
        callbacks=m.callbacks_list,
    )
    # Restore the fitted-classifier attributes so predict() works without fit().
    classifier.classes_ = joblib.load(config.CLASSES_PATH)
    classifier.model = build_model()
    return Pipeline([('dataset', dataset), ('cnn_model', classifier)])
# Load the previously pickled classification pipeline from disk.
# NOTE(review): pickle.load on an arbitrary file path executes code on
# untrusted input — only point --pipeline at trusted artefacts.
with open(args.pipeline, 'rb') as f:
    pipeline = pickle.load(f)
if args.keras:
    # extra code for rebuilding a KerasClassifier wrapper object
    # this is necessary because KerasClassifiers are not well suited for I/O
    # but scikit-multilearn handles KerasClassifiers better than native Keras models
    import h5py
    from keras.wrappers.scikit_learn import KerasClassifier
    from keras.models import load_model
    from ml_utils import create_keras_model
    # look for and load all Keras models found in the same directory as the pipeline
    pipeline_dir = os.path.dirname(args.pipeline)
    clf_filename = os.path.join(pipeline_dir, "model.h5")
    clf = KerasClassifier(create_keras_model)
    # Attach the trained model and its class labels (stored as an HDF5
    # attribute) so the wrapper can predict without being re-fit.
    clf.model = load_model(clf_filename)
    with h5py.File(clf_filename, "r") as clf_h5:
        clf.classes_ = clf_h5.attrs["classes"]
    pipeline.classifier = clf
if args.thresholds:
    # Comma-separated per-label decision thresholds, e.g. "0.3,0.5,0.4".
    pipeline.set_threshold(
        [float(tt) for tt in args.thresholds.split(',')])
    print("Thresholds:", pipeline.threshold)
if args.lyrics is not None:
    # One-shot mode: classify the lyrics given on the command line.
    pipeline.classify_text(args.lyrics, verbose=True)
else:
    # Interactive mode: prompt until Ctrl-C / EOF.
    # NOTE(review): the loop body that consumes `inp` continues beyond this
    # chunk — only the prompt/exit handling is visible here.
    while True:
        try:
            inp = input("\nEnter your lyrics:")
        except (KeyboardInterrupt, EOFError):
            break
# Flask application configuration and database handle.
app.config.from_pyfile('config.py')
db = SQLAlchemy(app)

# load model
# Module-level side effect: the trained CNN and its class labels are loaded
# once at import/startup time and exposed via the global `model_wrapper`.
global model_wrapper
MODEL_FOLDER = "ml_core/model/"
MODEL_NAME = "cnn_model_training.h5"
CLASS_NAME = "cnn_class_training.pkl"
# NOTE(review): pickle.load executes code from the file — keep this artefact
# trusted/under version control.
model_class = pickle.load(open(MODEL_FOLDER + CLASS_NAME, 'rb'))
# build_fn/epochs/batch_size only matter if the wrapper is ever re-fit;
# for serving, the trained model is attached directly below.
model_wrapper = KerasClassifier(build_fn=get_cnn_model, epochs=25, batch_size=6)
# custom_objects maps the custom metric names saved with the model back to
# their implementations so load_model can deserialize it.
model_wrapper.model = load_model(MODEL_FOLDER + MODEL_NAME, custom_objects={
    "rec": rec, "prec": prec, "f1": f1 })
model_wrapper.classes_ = model_class
model_wrapper.model.summary()

# Define models
# Association table for the many-to-many User <-> Role relationship
# (Flask-Security convention).
roles_users = db.Table(
    'roles_users',
    db.Column('user_id', db.Integer(), db.ForeignKey('user.id')),
    db.Column('role_id', db.Integer(), db.ForeignKey('role.id')))

class Role(db.Model, RoleMixin):
    # Primary key; `id` shadows the builtin but is the SQLAlchemy convention.
    id = db.Column(db.Integer(), primary_key=True)
    # Role name, e.g. "admin"; must be unique.
    name = db.Column(db.String(80), unique=True)