Example #1
 def load_resources(self):
     """
     Loads initial resources from disk into the application.
     """
     # model_experimental is better but undocumented
     self.keras_two_channel_model = load_model(
         self.get_resource("FRET_2C_keras_model.h5")
     )  # type: Model
     self.keras_three_channel_model = load_model(
         self.get_resource("FRET_3C_keras_model.h5")
     )  # type: Model
     self.config = ConfigObj(self.get_resource("config.ini"))
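For context, the method above assumes imports along these lines (the exact module paths are an assumption; get_resource is defined elsewhere in the class):

# Assumed imports for the snippet above; module paths are a guess.
from tensorflow.keras.models import load_model, Model  # model deserialization
from configobj import ConfigObj                        # INI-style config files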
Example #2
def start():
    cap = cv2.VideoCapture(0)
    cap.set(3, 650)   # 3 == cv2.CAP_PROP_FRAME_WIDTH
    cap.set(4, 400)   # 4 == cv2.CAP_PROP_FRAME_HEIGHT
    time.sleep(2)

    model_saved = load_model("sdr_model.h5")
    results = ['bkl', 'mel', 'nv', 'working']

    while True:
        # Capture frame-by-frame
        ret, frame = cap.read()
        if frame is None:
            continue  # skip empty frames instead of reusing a stale image
        # save and load again to process
        cv2.imwrite('live.jpg', frame)
        live = cv2.imread('live.jpg')
        # extract features and predict label of image
        img_sample, img_px = etl_one_img(live)
        # print(img_sample.shape)
        label = model_saved.predict_classes(img_sample)
        cv2.putText(frame, results[label[0]], (50, 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2, cv2.LINE_AA)
        # Display the resulting frame
        cv2.imshow('Video', frame)
        # time.sleep(10)
        if cv2.waitKey(30) & 0xFF == ord('q'):
            cap.release()
            cv2.destroyAllWindows()
            break  # exit the loop after releasing the capture device
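Note that Sequential.predict_classes, used above, was deprecated and then removed in TensorFlow 2.6; on current versions the equivalent is an argmax over predict. A minimal sketch:

import numpy as np

# Equivalent of the removed predict_classes() for a softmax classifier:
probs = model_saved.predict(img_sample)   # shape (1, num_classes)
label = np.argmax(probs, axis=-1)         # shape (1,), same as predict_classes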
Example #3
 def __init__(self, model_path=None, output_path=''):
     self.model = None
     self.model_name = None
     self.model_output = output_path + '/qr_{name}_model_[e{epoch}]_[p{precision}]_' \
                         + str(datetime.now().date()) + '.h5'
     if model_path:
         self.model = load_model(model_path)
         self.model.summary()
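The model_output template presumably gets filled in via str.format at save time; a hypothetical call site (the name, epoch, and precision values are made up):

# Hypothetical usage of the template above.
path = self.model_output.format(name='digits', epoch=30, precision=0.97)
# e.g. '<output_path>/qr_digits_model_[e30]_[p0.97]_2020-01-01.h5'
# Note the date was baked in when __init__ ran, not at format() time.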
Example #4
    def train_model(self):
        self.model = load_model(self.model)
        print('Training the model...')
        self.history = self.model.fit(self.X_train,
                                      self.y_train,
                                      batch_size=self.batch_size,
                                      epochs=self.epochs)

        self.model.save('../model/save/model_new.h5')
Example #5
    def predict(self):
        self.model = load_model(self.model)

        text_labels = self.encoder.classes_

        prediction = self.model.predict(self.X_train)
        predicted_label = text_labels[np.argmax(prediction)]

        return predicted_label
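np.argmax without an axis argument flattens its input, so this only returns a sensible label when X_train holds a single sample. A sketch of the batch variant:

# Batch variant: one predicted label per row of the (n_samples, n_classes) output.
predictions = self.model.predict(self.X_train)
predicted_labels = text_labels[np.argmax(predictions, axis=1)]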
Example #6
def cross_validate_tfidf():
    """ Referenced implementation: https://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html """
    X_test, y_test, results = getData(fold="all")

    # best_model = findBestModel(X_test, y_test, results)
    model = load_model("doc2vec_grid_search_results/1574409738.h5")

    y_predict = model.predict(X_test)
    fpr, tpr, auc = get_roc_auc(y_test, y_predict)
    plotROC(fpr, tpr, auc)
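get_roc_auc is not shown here; judging by the referenced scikit-learn example, and by the fact that Example #11 below treats auc as a dict, a per-class sketch might look like this (names and shapes are assumptions):

from sklearn.metrics import roc_curve, auc as sk_auc

def get_roc_auc(y_test, y_predict):
    # Per-class ROC as in the referenced scikit-learn example; assumes one-hot
    # y_test and per-class probability scores y_predict of the same shape.
    fpr, tpr, roc_auc = {}, {}, {}
    for i in range(y_test.shape[1]):
        fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_predict[:, i])
        roc_auc[i] = sk_auc(fpr[i], tpr[i])
    return fpr, tpr, roc_auc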
Example #7
def main(input_file: str, output_file: str):
    extracted_jsdoc = extract.extract_from_file(input_file)
    df = convert.convert_func_to_df(extracted_jsdoc)
    word2vec_code = Word2Vec.load('data/word_vecs/word2vec_model_code.bin')
    word2vec_lang = Word2Vec.load('data/word_vecs/word2vec_model_language.bin')
    vectors = vectorize.df_to_vec(df, word2vec_lang, word2vec_code)
    model = load_model('data/model.h5')
    with open("data/types.json") as f:
        types_map = json.load(f)
    predictions = predict.predict(model, vectors, types_map)
    annotate.annotate(df, predictions, input_file, output_file)
Example #8
def test_model_should_predict_correct_intent():
    input_str = [
        "hi, balu", "hola", "greetings", "show me my leave balance",
        "cancel my leaves", "thank you", "stupid you", "bye", "what can you do"
    ]
    intent_labels = [2, 2, 2, 4, 4, 3, 8, 10, 11]
    vectorizer = pickle.load(open(constants.VECTORIZER_PATH, 'rb'))
    encoded_matrix = vectorizer.transform(input_str).toarray()
    model = load_model(constants.MODEL_PATH)
    print('Encoded Matrix', encoded_matrix)
    result = model.predict(encoded_matrix)
    print('result', result)
    print('result', np.argmax(result, axis=1))
    assert np.sum(np.equal(np.argmax(result, axis=1),
                           np.array(intent_labels))) >= 8
Example #9
    def train(self):
        dict_changes = {
            'Positive': np.array([1., 0., 0., 0., 0., 0.]),
            'Negative': np.array([0., 1., 0., 0., 0., 0.]),
            'Hotline': np.array([0., 0., 1., 0., 0., 0.]),
            'Hooligan': np.array([0., 0., 0., 1., 0., 0.]),
            'Offer': np.array([0., 0., 0., 0., 1., 0.]),
            'SiteAndCoins': np.array([0., 0., 0., 0., 0., 1.]),
        }
        # Repeat the category's one-hot vector once per training sample (six here).
        y_train = [dict_changes[self.category] for _ in range(6)]

        self.model = load_model(self.model)
        self.model.fit(self.X_train, np.array(y_train), batch_size=1, epochs=1)
        self.model.save('../model/save/model_new.h5')
Example #10
def retrain_model(file):
    train_x, train_y = load_data(
        "data/training.1600000.processed.noemoticon.csv")

    model = load_model(file)
    model.fit(train_x, train_y, epochs=1000, batch_size=100)

    evaluate_model(model)
    model.save(file)
    del model


#train_model()

# model = load_model('models/BiLSTM_Sentiment.h5')
# evaluate_model(model)
Example #11
def findBestModel(X_test, y_test, results):
    # Find best model to work with against the 1st fold of data.
    best_model = ""
    most_AUC = 0
    for _, row in results.iterrows():
        file_name = r'grid_search_results/' + row['file_name']
        if not isfile(file_name):
            continue

        model = load_model(file_name)
        y_predict = model.predict(X_test)
        fpr, tpr, auc = get_roc_auc(y_test, y_predict)

        if most_AUC < sum(auc.values()):
            most_AUC = sum(auc.values())
            best_model = file_name
    return best_model
Example #12
def test_model_should_predict_correct_intent():
    labels, sentences = load_dataset('./resources/labels.csv')
    label_map = dict(zip(sentences, labels))
    print(label_map)
    input = np.array([["hi", label_map['greet']], ["balu", label_map['greet']],
                      ["hola", label_map['greet']],
                      ["can i cancel?", label_map['leave_annual_cancel']],
                      ["greetings", label_map['greet']],
                      ["show me my leave balance", label_map['leave_budget']],
                      ["cancel my leaves", label_map['leave_annual_cancel']],
                      ["thank you", label_map['thanks']],
                      ["stupid you", label_map['insult']],
                      ["bye", label_map['goodbye']],
                      ["what can you do", label_map['skills']]])

    result_arr = []
    for i in range(0, 5):
        intent_labels = [int(x) for x in input[:, -1].flatten()]
        input_str = input[:, 0:1].flatten()
        print('inputs', input_str, intent_labels)
        vectorizer = pickle.load(open(constants.VECTORIZER_PATH, 'rb'))
        encoded_matrix = vectorizer.texts_to_sequences(input_str)
        encoded_matrix = pad_sequences(encoded_matrix,
                                       padding="post",
                                       maxlen=SENTENCE_MAX_LENGTH,
                                       truncating="post")
        model = load_model(constants.MODEL_PATH)
        result = model.predict(encoded_matrix)
        print(
            'result', np.argmax(result, axis=1), '\n final comparison ',
            np.sum(np.equal(np.argmax(result, axis=1),
                            np.array(intent_labels))))
        result_arr.append(
            np.sum(np.equal(np.argmax(result, axis=1), np.array(
                intent_labels))) >= 7)

    assert np.array(result_arr).sum() >= 4
Example #13
    models = []

    # Generate the training data
    train, rain = train2_Generator()
    train = np.array(train)
    rain = np.array(rain)
    x_train = train[:, :, :, :10]
    y_train = train[:, :, :, 14].reshape(-1, 40, 40, 1)

    # Split into train and test sets.
    # The 7:3 ratio is not kept, so that more data can be used for training:
    # only 2% is held out for testing.
    x_train, x_test, y_train, y_test = train_test_split(x_train, y_train, test_size=0.02, random_state=0)

    # Train with cross-validation
    train_model(x_train, y_train, rain, k)

    # Load the trained models saved by train_model
    for n in range(k):
        model = load_model('models/model' + str(n) + '.h5', custom_objects={'score':score,'fscore_keras':fscore_keras})
        models.append(model)

    # Generalize performance by averaging the fold models' predictions
    preds = []
    for model in models:
        preds.append(model.predict(x_test))
        print(mae_over_fscore(y_test, preds[-1]))

    pred = sum(preds) / len(preds)
    print(mae_over_fscore(y_test, pred))
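Worth noting: load_model cannot deserialize a model saved with custom metrics unless each one is supplied through custom_objects, which is why score and fscore_keras are passed above. A minimal sketch of the requirement (the metric body here is hypothetical):

import tensorflow.keras.backend as K

def fscore_keras(y_true, y_pred):
    # Hypothetical custom metric; only the name must match what was saved.
    return K.mean(y_pred)

model = load_model('models/model0.h5',
                   custom_objects={'fscore_keras': fscore_keras})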
Example #14
def show(img):
    plt.imshow(img)
    plt.show()


current_dir = os.getcwd() + r"\SNR\stanford_car_dataset_by_classes"
image_path = current_dir + r"\test\Aston Martin V8 Vantage Convertible 2012\01633.jpg"

im = PIL.Image.open(image_path)
sqrWidth = np.ceil(np.sqrt(im.size[0] * im.size[1])).astype(int)
im_resize = im.resize((sqrWidth, sqrWidth))
im_resize.thumbnail((224, 224))
original_img = np.array(im_resize)
show(original_img)

base_model = load_model('learned_vgg16.h5').get_layer('vgg16')
base_model.summary()

# Maximize the activations of these layers
names = ['block4_pool']  # 92160
layers = [base_model.get_layer(name).output for name in names]

# Create the feature extraction model
dream_model = tf.keras.Model(inputs=base_model.input, outputs=layers)


def calc_loss(img, model):
    # Pass forward the image through the model to retrieve the activations.
    # Converts the image into a batch of size 1.
    img_batch = tf.expand_dims(img, axis=0)
    layer_activations = model(img_batch)
Example #15
def loadNetwork(filename):
    return load_model(filename)
Example #16
import pickle

import tensorflow as tf
from tensorflow_core.python.keras.models import load_model
from tensorflow.keras.preprocessing.sequence import pad_sequences
import numpy as np

from mowgli.model.datasets import load_dataset
from mowgli.utils import constants
from mowgli.utils.constants import LABEL_DATA_PATH
from mowgli.model.create_model import SENTENCE_MAX_LENGTH

VECTORIZER = pickle.load(open(constants.VECTORIZER_PATH, 'rb'))
MODEL = load_model(constants.MODEL_PATH)
LABELS, LABEL_SENTENCES = load_dataset(LABEL_DATA_PATH)


def classify(message):
    encoded_matrix = VECTORIZER.texts_to_sequences([message])
    encoded_matrix = pad_sequences(encoded_matrix,
                                   padding="post",
                                   maxlen=SENTENCE_MAX_LENGTH,
                                   truncating="post")
    result = MODEL.predict(encoded_matrix)
    index = np.argmax(result[0])
    return LABEL_SENTENCES[index], 1.0
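classify hardcodes a confidence of 1.0. If the model's output layer is softmax (an assumption), the predicted class's probability could be returned instead; a sketch:

def classify(message):
    encoded_matrix = VECTORIZER.texts_to_sequences([message])
    encoded_matrix = pad_sequences(encoded_matrix, padding="post",
                                   maxlen=SENTENCE_MAX_LENGTH, truncating="post")
    result = MODEL.predict(encoded_matrix)
    index = np.argmax(result[0])
    # Softmax output interpreted as a confidence score instead of a constant 1.0.
    return LABEL_SENTENCES[index], float(result[0][index])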
Example #17
 def load_model(self):
     self.model = load_model('my_model')
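'my_model' with no extension is loaded as a TensorFlow SavedModel directory; Keras infers the format from the path. For reference:

# Both formats round-trip through the same load_model call:
model.save('my_model')          # SavedModel format: a directory
model.save('my_model.h5')       # HDF5 format: a single file
model = load_model('my_model')  # format inferred from the path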
Example #18
 def loadSavedModel(self, fileName="basic"):
     self.__model = load_model(fileName + '.h5')
     return self.__model
Example #19
 def load_model(self):
     self.model = load_model(LOAD_MODULE)
Example #20
             tf.keras.metrics.SparseCategoricalAccuracy()],
)

print("fitting model...")
train_history = model.fit(
    x=df_train.drop('id', axis=1).drop('cuisine', axis=1),
    y=outputs,
    epochs=100,
    validation_split=0.3,
    shuffle=True,
    batch_size=64,
    verbose=True,
    # callbacks=[es, mc]
)
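The commented-out es, mc presumably stand for EarlyStopping and ModelCheckpoint callbacks that would have written ./models/best_model.h5; a minimal sketch of that assumption (the patience value is made up):

# Hypothetical callbacks implied by the commented-out `callbacks=[es, mc]`:
es = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)
mc = tf.keras.callbacks.ModelCheckpoint('./models/best_model.h5',
                                        monitor='val_loss',
                                        save_best_only=True)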

model = load_model("./models/best_model.h5")

loss = train_history.history['loss']
val_loss = train_history.history['val_loss']
plt.plot(loss)
plt.plot(val_loss)
plt.legend(['loss', 'val_loss'])
plt.show()

print('predicting...')

preds = model.predict_proba(x=df_test.drop('id', axis=1))


def find_cuisine(prediction_proba):
    highest = 0
Example #21
 def init_generator_from_trained_model(self, path_model):
     scale_generator = load_model(path_model)
     self.generator.set_weights(scale_generator.get_weights())
Example #22
import numpy as np
from tensorflow_core.python.keras.models import load_model
from nlp_tools import prepare_sentence, extract_fitures, target_names

model = load_model('my_model.h5')

sentence = "сижу на работе"  # Russian: "I'm sitting at work"
prepared = prepare_sentence(sentence)
features = extract_fitures(prepared)
featuresArr = np.asarray([features])
predictions = model.predict(featuresArr)

print(list(zip(target_names, predictions[0])))
Example #23
 def __init__(self, path='models/digit_rec.h5'):
     self.model = models.load_model(path)
Example #24
 def loadSavedModel(self, fileName):
     ext = fileName.split(".")[-1]
     assert ext == "h5", "file name must have the .h5 extension"
     self.__model = load_model(fileName)
     return self.__model
Example #25
def test_models():
    ctx = mx.cpu()

    data_dir = 'data'
    models_dir = 'models'
    target_size = (128, 128, 1)

    dataset = pd.read_csv(os.path.join(data_dir, 'labels.csv'))
    dic = dict(zip(np.unique(dataset.breed), range(len(np.unique(dataset.breed)))))

    net = build_model_mxnet(ctx)
    net.load_parameters(os.path.join(models_dir, 'model.params'))

    model = load_model(os.path.join(models_dir, 'dog-recognition.h5'))

    test_set = dataset.sample(20).reset_index()

    result = []

    res151 = models.resnet152_v1(pretrained=True, ctx=ctx).features
    with res151.name_scope():
        res151.add(gluon.nn.GlobalAvgPool2D())
    res151.collect_params().reset_ctx(ctx)
    res151.hybridize()

    inception = models.inception_v3(pretrained=True, ctx=ctx)
    inception_net = inception.features
    inception_net.collect_params().reset_ctx(ctx)
    inception_net.hybridize()

    for i in tqdm(range(20)):
        # -- Tensorflow
        img = tf_image.load_img(os.path.join(data_dir, 'train', test_set['id'][i]) + '.jpg', target_size=target_size,
                                grayscale=False)

        img = img_to_array(img)
        img = img / 255

        predict_tensorflow = model.predict_classes(np.array([img]))

        # -- MXNet

        img = mx.nd.array(cv2.imread(os.path.join(data_dir, 'train', test_set['id'][i]) + '.jpg'))
        img = transform_test(img)

        img_res151, img_inception = get_features_test(res151, inception_net, img, ctx)
        img_res151 = img_res151.reshape(img_res151.shape[:2])
        img_inception = img_inception.reshape(img_inception.shape[:2])

        img = nd.concat(mx.nd.array(img_inception), mx.nd.array(img_res151))

        predict_mx = nd.softmax(net(nd.array(img).as_in_context(ctx)))

        result.append({
            'id': test_set['id'][i],
            'expected': test_set['breed'][i],
            'tensor': list(dic.keys())[list(dic.values()).index(predict_tensorflow)],
            'mx': list(dic.keys())[list(dic.values()).index(predict_mx.topk(k=1).asnumpy()[0][0])],
            'mx_percentage': predict_mx[0, 0].asscalar()
        })
    print(tabulate(result))
    input("Press Enter to continue...")
Example #26
        sequence = pad_sequences([sequence], maxlen=max_length)

        next_word = model.predict([photo, sequence], verbose=0)
        next_word = argmax(next_word)
        word = word_for_id(next_word, tokenizer)

        if word is None:
            break

        start += ' ' + word
        if word == 'endseq':
            break
    return start


model = load_model('model-ep003-loss3.638-val_loss3.869.h5')
tokenizer = load(open('tokenizer.pkl', 'rb'))

vgg = VGG16()
vgg._layers.pop()  # drop the final classification layer (relies on a private attribute)
vgg = Model(inputs=vgg.inputs, outputs=vgg.layers[-1].output)
image = load_img('../Webserver/assets/pic.jpg', target_size=(224, 224))
image = img_to_array(image)
image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
image = preprocess_input(image)
photo = vgg.predict(image, verbose=0)

caption = generate_desc(model, tokenizer, photo, 34)
caption = ' '.join(caption.split()[1:-1])
print(caption)
sys.stdout.flush()
Example #27
 def __init__(self):
     print("Using loaded model to predict...")
     self.load_model = load_model("iris_model.h5")
Example #28
import h5py
import numpy as np
from tensorflow_core.python.keras.models import load_model

from trainer import segment_data, display_report

model = load_model("best_model")

model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=model.metrics + ["acc"], weighted_metrics=model.metrics + ["acc"])

data = h5py.File("class.hdf5", "r")  # open read-only

data = [(np.log10(data["features"]), data["labels"]),]

new_data, new_labels = segment_data(data)

display_report(model, new_data, new_labels, "New Test", {})
Example #29
from tensorflow_core.python.keras.models import load_model
import numpy as np
import cv2
from preprocessing import RES_X, RES_Y
import matplotlib.pyplot as plt

model = load_model("model/main.h5")
img = cv2.imread("test/dog.jpeg", cv2.IMREAD_COLOR)
# img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = cv2.resize(img, (RES_X, RES_Y))
plt.imshow(img)
plt.show()
model.summary()
img = np.expand_dims(np.array(img), axis=0) / 255

out = model.predict(img)[0]
print(out)
Example #30
The result is 420 test samples divided into the following confusion matrix:

[[193  17]
 [ 10 200]]

Y-axis: true labels (No/Yes), X-axis: predicted labels (No/Yes)
1 2
3 4
1 = 193 No true  & No predicted
2 =  17 No true  & Yes predicted
3 =  10 Yes true & No predicted
4 = 200 Yes true & Yes predicted
"""

# Save the full model (architecture, weights, and optimizer state) so tuning can continue later.
model_file_path = 'D:/My_Programming/Python/Data/medical_trial_model.h5'
model.save(model_file_path)
new_model = load_model(model_file_path)

new_model.summary()
new_model.get_weights()
"""
model.save_weights('my_model_weights.h5')
# architecture
model2 = Sequential([
    Dense(units=16, input_shape=(1,), activation='relu'),
    Dense(units=32, activation='relu'),
    Dense(units=2, activation='softmax')
])
# weights
model2.load_weights('my_model_weights.h5')
"""