Example #1
def rebuild_model(model_path, channel_n):
    # Rebuild trained model, but change the first layer to accept different
    # input size
    old_model = k.models.load_model(model_path)
    temp_weights = [layer.get_weights() for layer in old_model.layers]

    # Rebuild the exact same but change input size
    input_time_length = old_model.layers[0].input_shape[1]
    inp = k.layers.Input((1, input_time_length, 1), name='input')
    l = inp
    # We don't care about the softmax
    for layer in old_model.layers[1:-1]:
        conf = layer.get_config()
        # Pick what we need from the original layer, change the dims
        change_weights = False
        if 'kernel_size' in conf:
            # the config stores kernel_size as a tuple, so rebuild it with a
            # single-channel first dimension
            conf['kernel_size'] = (1,) + tuple(conf['kernel_size'][1:])
            change_weights = True
        new_layer = k.layers.deserialize({
            'class_name': layer.__class__.__name__,
            'config': conf
        })
        # build the layer by calling it before loading weights; slice the kernel
        # down to the selected channel when kernel_size was shrunk above
        l = new_layer(l)
        new_weights = layer.get_weights()
        if change_weights:
            new_weights = [new_weights[0][channel_n:channel_n + 1]] + new_weights[1:]
        new_layer.set_weights(new_weights)

    new_model = k.models.Model(inp, l)

    return new_model
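A minimal usage sketch for the helper above, assuming a trained multi-channel checkpoint exists at a hypothetical path model.h5:

# hypothetical checkpoint path; channel_n picks which input channel to keep
single_channel_model = rebuild_model('model.h5', channel_n=0)
single_channel_model.summary()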
Example #2
def compute_nl_measure(model_path, measure='lyap'):
    idxs = pd.MultiIndex.from_product([list(range(1, 134)), ['a', 'b']],
                                      names=['patient', 'trial'])
    cols = ['channel', 'value', 'layer', 'unit', 'filter']
    df = pd.DataFrame(index=idxs, columns=cols)
    df = df.astype({
        'channel': str,
        'value': float,
        'layer': int,
        'unit': int,
        'filter': int,
    })

    model = k.models.load_model(model_path)
    sizes, strides = receptive_field_sizes_and_strides(model)

    # build the measures output path from the model file name and the measure
    model_name, _ = splitext(split(model_path)[1])
    measures_path = join(CORRS_ROOT, '_'.join((model_name, measure)))

    for alg in algos.registered_algos:
        if alg.algo_name == measure:
            mem_alg = Memoize(alg)
            break
    else:
        raise Exception(f'Algorithm {measure} not registered.')

    for file in files_builder(DataKind.PROCESSED):
        idx = file.id
        trial = file.trial
        # Do the same TS processing we did to construct the model input
        logging.info(f'Trial {idx}-{trial}...')
        for channel in CHANNEL_NAMES:
            data = preprocess(file.df[channel])
            # Compute values over each unit's receptive field in every layer
            logging.info(f'Channel {channel}...')
            for layer_n, layer in enumerate(model.layers):
                shape = layer.output_shape
                filters = range(shape[-1])
                units = range(shape[1])
                logging.info(f'Layer {layer_n} with {shape[-1]} '
                             f'filters, each {shape[1]} units...')
                # for filter_n, unit_n in product(filters, units):
                for unit_n in units:
                    start, end = get_rf_start_end(sizes, strides, unit_n,
                                                  layer_n)
                    value = mem_alg(data, start, end)
                    df.loc[(idx, trial), 'channel'] = channel
                    df.loc[(idx, trial), 'value'] = value
                    df.loc[(idx, trial), 'layer'] = layer_n
                    df.loc[(idx, trial), 'unit'] = unit_n
                    # df.loc[(idx, trial), 'filter'] = filter_n

        df.to_pickle(measures_path)
Example #3
def run_validation_cases(validation_keys_file, model_file, training_modalities, labels, hdf5_file,
                         output_label_map=False, output_dir=".", threshold=0.5, overlap=16, permute=False):
    validation_indices = pickle_load(validation_keys_file)
    model = load_model(model_file)
    data_file = tables.open_file(hdf5_file, "r")
    for index in validation_indices:
        if 'subject_ids' in data_file.root:
            case_directory = os.path.join(output_dir, data_file.root.subject_ids[index].decode('utf-8'))
        else:
            case_directory = os.path.join(output_dir, "validation_case_{}".format(index))
        run_validation_case(data_index=index, output_dir=case_directory, model=model, data_file=data_file,
                            training_modalities=training_modalities, output_label_map=output_label_map, labels=labels,
                            threshold=threshold, overlap=overlap, permute=permute)
    data_file.close()
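A usage sketch for the function above; every path and argument value here is a placeholder, not something taken from the snippet:

run_validation_cases(validation_keys_file="validation_ids.pkl",
                     model_file="segmentation_model.h5",
                     training_modalities=["t1", "flair"],
                     labels=(1,),
                     hdf5_file="data.h5",
                     output_label_map=True,
                     output_dir="prediction")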
Example #4
    def _build_model(self, path):

        if path is None:
            state = Input(shape=(3, 21, 2))
            action = Input(shape=(5, ))

            x = Flatten()(state)
            x = Dense(100, activation="relu")(x)
            x = Dense(100, activation="relu")(x)
            x = Dropout(0.5)(x)
            q = Dense(1, activation="relu")(x)

            self._model = Model([state, action], q)
            self._model.compile(loss="mae", optimizer="SGD")
        else:
            self._model = keras.models.load_model(path)
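A brief sketch of querying the network built above for a Q-value, written as it might appear in another method of the same class; note the action input is defined but never wired into the output, so it only needs to be supplied, not used:

    def _predict_q(self, state_batch, action_batch):
        # state_batch has shape (batch, 3, 21, 2); action_batch has shape (batch, 5)
        return self._model.predict([state_batch, action_batch])  # Q-values, shape (batch, 1)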
Example #5
def __init__(self, patient):
    self.log_date = datetime.now()
    self.current_patient = patient
    # self.current_emotion = Emotion.select().where(Emotion.id == 6).get()
    self.video = cv2.VideoCapture(0)
    self.emotion_model_path = './models/emotion_model.hdf5'
    self.emotion_labels = get_labels('fer2013')
    # hyper-parameters for bounding boxes shape
    self.frame_window = 10
    self.emotion_offsets = (20, 40)
    # loading models
    self.face_cascade = cv2.CascadeClassifier(
        './models/haarcascade_frontalface_default.xml')
    self.emotion_classifier = load_model(self.emotion_model_path)
    # getting input model shapes for inference
    self.emotion_target_size = self.emotion_classifier.input_shape[1:3]
    # starting lists for calculating modes
    self.emotion_window = []
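A short inference sketch using the attributes initialized above, written as it might appear in another method of the same class; it assumes cv2 and numpy (as np) are imported at module level, and the detection parameters and preprocessing are illustrative, since the exact values the saved model expects are not shown:

def classify_current_frame(self):
    # grab one frame from the webcam opened in __init__
    ret, frame = self.video.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = self.face_cascade.detectMultiScale(gray, scaleFactor=1.1,
                                               minNeighbors=5)
    for (x, y, w, h) in faces:
        face = cv2.resize(gray[y:y + h, x:x + w], self.emotion_target_size)
        face = np.expand_dims(np.expand_dims(face.astype('float32') / 255.0, -1), 0)
        emotion = self.emotion_labels[int(np.argmax(self.emotion_classifier.predict(face)))]
        self.emotion_window.append(emotion)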
Example #6
from keras.models import load_model
from load_data import load_data

model = load_model('./model.h5')

x_train, y_train, x_test, y_test = load_data()

# hyperparameters
epochs = 100

model.fit(x_train,
          y_train,
          epochs=epochs,
          verbose=1,
          validation_data=(x_test, y_test))
model.save('trained_model.h5')
Example #7
lgbm = LGBMClassifier(
    learning_rate=lr[lgbm_best['learning_rate']],
    n_estimators=n_est[lgbm_best['n_estimators']],
    # large num_leaves helps improve accuracy but might lead to over-fitting
    num_leaves=num_leaves[lgbm_best['num_leaves']],
    # for better accuracy -> try dart
    boosting_type=bt[lgbm_best['boosting_type']],
    objective=objective[lgbm_best['objective']],
    # large max_bin helps improve accuracy but might slow down training progress
    max_bin=max_bin[lgbm_best['max_bin']],
    colsample_bytree=colsample_bytree[lgbm_best['colsample_bytree']],
    subsample=subsample[lgbm_best['subsample']],
    reg_alpha=reg_alpha[lgbm_best['reg_alpha']],
    reg_lambda=reg_lambda[lgbm_best['reg_lambda']],
    is_unbalance=is_unbalance[lgbm_best['is_unbalance']])

lgbm.fit(X_train, y_train)

# FFNN
ffnn = load_model('trained_models/XP_ffnn')

# LottyNet !
cnn = load_model('trained_models/XP_cnn')

#********************************
# Prediction of the models
#********************************

knn_preds = knn.predict(X_test)  
svm_preds = svm.predict(X_test)  
lgbm_preds = lgbm.predict(X_test) 
ffnn_preds = ffnn.predict(X_test) 
ffnn_preds = enc.inverse_transform(ffnn_preds)
cnn_preds = cnn.predict(X_test) 
cnn_preds = enc.inverse_transform(cnn_preds)
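A hedged follow-up sketch, assuming y_test holds the ground-truth labels in the same encoding as the predictions gathered above:

from sklearn.metrics import accuracy_score

for name, preds in [('knn', knn_preds), ('svm', svm_preds), ('lgbm', lgbm_preds),
                    ('ffnn', ffnn_preds), ('cnn', cnn_preds)]:
    print(f'{name} accuracy: {accuracy_score(y_test, preds):.4f}')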
Example #8
def test_captcha():
    with open(MODEL_LABELS_FILENAME, "rb") as f:
        lb = pickle.load(f)
    # TODO need to find samples to test instead of training set
    model = kr.models.load_model(TRAINING_SAMPLES)
    captcha_image_files = list(imutils.paths.list_images(CAPTCHA_IMAGE_FOLDER))
    captcha_image_files = np.random.choice(captcha_image_files,
                                           size=(10, ),
                                           replace=False)

    #captcha_samples = "/data/captcha samples"
    # output for captcha samples that are split into separate letters
    #split = "/data/split images"

    # loop over the sampled captcha image paths
    for item in captcha_image_files:

        # strip the extension; the file name encodes the expected captcha text
        captcha_letters = os.path.splitext(os.path.basename(item))[0]
        print(captcha_letters)

        # read the captcha image in colour
        current_image = cv2.imread(item)

        gray_image = cv2.cvtColor(current_image, cv2.COLOR_BGR2GRAY)
        # adding border for padding
        gray_image = cv2.copyMakeBorder(gray_image, 8, 8, 8, 8,
                                        cv2.BORDER_REPLICATE)
        # convert to black and white
        black = cv2.threshold(gray_image, 0, 255,
                              cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]

        # get contours to detect blobs (each character); grab_contours handles
        # the different return formats across OpenCV versions
        letters = imutils.grab_contours(
            cv2.findContours(black.copy(), cv2.RETR_EXTERNAL,
                             cv2.CHAIN_APPROX_SIMPLE))
        letter_group = []

        # get letter from each pixel group
        for letter in letters:
            (x, y, width, height) = cv2.boundingRect(letter)
            # some letters are written over each other, so we need to check
            # whether the pixel group is a single letter
            if width / height > 1.25:
                # can split the letters into two images
                half = int(width / 2)
                letter_group.append((x, y, half, height))
                letter_group.append((x + half, y, half, height))
            else:
                letter_group.append((x, y, width, height))

        # this means the captcha has more than 4 letters or something went wrong
        # so we skip it for now
        if len(letter_group) != 4:
            continue

        # sort characters so we are reading from left to right using x coordinate
        letter_group = sorted(letter_group, key=lambda x: x[0])

        # make an output image (three-channel copy of the grayscale captcha)
        output = cv2.merge([gray_image] * 3)
        predictions = []

        # loop over letters to check each

        for piece in letter_group:
            # coordinates of letter
            x, y, width, height = piece
            # extract the letter from the grayscale image with a small margin
            letter_image = gray_image[y - 2:y + height + 2, x - 2:x + width + 2]
            # resize to fit training data 20x20
            letter_image = resize_to_fit(letter_image, 20, 20)
            # set image to 4d list to fit keras model
            letter_image = np.expand_dims(letter_image, axis=2)
            letter_image = np.expand_dims(letter_image, axis=0)

            # get a prediction
            prediction = model.predict(letter_image)
            # convert to normal letter
            letter = lb.inverse_transform(prediction)[0]
            predictions.append(letter)

            # draw the prediction on the output image
            cv2.rectangle(output, (x - 2, y - 2), (x + width + 4, y + height + 4),
                          (0, 255, 0), 1)
            cv2.putText(output, letter, (x - 5, y - 5),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.55, (0, 255, 0), 2)

        # print the predicted text
        captcha_text = "".join(predictions)
        print("CAPTCHA text is: {}".format(captcha_text))

        # Show the annotated image
        cv2.imshow("Output", output)
        cv2.waitKey()
Example #9
import os
from importlib import reload

import cv2
from keras import backend as K
from keras.models import load_model

os.chdir("C:/Users/Jeremy/Desktop/Senior Project Design")

from processing_libs import get_data, normalize, getAngle
from segment_blue_disk import segment_blue_disk


def set_keras_backend(backend):
    # change the backend of keras to theano or tensorflow
    if K.backend() != backend:
        os.environ['KERAS_BACKEND'] = backend
        reload(K)
        assert K.backend() == backend


set_keras_backend("theano")

model = load_model("experimental_model.h5")

cap = cv2.VideoCapture(0)
ret, frame = cap.read()

if ret:

    cv2.imshow('frame', frame)

frame = segment_blue_disk(frame)
frame = normalize(frame)
# the original resize target is not shown; a reasonable guess is the model's
# spatial input size (cv2.resize expects (width, height))
frame = cv2.resize(frame, (model.input_shape[2], model.input_shape[1]))
print("angle estimated value is: " + str(getAngle(model.predict(frame))))
Example #10
# Load the dataset
print("Loading dataset")
X_train = np.load(BASE_DIR + 'X_sample.npy')
y_train = np.load(BASE_DIR + 'y_sample.npy')

print("Checking if predictions exist, loading CLF and predicting otherwise")
y_preds_f = [BASE_DIR + 'y_sample_pred_' + c + '.npy' for c in CLFS]
y_pred_exists = [Path(y).is_file() for y in y_preds_f]

y_preds = [None] * len(CLFS)
for i in range(len(CLFS)):
    if not y_pred_exists[i]:
        print("Needed to load classifier ", clfs_f[i])
        if i == 1:
            from keras.models import load_model
            clf = CLF(clf=load_model(clfs_f[i]),
                      clf_type="keras_cnn",
                      shape=(28, 28, 1))
        else:
            clf = CLF(clf=joblib.load(open(clfs_f[i], 'rb')),
                      clf_type="sklearn")
        y_preds[i] = clf.Predict(X_train)
        np.save(y_preds_f[i], y_preds[i])
    else:
        y_preds[i] = np.load(y_preds_f[i])

print("Loading projections")
projs = [LoadProjection(f) for f in projections_f]

print("Loading densemaps")
dmaps = [np.load(f) for f in dmaps_f]
Example #11
#%% importing things
from tensorflow import keras as kr
from PIL import Image
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import os
from keras.layers import Conv2D, MaxPooling2D
import pickle
from keras.models import load_model

#%% read in history

# load model
model = load_model('model')

# load history
history_file = r'C:\Users\oyina\Documents\src\measurement_principles\modeling-project-oyin_marcos\deep learning\history.pkl'
with open(history_file, 'rb') as pkl_file:
    history = pickle.load(pkl_file)

# load data
data_file = r'C:\Users\oyina\Documents\src\measurement_principles\modeling-project-oyin_marcos\deep learning\data.pkl'
with open(data_file, 'rb') as pkl_file:
    data = pickle.load(pkl_file)

#%% plot history
pd.DataFrame(history.history).plot()
plt.grid(True)
# plt.gca().set_ylim(0, 1)
Example #12
import cv2
import numpy as np
from keras.models import load_model

model = load_model(path)


def predict_image(image):
    image = np.array(image, dtype='float32')
    image /= 255
    pred_array = model.predict(image)

    result = gesture_names[np.argmax(pred_array)]

    score = float("%0.2f" % (max(pred_array[0]) * 100))
    print(f'Result: {result}, Score: {score}')
    return result, score


camera = cv2.VideoCapture(0)  #uses webcam for video

while camera.isOpened():

    # grab a frame from the webcam; the original script presumably reduces it to
    # a single channel before the stacking below, so convert to grayscale here
    ret, frame = camera.read()
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    k = cv2.waitKey(10)
    if k == 32:  # if spacebar pressed
        frame = np.stack((frame, ) * 3, axis=-1)
        frame = cv2.resize(frame, (224, 224))
        frame = frame.reshape(1, 224, 224, 3)
        prediction, score = predict_image(frame)
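A minimal cleanup sketch for the capture loop above, assuming the loop is meant to exit on the Esc key (keycode 27):

    if k == 27:  # Esc pressed: stop capturing
        break

camera.release()
cv2.destroyAllWindows()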
Example #13
# TODO: y_train contains the values 0 and 9; replace all 9s with 1s
y_train[y_train == 9] = 1

# TODO: load y_pred if it exists
y_preds_f = [BASE_DIR + 'y_sample_bin_pred_' + c + '.npy' for c in CLFS]
y_pred_exists = [Path(y).is_file() for y in y_preds_f]
print("y_pred_exists")

clfs = [None] * len(CLFS)

y_preds = [None] * len(CLFS)
for i in range(len(CLFS)):
    if not y_pred_exists[i]:
        if i == 1:
            from keras.models import load_model
            clfs[i] = CLF(clf=load_model(clfs_f[i]),
                          clf_type="keras_cnn",
                          shape=(28, 28, 1))
        else:
            clfs[i] = CLF(clf=joblib.load(open(clfs_f[i], 'rb')),
                          clf_type="sklearn")
        y_preds[i] = clfs[i].Predict(X_train)
        np.save(y_preds_f[i], y_preds[i])
    else:
        y_preds[i] = np.load(y_preds_f[i])

print("2 class fashion mnist - {} samples".format(len(y_train)))
for i in range(len(CLFS)):
    clf_name = CLFS[i]
    print('\t' + clf_name + " num errors: ", np.sum(y_train != y_preds[i]))