def classify(inputfile='slpdb.mat',
             epoch_length=30,
             make_plot=True,
             save=True,
             outputfile='classify.txt'):
    """Classify sleep epochs of an ECG recording with a pre-trained model.

    Loads the .mat dataset, derives respiration signals (EDR), builds and
    normalizes the feature set, then predicts one label per epoch using
    the serialized model stored in 'model.pkl'.

    Parameters
    ----------
    inputfile : str
        Path to the input .mat file; must provide at least 'ECG' and
        'labels' entries (labels are only used for plotting).
    epoch_length : int
        Epoch duration in seconds used when grouping ECG samples.
    make_plot : bool
        If True, plot the predictions against the reference labels.
    save : bool
        If True, write the predicted labels to `outputfile`.
    outputfile : str
        Destination text file for the predictions.
    """
    # Load data
    dataset = loadData(inputfile)
    # NOTE(review): 250 looks like the ECG sampling rate in Hz — confirm
    # against the recording metadata.
    ECG = groupByEpoch(dataset['ECG'], 250, epoch_length)
    Resp_in, Resp_out = edr.main(dataset)

    # Feature dictionary construction
    d_test = dictionaryInitialization()
    d_test = dico_construction(ECG, Resp_in, Resp_out, d_test)
    Xarr_norm = createNormalizedDataframe(d_test)

    # Prediction: one label (stringified) per epoch
    model = joblib.load('model.pkl')
    pred_res = list()
    for j in range(0, len(ECG)):
        l = str(model.predict(Xarr_norm[j])[0])
        pred_res.append(l)

    if make_plot:
        # Plot predictions against the reference labels
        plot(pred_res, dataset['labels'], epoch_length)

    if save:
        # Write predictions into a text file
        savePredictions(pred_res, outputfile)
def classify(inputfile='slpdb.mat', epoch_length=30, make_plot=True, save=True, outputfile='classify.txt'):
    """Predict a label for every epoch of an ECG recording.

    Reads `inputfile`, extracts EDR respiration signals, builds and
    normalizes the feature set, then applies the serialized model
    ('model.pkl') epoch by epoch. Optionally plots the result and/or
    writes it to `outputfile`.
    """
    # Read the raw dataset and split the ECG into fixed-length epochs.
    dataset = loadData(inputfile)
    ECG = groupByEpoch(dataset['ECG'], 250, epoch_length)
    Resp_in, Resp_out = edr.main(dataset)

    # Build the feature dictionary and normalize it into a dataframe.
    features = dictionaryInitialization()
    features = dico_construction(ECG, Resp_in, Resp_out, features)
    Xarr_norm = createNormalizedDataframe(features)

    # Predict one label per epoch with the pre-trained model.
    model = joblib.load('model.pkl')
    pred_res = []
    for epoch_idx in range(len(ECG)):
        pred_res.append(str(model.predict(Xarr_norm[epoch_idx])[0]))

    if make_plot:
        # Visual comparison of predictions with the reference labels.
        plot(pred_res, dataset['labels'], epoch_length)

    if save:
        # Persist the predictions as plain text.
        savePredictions(pred_res, outputfile)
# --- Exemplo n.º 3 (scraped snippet separator; original score: 0) ---
import numpy as np
# sklearn.cross_validation was removed in scikit-learn 0.20; use
# model_selection and fall back only for very old installations.
try:
    from sklearn.model_selection import train_test_split
except ImportError:
    from sklearn.cross_validation import train_test_split
from data import loadData
from featureConstruction import dictionaryInitialization, dico_construction
from models import listModels, createNormalizedDataframe, savePredictions

mat_file = 'data_challenge.mat'

# Load data
dataset = loadData(mat_file)

# Train dictionary construction: build and normalize the feature set.
d_train = dictionaryInitialization()
d_train = dico_construction(dataset['X_train'], d_train)
labels = np.array(dataset['y_train'])
Xarr_norm = createNormalizedDataframe(d_train)

# Hold out 20% of the training data to score the candidate models.
X_train_train, X_train_test, y_train_train, y_train_test = train_test_split(
    Xarr_norm, labels, test_size=0.2, random_state=42)

# Replace NaNs left by normalization with zeros so fitting cannot fail.
X_train_train = np.nan_to_num(X_train_train)
X_train_test = np.nan_to_num(X_train_test)

# Scores: fit every candidate model and print its hold-out score.
models = listModels()
for m in models:
    # Parenthesized single-argument print works on Python 2 and 3 alike.
    print(m.get_params())
    m.fit(X_train_train, y_train_train)
    print(m.score(X_train_test, y_train_test))

# Real training model
# --- Exemplo n.º 4 (scraped snippet separator; original score: 0) ---
import numpy as np
# sklearn.cross_validation no longer exists in scikit-learn >= 0.20;
# prefer model_selection, falling back for legacy environments.
try:
    from sklearn.model_selection import train_test_split
except ImportError:
    from sklearn.cross_validation import train_test_split
from data import loadData
from featureConstruction import dictionaryInitialization, dico_construction
from models import listModels, createNormalizedDataframe, savePredictions

mat_file = 'data_challenge.mat'

# Load data
dataset = loadData(mat_file)

# Train dictionary construction: build and normalize the feature set.
d_train = dictionaryInitialization()
d_train = dico_construction(dataset['X_train'], d_train)
labels = np.array(dataset['y_train'])
Xarr_norm = createNormalizedDataframe(d_train)

# Keep 20% of the training data aside to evaluate the candidate models.
X_train_train, X_train_test, y_train_train, y_train_test = train_test_split(
    Xarr_norm, labels, test_size=0.2, random_state=42)

# Zero out NaNs produced by normalization before fitting.
X_train_train = np.nan_to_num(X_train_train)
X_train_test = np.nan_to_num(X_train_test)

# Scores: fit each candidate model and print its hold-out score.
models = listModels()
for m in models:
    # print() with a single argument is valid on both Python 2 and 3.
    print(m.get_params())
    m.fit(X_train_train, y_train_train)
    print(m.score(X_train_test, y_train_test))