Example #1
# imports for this excerpt; ``est``, ``X`` and ``y`` are assumed to be
# defined earlier in the example
import pandas as pd
from sklearn.metrics import f1_score, make_scorer
from sklearn.model_selection import cross_validate
from seglearn.pipe import SegPipe
from seglearn.transform import SegmentX
##################################################
# SCORING WORKAROUND 1: PASS A SCORER TO THE PIPELINE
##################################################

# ``SegPipe`` can be initialized with a scorer callable made with
# ``sklearn.metrics.make_scorer``. This can be used to cross validate or
# grid search the whole pipeline, but with only a single score (a grid
# search sketch follows the cross-validation below).

scorer = make_scorer(f1_score, average='macro')
pipe = SegPipe(est, scorer=scorer)
cv_scores = cross_validate(pipe, X, y, cv=4, return_train_score=True)
print("CV F1 Scores: ", pd.DataFrame(cv_scores))

##################################################
# SCORING WORKAROUND 2: WORK OUTSIDE THE PIPELINE
##################################################

# If you want multiple scores computed, the only way is as follows:
#
# First transform the time series data into segments, and then score the
# ``est`` part of the pipeline on the segmented data.
#
# The disadvantage of this approach is that the parameters of the ``seg``
# part of the pipeline cannot be optimized.

segmenter = SegmentX()
X_seg, y_seg, _ = segmenter.fit_transform(X, y)
scoring = ['accuracy', 'precision_macro', 'recall_macro', 'f1_macro']
cv_scores = cross_validate(est,
                           X_seg,
                           y_seg,
                           cv=4,
                           return_train_score=False,
                           scoring=scoring)
print("CV Scores (workaround): ", pd.DataFrame(cv_scores))
Example #2
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.pipeline import Pipeline
from seglearn.pipe import SegPipe
from seglearn.transform import FeatureRep, SegmentX
# load the 8 recordings, one per class (the first three reads here were cut
# off in this excerpt and are reconstructed from the label list below)
X = np.array([
    np.array(pd.read_csv('LB_BC_9600.csv', sep=",", header=None)),
    np.array(pd.read_csv('RB_BC_9600.csv', sep=",", header=None)),
    np.array(pd.read_csv('LF_BC_9600.csv', sep=",", header=None)),
    np.array(pd.read_csv('RF_BC_9600.csv', sep=",", header=None)),
    np.array(pd.read_csv('LB_BP_9600.csv', sep=",", header=None)),
    np.array(pd.read_csv('RB_BP_9600.csv', sep=",", header=None)),
    np.array(pd.read_csv('LF_BP_9600.csv', sep=",", header=None)),
    np.array(pd.read_csv('RF_BP_9600.csv', sep=",", header=None))
])

# create the label vector and the corresponding semantic class names
y = np.array([0, 1, 2, 3, 4, 5, 6, 7])
labels = [
    'LB_BC', 'RB_BC', 'LF_BC', 'RF_BC', 'LB_BP', 'RB_BP', 'LF_BP', 'RF_BP'
]

# segment the data and labels with 100-sample windows and 50% overlap
segmenter = SegmentX(100, 0.5)
X_new, y_new, _ = segmenter.fit_transform(X, y)
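# a quick sanity check (not in the original): with width=100 and overlap=0.5
# the window step is 50 samples, so each recording yields roughly
# (n_samples - 100) // 50 + 1 segments
print("segments: ", X_new.shape, " class counts: ", np.bincount(y_new.astype(int)))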

###################################################################################################

# create a pipeline for LDA transformation of the feature representation
est = Pipeline([('features', FeatureRep()),
                ('lda', LinearDiscriminantAnalysis(n_components=2))])
pipe = SegPipe(est)

# plot embedding
X2, y2 = pipe.fit_transform(X_new, y_new)
plot_embedding(X2, y2.astype(int), labels)
plt.show()
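# ``plot_embedding`` is a helper defined elsewhere in the example; a minimal
# sketch consistent with the call above (2D points, integer labels, class
# names) could look like this:
def plot_embedding(X2d, y_int, class_names):
    fig, ax = plt.subplots()
    for c in np.unique(y_int):
        mask = y_int == c
        ax.scatter(X2d[mask, 0], X2d[mask, 1], s=10, label=class_names[c])
    ax.set_xlabel('LD1')
    ax.set_ylabel('LD2')
    ax.legend(loc='best', fontsize='small')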

###################################################################################################
Example #3
# imports assumed by this excerpt; ``SegmentX`` here takes a ``step``
# argument, so it may be a local variant of seglearn's transformer
import matplotlib.pyplot as plt
import numpy as np
import ruptures as rpt
import scipy.io as sio
import torch
from sklearn.preprocessing import StandardScaler
from seglearn.transform import SegmentX
# ``idx`` (defined earlier) selects which generated dataset to load
# data = sio.loadmat('./data/shiftcorr%d' % idx)  # poor results
# data = sio.loadmat('./data/shiftlinear%d' % idx)  # poor results
# data = sio.loadmat('./data/singledimshiftfreq%d' % idx)  # poor results
# data = sio.loadmat('./data/agotsshiftmean%d' % idx)
# data = sio.loadmat('./data/agotsshiftvar%d' % idx)
data = sio.loadmat('./data/extreme%d' % idx)  # good results
ts = data['ts']  # append .T here: the transpose is needed for the shiftfreq data
print(ts.shape)
bkps = data['bkps'][0]

scaler = StandardScaler()
ts = scaler.fit_transform(ts)

width = 10
step = 5
ts = [ts]  # SegmentX expects a list of time series
segment = SegmentX(width=width, step=step)
x = segment.fit_transform(ts, None)[0]  # keep only the segmented X
x = x.reshape([x.shape[0], -1])  # flatten each segment into a feature vector
x = torch.from_numpy(x).float()
bkss = bkps // step  # true breakpoints rescaled to segment indices ("break samples")
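# ``AutoEncoder`` is a custom model defined elsewhere; a minimal PyTorch
# sketch consistent with its usage below (``fit_predict`` trains on x and
# returns (losses, reconstructions)) could be:
import torch.nn as nn

class AutoEncoder(nn.Module):
    def __init__(self, input_dim, latent_dim, output_dim):
        super().__init__()
        self.encoder = nn.Linear(input_dim, latent_dim)
        self.decoder = nn.Linear(latent_dim, output_dim)

    def forward(self, x):
        return self.decoder(self.encoder(x))

    def fit_predict(self, x, epochs=200, lr=1e-2):
        # train with MSE reconstruction loss, then return reconstructions
        opt = torch.optim.Adam(self.parameters(), lr=lr)
        losses = []
        for _ in range(epochs):
            opt.zero_grad()
            loss = nn.functional.mse_loss(self(x), x)
            loss.backward()
            opt.step()
            losses.append(loss.item())
        return losses, self(x)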

model = AutoEncoder(input_dim=10, latent_dim=1, output_dim=10)  # input_dim = width for a univariate series

_, pred = model.fit_predict(x)

err = (pred - x).detach().numpy()
err = np.max(np.power(err, 2), axis=1)  # max squared reconstruction error per segment
rpt.display(err, true_chg_pts=bkss)  # reconstruction error vs. rescaled breakpoints
rpt.display(ts[0], true_chg_pts=bkps)  # original series vs. true breakpoints
plt.show()
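# a sketch (not in the original): instead of only displaying the error signal,
# detect change points on it with ruptures and compare against the rescaled
# true breakpoints; the "rbf" model and penalty value are arbitrary choices
algo = rpt.Pelt(model="rbf").fit(err.reshape(-1, 1))
pred_bkps = algo.predict(pen=10)
rpt.display(err, bkss, pred_bkps)
plt.show()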