import numpy as np

# Draw the ground-truth words and stack them next to the reconstructions
# (`words1`, `words2`, `res1`, `res2`, `stimuli` and `bars` are defined
# earlier in the script; see the following blocks for how `words2` is built).
words = draw_words(stimuli, bars)
stacked = np.concatenate([words1, words2, words], axis=1)
# pad this slightly in order to be able to distinguish groups
stacked = pad(stacked, [0, 10, 10])

# Tile the first num_x * num_y stacked images into a single collage
num_x = 8
num_y = 12
start_at = 0
collage = make_collage(
    stacked[start_at:start_at + (num_x * num_y)].reshape(
        num_x, num_y, stacked.shape[1], stacked.shape[2]))

import pylab as pl
pl.figure()
pl.imshow(collage)
pl.gray()

# ROC curves of both reconstructions against the true stimuli
import scoring
pl.figure()
roc1 = scoring.roc(res1, stimuli)
roc2 = scoring.roc(res2, stimuli)
pl.plot(roc1)
pl.plot(roc2)
pl.plot([0, len(stimuli)], [0, 1])  # reference diagonal
pl.show()

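# Hedged sketch -- `pad` and `make_collage` are project-local helpers whose
# code is not shown in this section. A minimal reimplementation of what they
# appear to do (zero-pad each image, then tile a (num_x, num_y, height, width)
# array into a single image) could look like the following; the names
# `pad_sketch` and `make_collage_sketch` are placeholders, not the real helpers.
import numpy as np


def pad_sketch(images, widths):
    # `widths` gives the padding along each axis of the stacked array,
    # e.g. [0, 10, 10] adds a 10-pixel border in height and width only.
    return np.pad(images, [(w, w) for w in widths], mode='constant')


def make_collage_sketch(grid):
    # `grid` has shape (num_x, num_y, height, width); interleave the grid axes
    # with the image axes so the result is one big
    # (num_x * height, num_y * width) image.
    num_x, num_y, height, width = grid.shape
    return grid.transpose(0, 2, 1, 3).reshape(num_x * height, num_y * width)
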
def score_func(y_true, y_pred):
    # balanced accuracy: mean of the recall on the active and inactive targets
    return 0.5 * ((y_true == y_pred) * y_true).sum() / y_true.sum() + \
        0.5 * ((y_true == y_pred) * (1 - y_true)).sum() / (1 - y_true).sum()

from sklearn.cross_validation import cross_val_score  # old scikit-learn API
scores1 = cross_val_score(forest, res1 > .5, stimuli, cv=6,
                          score_func=score_func)
#scores2 = cross_val_score(, res1 > .5, stimuli, cv=6,
#                          score_func=score_func)

import pylab as pl
pl.figure()
pl.imshow(collage)
pl.gray()

import scoring
pl.figure()
roc1 = scoring.roc(p, stimuli)            # whole-word predictions
roc2 = scoring.roc(predictions, stimuli)  # letter-by-letter predictions
pl.plot(roc1, c='b', label='Whole word')
pl.plot(roc2, c='g', label='Letter by letter')
pl.grid()
pl.title('Second-layer results, with an L2 logistic first layer '
         'and k=100 voxels')
pl.legend()
pl.plot([0, len(stimuli)], [0, 1])  # reference diagonal
pl.show()

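# Hedged sketch -- `scoring.roc` above is a project-local helper and its
# implementation is not shown here. Purely as a point of comparison, a
# standard ROC curve over the flattened binary targets and continuous
# predictions can be computed with scikit-learn (this assumes `stimuli` holds
# 0/1 labels and `p` the corresponding scores; it is not the same curve as
# the one returned by `scoring.roc`).
import numpy as np
import pylab as pl
from sklearn.metrics import auc, roc_curve

fpr, tpr, _ = roc_curve(np.ravel(stimuli), np.ravel(p))
pl.figure()
pl.plot(fpr, tpr, label='AUC = %.3f' % auc(fpr, tpr))
pl.plot([0, 1], [0, 1], '--')  # chance level
pl.legend()
pl.show()
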
words2 = draw_words(res1, bars)
words = draw_words(stimuli, bars)
stacked = np.concatenate([words1, words2, words], axis=1)
# pad this slightly in order to be able to distinguish groups
stacked = pad(stacked, [0, 10, 10])

num_x = 8
num_y = 12
start_at = 0
collage = make_collage(
    stacked[start_at:start_at + (num_x * num_y)].reshape(
        num_x, num_y, stacked.shape[1], stacked.shape[2]))

import pylab as pl
pl.figure()
pl.imshow(collage)
pl.gray()
pl.show()

import scoring
pl.figure()
roc1 = scoring.roc(res1, stimuli)
#roc2 = scoring.roc(res2, stimuli)
pl.plot(roc1)
#pl.plot(roc2)
pl.plot([0, len(stimuli)], [0, 1])

words = draw_words(stimuli, bars)
stacked = np.concatenate([words1, words2, words], axis=1)
# pad this slightly in order to be able to distinguish groups
stacked = pad(stacked, [0, 10, 10])

num_x = 8
num_y = 12
start_at = 0
collage = make_collage(
    stacked[start_at:start_at + (num_x * num_y)].reshape(
        num_x, num_y, stacked.shape[1], stacked.shape[2]))

import pylab as pl
pl.figure()
pl.imshow(collage)
pl.gray()
pl.show()

import scoring
pl.figure()
roc1 = scoring.roc(res1, stimuli)
pl.plot(roc1)
pl.plot([0, len(stimuli)], [0, 1])

stacked = np.concatenate([words1, words2, words], axis=1)
# pad this slightly in order to be able to distinguish groups
stacked = pad(stacked, [0, 10, 10])

num_x = 8
num_y = 12
start_at = 0
collage = make_collage(
    stacked[start_at:start_at + (num_x * num_y)].reshape(
        num_x, num_y, stacked.shape[1], stacked.shape[2]))

import pylab as pl
pl.figure()
pl.imshow(collage)
pl.gray()
pl.show()

import scoring
pl.figure()
roc1 = scoring.roc(res1, stimuli)
roc2 = scoring.roc(res2, stimuli)
pl.plot(roc1)
pl.plot(roc2)
pl.plot([0, len(stimuli)], [0, 1])

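# Hedged sketch -- `draw_words` is also a project-local helper. One plausible
# implementation, assuming `stimuli` (or `res1`) is an (n_samples, n_bars)
# binary code and `bars` is an (n_bars, height, width) stack of bar images,
# is to superimpose the active bars of each sample; the real helper may differ.
import numpy as np


def draw_words_sketch(codes, bar_images):
    # codes: (n_samples, n_bars) activations, one row per word
    # bar_images: (n_bars, height, width) image of each individual bar
    return np.tensordot(codes, bar_images, axes=(1, 0)).clip(0, 1)
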
collage = make_collage(
    stacked[start_at:start_at + (num_x * num_y)].reshape(
        num_x, num_y, stacked.shape[1], stacked.shape[2]))


def score_func(y_true, y_pred):
    # balanced accuracy: mean of the recall on the active and inactive targets
    return 0.5 * ((y_true == y_pred) * y_true).sum() / y_true.sum() + \
        0.5 * ((y_true == y_pred) * (1 - y_true)).sum() / (1 - y_true).sum()

from sklearn.cross_validation import cross_val_score  # old scikit-learn API
scores1 = cross_val_score(forest, res1 > .5, stimuli, cv=6,
                          score_func=score_func)
#scores2 = cross_val_score(, res1 > .5, stimuli, cv=6,
#                          score_func=score_func)

import pylab as pl
pl.figure()
pl.imshow(collage)
pl.gray()

import scoring
pl.figure()
roc1 = scoring.roc(p, stimuli)            # whole-word predictions
roc2 = scoring.roc(predictions, stimuli)  # letter-by-letter predictions
pl.plot(roc1, c='b', label='Whole word')
pl.plot(roc2, c='g', label='Letter by letter')
pl.grid()
pl.title('Second-layer results, with an L2 logistic first layer '
         'and k=100 voxels')
pl.legend()
pl.plot([0, len(stimuli)], [0, 1])  # reference diagonal
pl.show()

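# Hedged note -- `score_func` above is the balanced accuracy (the mean of the
# recall on the positive and on the negative targets), and the
# `sklearn.cross_validation` module with its `score_func` argument is a
# long-removed API. For a 1-D binary target, recent scikit-learn versions
# (>= 0.20) provide the same score directly; the toy arrays below are invented
# purely to illustrate the equivalence.
import numpy as np
from sklearn.metrics import balanced_accuracy_score

y_true = np.array([1, 1, 0, 0, 0])
y_pred = np.array([1, 0, 0, 0, 1])
# both evaluate to 0.5 * 1/2 + 0.5 * 2/3
print(score_func(y_true, y_pred))
print(balanced_accuracy_score(y_true, y_pred))

# With the modern API, the cross-validation above would use
# `sklearn.model_selection.cross_val_score(..., scoring=...)` together with
# `sklearn.metrics.make_scorer(balanced_accuracy_score)` instead of
# `score_func=`.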