Example #1
import numpy as np
from pyspark.ml.feature import IndexToString

def test_list_string(self):
    # unittest-style test; `self` refers to the enclosing TestCase.
    # numpy arrays and plain lists of (unicode) strings are all accepted
    # as labels and normalized to a plain list of str.
    for labels in [
            np.array(['a', u'b']), ['a', u'b'],
            np.array(['a', 'b'])
    ]:
        idx_to_string = IndexToString(labels=labels)
        self.assertListEqual(idx_to_string.getLabels(), ['a', 'b'])
    # Mixed-type labels are rejected with a TypeError.
    self.assertRaises(TypeError, lambda: IndexToString(labels=['a', 2]))
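
For reference, a minimal usage sketch of the transformer under test (assuming an active SparkSession named spark; the DataFrame and column names are illustrative, not part of the test above):

from pyspark.ml.feature import IndexToString

# Illustrative input: a column of label indices, e.g. produced by StringIndexer.
df = spark.createDataFrame([(0.0,), (1.0,), (0.0,)], ["labelIndex"])

# Map the numeric indices back to the original string labels.
converter = IndexToString(inputCol="labelIndex", outputCol="label",
                          labels=['a', 'b'])
converter.transform(df).show()  # 0.0 -> 'a', 1.0 -> 'b'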
Example #2
# The closer precision and recall are to each other, the higher the F1 score is.
# We can see directly from the formula that if P = R, then F1 = P = R, because:
#
# F1 = 2*P*R / (P + R) = 2*P*P / (P + P) = P
#
# This explains why the F1 score equals precision and recall whenever precision
# and recall are the same.
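
# A quick numeric check of the identity above (the values are illustrative):
def f1(p, r):
    return 2 * p * r / (p + r)

assert abs(f1(0.8, 0.8) - 0.8) < 1e-12  # P == R  =>  F1 == P == R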

# In[29]:

# Raw confusion matrix (counts) from the multiclass evaluation metrics.
confusion_mat = metrics.confusionMatrix()
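
# For context: `metrics` is presumably a MulticlassMetrics instance built
# earlier in the notebook. A minimal sketch (the `predictions` DataFrame and
# its 'prediction'/'label' column names are assumptions, not shown in this
# excerpt):

from pyspark.mllib.evaluation import MulticlassMetrics

pred_and_labels = predictions.select('prediction', 'label').rdd.map(tuple)
metrics = MulticlassMetrics(pred_and_labels)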

# In[30]:

# Heatmap of the raw (count) confusion matrix; tick labels are the original
# class names recovered from the indexer.
fig, ax = plt.subplots(figsize=(10, 10))
sns.heatmap(confusion_mat.toArray(),
            xticklabels=indexer_to_eval.getLabels(),
            yticklabels=indexer_to_eval.getLabels(),
            cmap="YlGnBu")
plt.show()

# In[31]:

# Row-normalize the confusion matrix: cell (i, j) becomes the fraction of
# true class i that was predicted as class j, so the diagonal holds the
# per-class recall.
confusion_mat_norm = confusion_mat.toArray() / confusion_mat.toArray().sum(
    axis=1, keepdims=True)
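
# Sanity check (illustrative): after row normalization, every row sums to 1.
assert np.allclose(confusion_mat_norm.sum(axis=1), 1.0)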

# In[32]:

# Heatmap of the row-normalized confusion matrix (same layout as above).
fig, ax = plt.subplots(figsize=(10, 10))
sns.heatmap(confusion_mat_norm,
            xticklabels=indexer_to_eval.getLabels(),
            yticklabels=indexer_to_eval.getLabels(),
            cmap="YlGnBu")
plt.show()