Example #1
0
def process(training_file, test_file, check, draw):
    # Load training data.
    with open(training_file) as f:
        class_1 = array(pickle.load(f))
        class_2 = array(pickle.load(f))
        labels = pickle.load(f)
    model = bayes.BayesClassifier()
    model.train([class_1, class_2], [1, -1])

    # Load test data.
    with open(test_file) as f:
        class_1 = array(pickle.load(f))
        class_2 = array(pickle.load(f))
        labels = pickle.load(f)

    if check:
        n = class_1.shape[0]
        n_correct = 0
        n_correct += sum(model.classify(class_1)[0] == labels[:n])
        n_correct += sum(model.classify(class_2)[0] == labels[n:])
        print 'percent correct:', 100 * n_correct / float(2 * n)

    if draw:

        def classify(x, y, model=model):
            points = vstack((x, y))
            return model.classify(points.T)[0]

        imtools.plot_2d_boundary([-6, 6, -6, 6], [class_1, class_2], classify,
                                 [1, -1])
        show()
Example #2
0
def build_bayes_graph(im, labels, sigma=1e2, kappa=1):
    """Build a graph from the 4-neighborhood of pixels.

    Foreground and background are determined from labels (1 for
    foreground, -1 for background, 0 otherwise) and are modeled with
    naive Bayes classifiers.
    """
    m, n = im.shape[:2]

    # One row per pixel, as an RGB vector.
    vim = im.reshape((-1, 3))

    # Train a naive Bayes classifier on the marked fg/bg pixels.
    foreground = im[labels == 1].reshape((-1, 3))
    background = im[labels == -1].reshape((-1, 3))
    bc = bayes.BayesClassifier()
    bc.train([foreground, background])

    # Per-pixel class probabilities (labels themselves are unused).
    _, prob = bc.classify(vim)
    prob_fg, prob_bg = prob[0], prob[1]

    # One node per pixel plus two terminal nodes.
    gr = digraph()
    gr.add_nodes(range(m * n + 2))
    source = m * n      # second to last node is the source
    sink = m * n + 1    # last node is the sink

    # Normalize each RGB vector; epsilon avoids division by zero.
    for idx in range(vim.shape[0]):
        vim[idx] = vim[idx] / (linalg.norm(vim[idx]) + 1e-9)

    for idx in range(m * n):
        denom = prob_fg[idx] + prob_bg[idx]
        # Terminal edges: source -> pixel and pixel -> sink.
        gr.add_edge((source, idx), wt=(prob_fg[idx] / denom))
        gr.add_edge((idx, sink), wt=(prob_bg[idx] / denom))

        # Collect the 4-neighbors that exist (left, right, up, down).
        neighbors = []
        if idx % n != 0:
            neighbors.append(idx - 1)
        if (idx + 1) % n != 0:
            neighbors.append(idx + 1)
        if idx // n != 0:
            neighbors.append(idx - n)
        if idx // n != m - 1:
            neighbors.append(idx + n)

        # Neighbor edges weighted by color similarity.
        for j in neighbors:
            wt = kappa * exp(-1.0 * sum((vim[idx] - vim[j])**2) / sigma)
            gr.add_edge((idx, j), wt=wt)

    return gr
Example #3
0
def build_bayes_graph(im, labels, sigma=1e2, kappa=2):
    """Build a graph from the 4-neighborhood of pixels.

    Foreground and background are determined from labels (1 for
    foreground, -1 for background, 0 otherwise) and are modeled with a
    naive Bayes classifier.

    BUG FIX: the neighbor-edge block was dedented out of the pixel
    loop, so neighbor edges were only added once (for the final value
    of i) instead of for every pixel; it now runs inside the loop.
    Also guards the normalization against zero-length RGB vectors.
    """
    m, n = im.shape[:2]

    # One row per pixel, as an RGB vector.
    vim = im.reshape((-1, 3))

    # RGB samples of the marked foreground and background pixels.
    foreground = im[labels == 1].reshape((-1, 3))
    background = im[labels == -1].reshape((-1, 3))
    train_data = [foreground, background]

    # Train the naive Bayes classifier.
    bc = bayes.BayesClassifier()
    bc.train(train_data)

    # Class probabilities for every pixel.
    bc_lables, prob = bc.classify(vim)
    prob_fg = prob[0]
    prob_bg = prob[1]

    # Graph with m*n pixel nodes plus source and sink.
    gr = digraph()
    gr.add_nodes(range(m * n + 2))
    source = m * n  # second to last node is the source
    sink = m * n + 1  # last node is the sink

    # Normalize each RGB vector; substitute a tiny norm for zero
    # vectors so the division cannot blow up.
    for i in range(vim.shape[0]):
        norm = linalg.norm(vim[i])
        vim[i] = vim[i] / (norm if norm != 0 else 1e-6)

    # Walk all pixel nodes and add edges.
    for i in range(m * n):
        # Edge from the source (foreground likelihood).
        gr.add_edge((source, i), wt=(prob_fg[i] / (prob_fg[i] + prob_bg[i])))
        # Edge to the sink (background likelihood).
        gr.add_edge((i, sink), wt=(prob_bg[i] / (prob_fg[i] + prob_bg[i])))

        # Edges to existing 4-neighbors, weighted by color similarity.
        if i % n != 0:  # left neighbor exists
            edge_wt = kappa * np.exp(-1.0 * np.sum(
                (vim[i] - vim[i - 1])**2) / sigma)
            gr.add_edge((i, i - 1), wt=edge_wt)
        if (i + 1) % n != 0:  # right neighbor exists
            edge_wt = kappa * np.exp(-1.0 * np.sum(
                (vim[i] - vim[i + 1])**2) / sigma)
            gr.add_edge((i, i + 1), wt=edge_wt)
        if i // n != 0:  # neighbor above exists
            edge_wt = kappa * np.exp(-1.0 * np.sum(
                (vim[i] - vim[i - n])**2) / sigma)
            gr.add_edge((i, i - n), wt=edge_wt)
        if i // n != m - 1:  # neighbor below exists
            edge_wt = kappa * np.exp(-1.0 * np.sum(
                (vim[i] - vim[i + n])**2) / sigma)
            gr.add_edge((i, i + n), wt=edge_wt)

    return gr
Example #4
0
def build_bayes_grapy(im, labels, sigma=1e2, kappa=1):
    """Build a graph from the 4-neighborhood of pixels.

    Foreground and background are determined from labels (1 for
    foreground, -1 for background, 0 otherwise) and are modeled with a
    naive Bayes classifier.

    NOTE: the misspelled name ("grapy") is kept so existing callers
    keep working.

    BUG FIX: `return gr` was indented inside the pixel loop, so the
    graph was returned after processing only the first pixel; it now
    returns after the whole loop. A leftover debug print of vim.shape
    was also removed.
    """
    m, n = im.shape[:2]

    # One row per pixel, as an RGB vector.
    vim = im.reshape((-1, 3))

    # RGB samples of the marked foreground and background pixels.
    foreground = im[labels == 1].reshape((-1, 3))
    background = im[labels == -1].reshape((-1, 3))
    train_data = [foreground, background]

    # Train the naive Bayes classifier.
    bc = bayes.BayesClassifier()
    bc.train(train_data)

    # Class probabilities for every pixel.
    bc_labels, prob = bc.classify(vim)
    prob_fg = prob[0]
    prob_bg = prob[1]

    # Graph with m*n pixel nodes plus source and sink.
    gr = digraph()
    gr.add_nodes(range(m * n + 2))
    source = m * n  # second to last node is the source
    sink = m * n + 1  # last node is the sink

    # Normalize each RGB vector; the small offset keeps every vector
    # nonzero so the division cannot blow up.
    vim = vim.astype(dtype=np.float64)
    vim += 1e-6
    for i in range(vim.shape[0]):
        vim[i] = vim[i] / np.linalg.norm(vim[i])

    for i in range(m * n):
        # Edge from the source (foreground likelihood).
        gr.add_edge((source, i), wt=(prob_fg[i] / (prob_fg[i] + prob_bg[i])))

        # Edge to the sink (background likelihood).
        gr.add_edge((i, sink), wt=(prob_bg[i] / (prob_fg[i] + prob_bg[i])))

        # Edges to existing 4-neighbors, weighted by color similarity.
        if i % n != 0:  # left exists
            edge_wt = kappa * np.exp(-1.0 * sum(
                (vim[i] - vim[i - 1])**2) / sigma)
            gr.add_edge((i, i - 1), wt=edge_wt)
        if (i + 1) % n != 0:  # right exists
            edge_wt = kappa * np.exp(-1.0 * sum(
                (vim[i] - vim[i + 1])**2) / sigma)
            gr.add_edge((i, i + 1), wt=edge_wt)
        if i // n != 0:  # up exists
            edge_wt = kappa * np.exp(-1.0 * sum(
                (vim[i] - vim[i - n])**2) / sigma)
            gr.add_edge((i, i - n), wt=edge_wt)
        if i // n != m - 1:  # down exists
            edge_wt = kappa * np.exp(-1.0 * sum(
                (vim[i] - vim[i + n])**2) / sigma)
            gr.add_edge((i, i + n), wt=edge_wt)

    return gr
import pca
import dsift_test as dsift
import sift
import numpy as np
import bayes

if __name__ == '__main__':
    url1 = '/home/aurora/hdd/workspace/PycharmProjects/data/pcv_img/gesture/train'
    url2 = '/home/aurora/hdd/workspace/PycharmProjects/data/pcv_img/gesture/test'
    featurelist, labels = dsift.read_gesture_features_labels(url1)
    test_featurelist, test_labels = dsift.read_gesture_features_labels(url2)
    V, S, m = pca.pca(featurelist)
    classnames = ['A', 'B', 'C', 'F', 'P', 'V']
    V = V[:50]
    features = np.array([np.dot(V, f - m) for f in featurelist])
    test_features = np.array([np.dot(V, f - m) for f in test_featurelist])
    bc = bayes.BayesClassifier()
    blist = [features[np.where(labels == c)[0]] for c in classnames]
    bc.train(blist, classnames)
    res = bc.classify(test_features)[0]
    acc = np.sum((res == test_labels) * 1.0) / len(test_labels)
    print acc
    dsift.print_confusion(res, test_labels, classnames)
Example #6
0
def build_bayes_graph(im, labels, sigma=1e2, kappa=2):
    """Build a graph from the 4-neighborhood of pixels.

    Foreground and background are specified by labels
    (1: foreground, -1: background, 0: otherwise)
    and modeled with a naive Bayes classifier."""

    m, n = im.shape[:2]

    # One row per pixel, as an RGB vector.
    vim = im.reshape((-1, 3))

    # RGB samples of the marked foreground and background pixels.
    foreground = im[labels == 1].reshape((-1, 3))
    background = im[labels == -1].reshape((-1, 3))
    train_data = [foreground, background]

    # Train the naive Bayes classifier.
    bc = bayes.BayesClassifier()
    bc.train(train_data)

    # Class probabilities for every pixel.
    bc_lables, prob = bc.classify(vim)
    prob_fg = prob[0]
    prob_bg = prob[1]

    # Create a graph with m*n+2 nodes.
    gr = digraph()
    gr.add_nodes(range(m * n + 2))

    source = m * n  # second to last node is the source
    sink = m * n + 1  # last node is the sink

    # Normalize each RGB vector; substitute a tiny norm for
    # zero-length vectors so the division cannot blow up.
    for i in range(vim.shape[0]):
        tmp = linalg.norm(vim[i])
        if tmp == 0: tmp = 1e-6
        vim[i] = vim[i] / tmp

    # Walk all pixel nodes in order and add edges.
    for i in range(m * n):
        # Edge from the source (foreground likelihood).
        gr.add_edge((source, i), wt=(prob_fg[i] / (prob_fg[i] + prob_bg[i])))

        # Edge to the sink (background likelihood).
        gr.add_edge((i, sink), wt=(prob_bg[i] / (prob_fg[i] + prob_bg[i])))

        # Edges to neighboring pixels, weighted by color similarity.
        if i % n != 0:  # left neighbor exists
            edge_wt = kappa * exp(-1.0 * sum((vim[i] - vim[i - 1])**2) / sigma)
            gr.add_edge((i, i - 1), wt=edge_wt)
        if (i + 1) % n != 0:  # right neighbor exists
            edge_wt = kappa * exp(-1.0 * sum((vim[i] - vim[i + 1])**2) / sigma)
            gr.add_edge((i, i + 1), wt=edge_wt)
        if i // n != 0:  # neighbor above exists
            edge_wt = kappa * exp(-1.0 * sum((vim[i] - vim[i - n])**2) / sigma)
            gr.add_edge((i, i - n), wt=edge_wt)
        if i // n != m - 1:  # neighbor below exists
            edge_wt = kappa * exp(-1.0 * sum((vim[i] - vim[i + n])**2) / sigma)
            gr.add_edge((i, i + n), wt=edge_wt)

    return gr
Example #7
0
 def __init__(self, token):
     """Store *token* and create a fresh Bayes classifier.

     NOTE(review): token looks like a credential for some external
     service used by the enclosing class -- confirm with callers.
     """
     self.token = token
     self.bayes_classifier = bayes.BayesClassifier()