Example #1
def todo(path, zahyou):
    import numpy as np
    import cv2
    from pythonFile import click_pct, getVideoData
    import os
    import pickle

    padding = 10  # radius of the click-selection area around a feature point
    p = 10  # border (in pixels) cropped before corner detection
    winsize = 25
    c = [[255, 0, 0], [0, 0, 255], [0, 255, 0], [0, 255, 255]]  # category colors in BGR: cat1=blue, cat2=red, cat3=green, cat4=yellow
    selectDir = ['cat1', 'cat2', 'cat3', 'cat4']

    # set up the input video
    videoDir = path[:path.rfind('/')]
    dirName = getVideoData.getDirName(path)
    videoName = getVideoData.getVideoName(path)
    savePath = '/media/koshiba/Data/opticalflow/point_data/' + dirName + '/' + videoName
    cap = cv2.VideoCapture(path)
    print(path[path.rfind('/') + 1:])

    # video properties
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = cap.get(cv2.CAP_PROP_FPS)

    rot = 0
    # if the video is landscape, rotate it to portrait
    if width > height:
        rot = 1
        width, height = height, width

    print(videoName)

    # Shi-Tomasi corner detection parameters
    feature_params = dict(
        maxCorners=255,  # maximum number of corners to keep (int)
        qualityLevel=0.2,  # minimum accepted corner quality, relative to the best corner (double)
        minDistance=7,  # reject corners closer than this distance (double)
        blockSize=7,  # size of the neighborhood block (int)
        useHarrisDetector=False,  # False selects the Shi-Tomasi measure
        # k=0.04,  # only used by the Harris measure
    )

    # read the first frame
    ret, first_frame = cap.read()
    if rot == 1:
        first_frame = np.rot90(first_frame, -1)

    # convert to grayscale
    first_gray = cv2.cvtColor(first_frame, cv2.COLOR_BGR2GRAY)

    # detect feature points in the first frame
    prev_points = cv2.goodFeaturesToTrack(
        image=first_gray[p:-p, p:-p],  # input image with a p-pixel border cropped (point coordinates are relative to the crop)
        mask=None,  # corners where mask == 0 are ignored
        **feature_params)
    flow_layer = np.zeros_like(first_frame)
    # start with every point marked as noise (category 0)
    noise = [0] * len(prev_points)

    for i in prev_points:
        flow_layer = cv2.circle(
            flow_layer,  # image to draw on
            (int(i[0][0]), int(i[0][1])),  # center of the circle
            2,  # radius
            color=c[0],  # color
            thickness=3  # line thickness
        )
    frame = cv2.add(first_frame, flow_layer)
    # one independent layer per tracked point (a list of copies, not one shared array)
    save_layer_list = [np.zeros_like(first_frame) for _ in range(len(zahyou))]
    selectDirList = ['/cat1/'] * len(zahyou)

    ##########################################################
    # Clicking near a feature point cycles its category label
    ##########################################################
    while True:
        # record the clicked coordinates
        ret, points = click_pct.give_coorList(frame)
        points = np.array(points, dtype='int')
        # each click advances the category of nearby feature points
        for pt in points:
            area = [
                pt[0] - padding, pt[0] + padding, pt[1] - padding, pt[1] + padding
            ]
            for index, prev in enumerate(prev_points):
                if (area[0] <= int(prev[0][0]) <= area[1]) and \
                        (area[2] <= int(prev[0][1]) <= area[3]):
                    # cycle the category: 0 -> 1 -> 2 -> 3 -> 0
                    noise[index] = (noise[index] + 1) % 4

        # one drawing layer per category
        layer_list = [np.zeros_like(first_frame) for _ in range(len(selectDir))]

        for index, prev in enumerate(prev_points):
            center = (int(prev[0][0]), int(prev[0][1]))
            # per-point layer, colored by the point's current category
            save_layer = np.zeros_like(first_frame)
            save_layer_list[index] = cv2.circle(
                save_layer,  # image to draw on
                center,  # center of the circle
                5,  # radius
                color=c[noise[index]],  # color
                thickness=3  # line thickness
            )
            # record the save directory and draw on the matching category layer
            selectDirList[index] = '/' + selectDir[noise[index]] + '/'
            layer_list[noise[index]] = cv2.circle(
                layer_list[noise[index]], center, 5,
                color=c[noise[index]], thickness=3)

        # overlay the category layers on the first frame
        frame = first_frame.copy()
        for layer in layer_list:
            frame = cv2.add(frame, layer)
        if ret == 1:
            break

    # show the result image (display disabled)
    # cv2.namedWindow("frame", cv2.WINDOW_NORMAL)
    # cv2.imshow("frame", frame)
    # cv2.waitKey(0)
    cv2.destroyAllWindows()
    cv2.imwrite(savePath + '/' + videoName + '.jpg', frame)

    # save one classified image per category
    for n, layer in enumerate(layer_list):
        frame = cv2.add(first_frame, layer)
        cv2.imwrite(
            savePath + '/' + selectDir[n] + '/' + videoName + '_' +
            selectDir[n] + '.jpg', frame)

    for index in range(len(zahyou)):
        save_frame = cv2.add(first_frame, save_layer_list[index])
        cv2.imwrite(
            savePath + '/' + selectDirList[index] + '/pict/' + videoName +
            '_' + str(index + 1) + '.jpg', save_frame)

    category = np.array(noise)
    print(path[:path.find('/video')] + "/opticalflow/point_data/" + dirName +
          '/' + videoName + '/category.txt')

    with open(
            path[:path.find('/video')] + "/opticalflow/point_data/" + dirName +
            '/' + videoName + '/category.txt', "wb") as f:
        pickle.dump(category, f)

    return zahyou
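
A minimal usage sketch for this example (assuming the function above is saved in a module named clusteringPoint, as Example #2 suggests; the video path and trajectory data below are hypothetical placeholders):

import clusteringPoint

# hypothetical input video under the directory layout used above
video_path = '/media/koshiba/Data/video/sample_dir/sample.mp4'
# placeholder per-point trajectory data; normally produced by Make_wavedata.todo
zahyou = [[0.0] * 10 for _ in range(3)]
zahyou = clusteringPoint.todo(video_path, zahyou)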
Example #2
def doGet(path, videoName, savePath):
    import Make_wavedata, clusteringPoint, make_figure, make_fft
    from pythonFile import make_dirs, getVideoData
    import os
    import pickle
    import shutil

    print(path, videoName, savePath)

    dirName = getVideoData.getDirName(path)
    videoName = getVideoData.getVideoName(path)
    # create the output directories
    make_dirs.makeDir(savePath)

    zahyou = Make_wavedata.todo(path)  # estimate each feature point's motion via optical flow
    print(zahyou)
    if os.path.isfile('/media/koshiba/Data/opticalflow/point_data/' + dirName +
                      '/' + videoName + '/category.txt'):
        with open(
                '/media/koshiba/Data/opticalflow/point_data/' + dirName + '/' +
                videoName + '/category.txt', 'rb') as f:
            noise = pickle.load(f)
    else:
        noise = clusteringPoint.todo(path, zahyou)  # classify the points manually
    make_figure.todo(zahyou, savePath)  # plot the motion of the tracked points

    make_fft.doGet(zahyou, savePath)  # apply an FFT to the motion of the tracked points

    predList = [[], [], []]
    accuracy = ['-1', '-1', '-1']
    precision = ['-1', '-1', '-1']
    recall = ['-1', '-1', '-1']
    specificity = ['-1', '-1', '-1']
    tmp = 0

    for index1, pred in enumerate(zahyou):
        for index2, answer in enumerate(noise):
            #print(index1, index2)
            if (pred[index2] == 0 or pred[index2] == -1):
                if answer == 0:
                    predList[index1].append(0)
                else:
                    predList[index1].append(3)
            else:
                if answer == 1:
                    predList[index1].append(1)
                else:
                    predList[index1].append(2)

        predAll = len(predList[index1])
        tp = predList[index1].count(1)
        tn = predList[index1].count(0)
        fp = predList[index1].count(2)
        fn = predList[index1].count(3)

        print(predAll, tp, tn)
        accuracy[index1] = str((tp + tn) / predAll)
        if tp + fp != 0:
            precision[index1] = str(tp / (tp + fp))
        if tp + fn != 0:
            recall[index1] = str(tp / (tp + fn))
        if tn + fp != 0:
            specificity[index1] = str(tn / (fp + tn))

    print(zahyou)

    #elapsed_time = time.time() - start
    #print ("elapsed_time:{0}".format(elapsed_time) + "[sec]")

    print('accuracy: ' + accuracy[0] + ' ' + accuracy[1] + ' ' + accuracy[2])
    print('precision: ' + precision[0] + ' ' + precision[1] + ' ' + precision[2])
    print('recall: ' + recall[0] + ' ' + recall[1] + ' ' + recall[2])
    print('specificity: ' + specificity[0] + ' ' + specificity[1] + ' ' +
          specificity[2])

    with open(savePath + '/pointData_' + videoName + '.txt', 'wb') as f:
        pickle.dump(zahyou, f)
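
A hedged usage sketch (assuming doGet is saved in a module named run_pipeline, a hypothetical name; the paths are placeholders). Note that doGet recomputes dirName and videoName from path via getVideoData, so the videoName argument passed in is effectively overwritten:

import run_pipeline  # hypothetical module containing the doGet above

path = '/media/koshiba/Data/video/sample_dir/sample.mp4'  # placeholder
savePath = '/media/koshiba/Data/opticalflow/output/sample'  # placeholder
run_pipeline.doGet(path, 'sample', savePath)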
Example #3
from pythonFile import click_pct, k_means, timestump, getVideoData
import Make_wavedata  # provides todo(), called below
import math
from tkinter import filedialog
import scipy.stats
import os
import time
import pickle

# choose the input video via a file dialog
typ = [('', '*')]
dir = '/media/koshiba/Data/video'
path = filedialog.askopenfilename(filetypes=typ, initialdir=dir)
time_data = timestump.get_time()
start = time.time()

dirName = getVideoData.getDirName(path)
videoName = getVideoData.getVideoName(path)

with open('/media/koshiba/Data/opticalflow/point_data/' + dirName + '/' + videoName + '/category.txt', 'rb') as f:
    noise = pickle.load(f)

#noise = clusteringPoint.todo(path)
classList = Make_wavedata.todo(path, time_data)
print(noise)

predList = [[],[],[]]
accuracy = ['-1', '-1', '-1']
precision = ['-1', '-1', '-1']
recall = ['-1', '-1', '-1']
specificity = ['-1', '-1', '-1']
tmp = 0
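
The lists initialized above follow the same confusion-matrix bookkeeping as Example #2: each point is coded 0=TN, 1=TP, 2=FP, 3=FN, and the metrics are derived from those counts. A minimal self-contained sketch of that computation, using hypothetical codes:

pred = [0, 1, 1, 2, 3, 0, 1]  # hypothetical per-point codes: 0=TN, 1=TP, 2=FP, 3=FN
tp, tn, fp, fn = pred.count(1), pred.count(0), pred.count(2), pred.count(3)
accuracy = (tp + tn) / len(pred)
precision = tp / (tp + fp) if tp + fp else float('nan')
recall = tp / (tp + fn) if tp + fn else float('nan')
specificity = tn / (tn + fp) if tn + fp else float('nan')
print(accuracy, precision, recall, specificity)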