Example #1
def main(frame_path, dataset_name):
    print(frame_path)
    filename, f_type = getBaseName(frame_path)
    createFolder('img')
    createFolder('img/out-simple')
    createFolder('img/out-simple/' + dataset_name)
    # Note: the figure below is saved under img/out/, not the img/out-simple
    # tree created above; save_path is created separately on the next line.
    save_path = 'img/out/' + dataset_name + '/simple/'
    createFolder(save_path)

    img = cv2.imread(frame_path, 1)
    h, w, c = img.shape
    [b, g, r] = cv2.split(img)
    img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    img_H = img_hsv[:, :, 0]
    img_S = img_hsv[:, :, 1]
    img_V = img_hsv[:, :, 2]
    img_color_intensity = color_intensity(img)

    ### Light Detection
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) / 255.0
    ret, img_gray = cv2.threshold(img_gray, 0.4, 1, cv2.THRESH_TOZERO)
    #ret, img_gray_th = cv2.threshold(img_gray, 245/255, 1, cv2.THRESH_TOZERO)
    #ret, img_color_intensity_th = cv2.threshold(img_color_intensity, 0.2, 1, cv2.THRESH_TOZERO)

    img_nakagami = Nakagami_image_enhancement(img_gray, 5) / 255.0
    img_nakagami_c = Nakagami_image_enhancement(img_color_intensity, 5) / 255.0

    ret, img_nakagami_norm_th = cv2.threshold(img_nakagami, 240 / 255, 1,
                                              cv2.THRESH_TOZERO)
    ret, img_nakagami_c_th = cv2.threshold(img_nakagami_c, 240 / 255, 1,
                                           cv2.THRESH_TOZERO)

    pipline = [
        'img', 'img_H', 'img_S', 'img_V', 'img_gray', 'img_color_intensity',
        'img_nakagami', 'img_nakagami_c', 'img_nakagami_norm_th',
        'img_nakagami_c_th'
    ]
    imgs = [
        img, img_H, img_S, img_V, img_gray, img_color_intensity, img_nakagami,
        img_nakagami_c, img_nakagami_norm_th, img_nakagami_c_th
    ]

    fig = plt.figure(figsize=(32, 64))

    for i in range(len(imgs)):
        ax = fig.add_subplot(len(imgs) // 2 + 1, 2, i + 1)
        img_tmp = BGR_to_RGB(imgs[i])
        ax.set_title(pipline[i])
        if pipline[i] in ['img_gray_th_', 'img_nakagami_']:  # neither name is in pipline, so this colorbar branch never runs
            plt.imshow(img_tmp)
            plt.colorbar()
        elif len(img_tmp.shape) == 2:
            plt.imshow(img_tmp, cmap='gray')
        else:
            plt.imshow(img_tmp)

    plt.savefig(save_path + dataset_name + '_' + filename + '.png', dpi=200)

    return -1, -1
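# Nakagami_image_enhancement is never defined in these examples. If it follows
# the usual Nakagami-imaging recipe, it maps each pixel to the local Nakagami
# shape parameter m = E[x^2]^2 / Var(x^2) over a k x k window; the sketch below
# is that assumption, not the original code.
import cv2
import numpy as np


def nakagami_m_map(gray, k):
    x2 = gray.astype(np.float64) ** 2
    mean_x2 = cv2.blur(x2, (k, k))                        # local E[x^2]
    var_x2 = cv2.blur(x2 ** 2, (k, k)) - mean_x2 ** 2     # local Var(x^2)
    m = mean_x2 ** 2 / (var_x2 + 1e-12)                   # moment estimator of m
    return 255.0 * m / m.max()                            # rescale map to [0, 255]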
Example #2
def main(frame_path):

    filename, f_type = getBaseName(frame_path)
    createFolder('img')
    createFolder('img/out')
    createFolder('img/out/' + filename)
    save_path = 'img/out/' + filename + '/'

    img = cv2.imread(frame_path, 1)
    h, w, c = img.shape
    [b, g, r] = cv2.split(img)
    img_red_filted, idx_red = Euclidean_filter(img, 1, 150, 0, [255, 0, 0],
                                               [b, g, r], save_path)
    img_white_filted, idx_white = Euclidean_filter(img, 1, 255, 7,
                                                   [255, 255, 255], [b, g, r],
                                                   save_path)

    img_symbolic_idx, img_symbolic_show = symbolic_image([idx_red, idx_white],
                                                         (h, w))

    img_light_b = np.where(img_symbolic_idx > 0, 255, 0).astype('uint8')

    img_ccl_origin, img_ccl_show, label_num = ccl(img_light_b)
    img_box, boxes = find_BoundingBox(img_ccl_origin, img)
    """
    cv2.imshow("img_ccl", img_ccl)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    """

    #print(frame_path,label_num, img_ccl_origin.shape)

    if save:
        cv2.imwrite('img/out/' + filename + '/' + filename + '_origin.png',
                    img)
        cv2.imwrite(
            'img/out/' + filename + '/' + filename + '_img_red_filted.png',
            img_red_filted)
        cv2.imwrite(
            'img/out/' + filename + '/' + filename + '_img_white_filted.png',
            img_white_filted)

        cv2.imwrite(
            'img/out/' + filename + '/' + filename + '_img_symbolic.png',
            img_symbolic_show)
        cv2.imwrite(
            'img/out/' + filename + '/' + filename + '_img_light_b.png',
            img_light_b)
        cv2.imwrite('img/out/' + filename + '/' + filename + '_img_ccl.png',
                    img_ccl_show)
        cv2.imwrite('img/out/' + filename + '/' + filename + '_img_box.png',
                    img_box)
Example #3
def main(path):
    dataset_name = path.split('/')[0]
    filename = os.path.basename(path)
    filename = filename.split('.')[0]
    print(path, filename)

    imgcv = cv2.imread(path)
    dim = (1920, 1080)
    imgcv = cv2.resize(imgcv, dim)  #, interpolation = cv2.INTER_LINEAR

    result = tfnet.return_predict(imgcv)

    f_result = [a for a in result if a['label'] in set_of_car]
    img = np.copy(imgcv)
    h, w, c = img.shape
    img = filter_label_color(img)
    print(w, h, c)
    img_filtered_origin = np.copy(img)

    createFolder('out')
    createFolder('out/' + dataset_name)
    createFolder('out/' + dataset_name + '/origin')
    createFolder('out/' + dataset_name + '/binary')
    createFolder('out/' + dataset_name + '/yolo_data')

    with open('out/' + dataset_name + '/yolo_data/' + filename + '.json',
              'w') as f:
        # str() first: the prediction dicts may hold numpy values that
        # json.dump cannot serialize directly.
        json.dump(str(f_result), f)

    for r in f_result:
        print(r)
        x1 = r['topleft']['x']
        y1 = r['topleft']['y']
        x2 = r['bottomright']['x']
        y2 = r['bottomright']['y']
        color = []
        if r['label'] in ['car', 'motorbike']:
            color = [0, 255, 0]
            cv2.rectangle(img_filtered_origin, (x1, y1), (x2, y2), color,
                          cv2.FILLED)
            """
            cv2.imshow("img_nakagami", img_filtered_origin)
            cv2.waitKey(0)
            cv2.destroyAllWindows()
            """
        """
        if r['label']=='car':
            if x2>w/2:
                color = [0,255,255]
            else:
                color = [255,0,0]
        elif r['label']=='motorbike':
            if x2>w/2:
                color = [255,0,255]
            else:
                color=[0,255,0]
        elif r['label']=='bicycle':
            if x2>w/2:
                color = [255,255,0]
            else:
                color = [0,0,255]
        else:
            continue
        """

    img_b = binary_color_filter(img_filtered_origin, [0, 255, 0])
    cv2.imwrite('out/' + dataset_name + '/origin/' + filename + '.png',
                img_filtered_origin)
    cv2.imwrite('out/' + dataset_name + '/binary/' + filename + '.png', img_b)
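# tfnet and set_of_car are globals defined outside Example #3. With darkflow,
# tfnet is typically built as below; the cfg/weights paths and the exact label
# set here are placeholders, not values taken from this project:
from darkflow.net.build import TFNet

options = {'model': 'cfg/yolo.cfg', 'load': 'bin/yolo.weights', 'threshold': 0.3}
tfnet = TFNet(options)
set_of_car = {'car', 'bus', 'truck', 'motorbike', 'bicycle'}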
Example #4
save = True
state = 'train'
nakagami = True

names = [
    'img', 'img_basic_hsv', 'img_basic_H', 'img_basic_S', 'img_basic_V',
    'img_basic_gray', 'img_light_detection_gray_th',
    'img_light_detection_nakagami_norm', 'img_light_detection_nakagami_norm_th',
    'img_light_detection_nakagami_norm_th_clip', 'img_color_white_filted',
    'img_color_white_filted_gray', 'img_color_white_mor',
    'img_color_white_multiply', 'img_color_red_filted',
    'img_color_red_filted_gray', 'img_color_red_filted_gray_th',
    'img_color_red_mor', 'img_color_red_mor_th', 'img_color_red_multiply',
    'img_color_white_b', 'img_color_red_b', 'img_color_white_light',
    'img_color_red_light', 'img_color_red_contour', 'img_color_white_contour',
    'img_result_red_edgeboxes', 'img_result_white_edgeboxes',
    'img_result_roi_combine'
]

createFolder('img')
createFolder('img/out')
for dataset_name in train_dataset:
    createFolder('img/out/'+dataset_name)
    for i in range(len(names)):
        createFolder('img/out/'+dataset_name+'/'+names[i])

def main(frame_path, dataset_name):

    print(frame_path)
    filename, f_type = getBaseName(frame_path)

    #createFolder('img/out/'+dataset_name+'/'+filename)
    #save_path = 'img/out/'+dataset_name+'/'+filename+'/'
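# The helpers getBaseName and createFolder come from utils.files (imported in
# Example #11), but the module body is never shown. A minimal reconstruction
# consistent with every call site, assuming nothing beyond the stdlib:
import os


def getBaseName(path):
    # 'img/frame_001.png' -> ('frame_001', 'png')
    name, ext = os.path.splitext(os.path.basename(path))
    return name, ext.lstrip('.')


def createFolder(path):
    # os.makedirs also creates missing parents, so the step-by-step
    # createFolder('img'), createFolder('img/out'), ... chains in these
    # examples could each collapse to a single call.
    os.makedirs(path, exist_ok=True)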
Example #5
def main(frame_path, dataset_name, boxI=None):

    print(frame_path)
    filename, f_type = getBaseName(frame_path)

    #createFolder('img/out/'+dataset_name+'/'+filename)
    #save_path = 'img/out/'+dataset_name+'/'+filename+'/'

    img = cv2.imread(frame_path, 1)
    h, w, c = img.shape
    [b, g, r] = cv2.split(img)
    img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

    # Stretch each HSV channel to the full [0, 255] range.
    img_hsv[:, :, 0] = img_hsv[:, :, 0] / img_hsv[:, :, 0].max() * 255.0
    img_hsv[:, :, 1] = img_hsv[:, :, 1] / img_hsv[:, :, 1].max() * 255.0
    img_hsv[:, :, 2] = img_hsv[:, :, 2] / img_hsv[:, :, 2].max() * 255.0

    img_H = img_hsv[:, :, 0]
    img_S = img_hsv[:, :, 1]
    img_V = img_hsv[:, :, 2]

    ### Light Detection
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) / 255.0
    ret, img_gray_th = cv2.threshold(img_gray, 0.4, 1, cv2.THRESH_TOZERO)

    # Sigmoid contrast stretch: rescale gray to 0-255, squash through a
    # sigmoid centered at 128, then min-max normalize back to [0, 255].
    img_sigmoid = img_gray * 255.0
    img_sigmoid = 1 / (1 + np.exp(-(img_sigmoid - 128) / 20))
    print(img_sigmoid.min(), img_sigmoid.max())
    d = img_sigmoid.max() - img_sigmoid.min()

    img_sigmoid = 255 * (img_sigmoid - img_sigmoid.min()) / d
    print(img_sigmoid.min(), img_sigmoid.max())
    ret, img_sigmoid_th = cv2.threshold(img_sigmoid, 245, 255,
                                        cv2.THRESH_TOZERO)

    createFolder('img/out/' + dataset_name + '/' + "img_test_sigmoid")
    createFolder('img/out/' + dataset_name + '/' + "img_test_sigmoid_th")
    cv2.imwrite(
        'img/out/' + dataset_name + '/' + "img_test_sigmoid" + '/' + filename +
        '.png', img_sigmoid)
    cv2.imwrite(
        'img/out/' + dataset_name + '/' + "img_test_sigmoid_th" + '/' +
        filename + '.png', img_sigmoid_th)

    if nakagami:
        img_nakagami = Nakagami_image_enhancement(img_gray_th, 3)
    else:
        img_nakagami = img_gray_th
    img_nakagami_norm = img_nakagami / img_nakagami.max()  # resize to [0,1]

    ret, img_nakagami_norm_th = cv2.threshold(img_nakagami_norm, 200 / 255, 1,
                                              cv2.THRESH_TOZERO)

    ### clip center
    img_nakagami_norm_th_clip = clip_center(img_nakagami_norm_th, int(h / 3),
                                            int(h))

    img_white_filted = img_nakagami_norm_th_clip * 255.0
    #ret , img_white_filted = cv2.threshold(img_nakagami_norm_th_clip, 245/255.0, 1, cv2.THRESH_TOZERO)
    #img_white_filted = img_white_filted*255.0

    k = np.ones((3, 3), np.uint8)
    img_white_mor = cv2.morphologyEx(
        img_white_filted, cv2.MORPH_CLOSE, k, iterations=3) / 255.0

    ret, img_white_b = cv2.threshold(img_white_mor, 0.9, 255,
                                     cv2.THRESH_BINARY)

    ### Contour

    img_white_contour = contour_Detection(img_white_b)

    ### EdgeBox
    img_roi_combine = np.copy(img)
    #img_red_edgeboxes,img_roi_combine, boxes_red = Edgeboxes(img_gray=img_red_contour, img_origin=img, color=[0,255,0], img_roi_combine=img_roi_combine, state=state, filename=filename, base=base) #contour, img_origin, box color(BGR)
    img_white_edgeboxes, img_roi_combine, boxes_white = Edgeboxes(
        img_gray=img_white_contour,
        img_origin=img,
        color=[255, 0, 0],
        img_roi_combine=img_roi_combine,
        state=state,
        filename=filename,
        base=base)  #contour, img_origin, box color(BGR)

    ### Fusion ###
    img_ground = cv2.imread(base + "ground_truth/" + filename + '.bmp',
                            1).astype('uint8')
    img_ground_mask = binary_color_filter(img_ground).astype('uint8')

    img_yolo_b = cv2.imread(base + "yolo_binary/" + filename + '.png',
                            0).astype('uint8')

    features_white, answers_white, bI = make_feature(
        boxes=boxes_white,
        version='v7-hand',
        img_ground=img_ground,
        img_ground_mask=img_ground_mask,
        state=state,
        img_S=img_S,
        img_yolo_b=img_yolo_b,
        filename=filename,
        img=img,
        boxI=boxI)

    features = features_white
    answers = answers_white

    imgs = [
        img, img_hsv, img_H, img_S, img_V, img_gray * 255.0,
        img_gray_th * 255.0, img_nakagami_norm * 255.0,
        img_nakagami_norm_th * 255.0, img_nakagami_norm_th_clip * 255.0,
        img_white_filted, img_white_mor * 255.0, img_white_contour,
        img_white_edgeboxes, img_roi_combine
    ]

    for i in range(len(names)):

        save_path = 'img/out/' + dataset_name + '/' + names[i] + '/' + filename
        try:
            cv2.imwrite(save_path + '.png', imgs[i])
        except Exception:
            # imgs has fewer entries than names; the extra names are skipped.
            print('error on ', names[i])

    #img_white_edgeboxes,img_roi_combine,

    #cv2.imwrite(save_path+'_img_nakagami_thB.png', img_nakagami_thB*255.0)

    #img_white_filted, idx_white, img_white_d = Euclidean_filter(img=img, threshold=20, color=[255,255,255], img_BGR_spilt=[b,g,r], save_path=save_path)
    """    
    cv2.imshow("img_nakagami", img_gray)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    """

    return features, answers, bI
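# The sigmoid stretch in Example #5 squashes 0-255 gray levels through
# s(x) = 1 / (1 + exp(-(x - 128) / 20)) and then min-max rescales the result
# back to [0, 255]. The same steps as a self-contained helper (the function
# name and defaults are mine):
import numpy as np


def sigmoid_stretch(gray_u8, center=128.0, slope=20.0):
    s = 1.0 / (1.0 + np.exp(-(gray_u8.astype(np.float64) - center) / slope))
    d = s.max() - s.min()
    return 255.0 * (s - s.min()) / d if d > 0 else np.zeros_like(s)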
Example #6
import pandas as pd
import glob
import cv2
import numpy as np
import json
from utils.files import getBaseName, createFolder

yolo_path = '/home/alanhc-school/yolo/darkflow/'
train_dataset = ['dataset_100', 'pic_100']
for dataset_name in train_dataset:
    base = '../../dataset/' + dataset_name + '/'
    dataset = "origin/"
    save_path = 'img/out/' + dataset_name + '/img_result_light_mask/'
    createFolder('img/out/' + dataset_name + '/img_result_light_mask')
    createFolder('img/out/' + dataset_name + '/img_result_light_mask/yolo')
    createFolder('img/out/' + dataset_name + '/img_result_light_mask/light')
    createFolder('img/out/' + dataset_name + '/img_result_light_mask/combine')
    createFolder('img/out/' + dataset_name +
                 '/img_result_light_mask/yolo_rect')
    createFolder('img/out/' + dataset_name +
                 '/img_result_light_mask/light_rect')
    createFolder('img/out/' + dataset_name +
                 '/img_result_light_mask/combine_rect')

    data = pd.read_csv(base + dataset + 'data-6.csv')
    test_data = pd.read_csv(base + dataset + 'data-6-test.csv')

    print(data.shape)
    print(test_data.shape)
Example #7
def main(frame_path, dataset_name):
    print(frame_path)
    filename, f_type = getBaseName(frame_path)
    createFolder('img')
    createFolder('img/out')
    createFolder('img/out/'+dataset_name)
    createFolder('img/out/'+dataset_name+'/'+filename)
    save_path = 'img/out/'+dataset_name+'/'+filename+'/'

    img = cv2.imread(frame_path, 1)
    h, w, c = img.shape
    
    img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    img_H = img_hsv[:,:,0]
    img_S = img_hsv[:,:,1]
    img_V = img_hsv[:,:,2]
    
    [b,g,r] = cv2.split(img)
    
    
    ### image enhancement
    img_th = color_intensity(img)
    img_nakagami = Nakagami_image_enhancement(img_th, 3)
    ret, img_nakagami_thB = cv2.threshold(img_nakagami, 100, 1, cv2.THRESH_BINARY)

    img_red_filted, idx_red = Euclidean_filter(img, 1,150,0,[255,0,0], [b,g,r], save_path)  #img, percent,up_threshold,lower_threshold color(RGB), save_path
    img_white_filted,idx_white = Euclidean_filter(img, 1,255,7, [255,255,255], [b,g,r], save_path)  #img, percent, color(RGB), save_path
    
    
    img_red_filted_gray = cv2.cvtColor(img_red_filted, cv2.COLOR_BGR2GRAY)
    img_white_filted_gray = cv2.cvtColor(img_white_filted, cv2.COLOR_BGR2GRAY)

    img_red_filted_gray_max = img_red_filted_gray.max()
    img_white_filted_gray_max = img_white_filted_gray.max()

    img_red_filted_gray_norm = img_red_filted_gray/img_red_filted_gray_max
    img_white_filted_gray_norm = img_white_filted_gray/img_white_filted_gray_max

    img_red_multiply = img_nakagami_thB * img_red_filted_gray_norm
    img_white_multiply = img_nakagami_thB * img_white_filted_gray_norm

    img_red_multiply = img_red_multiply*255.0
    img_white_multiply = img_white_multiply*255.0

    #img_red_nakagami = Nakagami_image_enhancement(img_red_filted_gray_norm, 3) #gray_img, kernel_size
    #img_white_nakagami = Nakagami_image_enhancement(img_white_filted_gray_norm, 3) #gray_img, kernel_size

    img_red_nakagami_cliped = clip_center(img_red_multiply, int(h/3), int(h))  # img, y_up, y_down
    img_white_nakagami_cliped = clip_center(img_white_multiply, int(h/3), int(h))  # img, y_up, y_down

    img_red_contour = contour_Detection(img_red_nakagami_cliped)
    img_white_contour = contour_Detection(img_white_nakagami_cliped)

    """
    cv2.imshow("img_red_contour", img_red_contour)
    #cv2.imshow("img_white_filted_gray", img_white_filted_gray)
    #cv2.imshow("img_red_nakagami", img_red_nakagami)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    """

    img_roi_combine = np.copy(img)
    img_red_edgeboxes,img_roi_combine, boxes_red = Edgeboxes(img_red_contour, img, [0,255,0], img_roi_combine, state=state, filename=filename, base=base) #contour, img_origin, box color(BGR)
    img_white_edgeboxes,img_roi_combine, boxes_white = Edgeboxes(img_white_contour, img, [255,0,0], img_red_edgeboxes, state=state, filename=filename, base=base) #contour, img_origin, box color(BGR)
    
    
    ### fusion ###
    img_ground = cv2.imread(base+"ground_truth/"+filename+'.bmp',1).astype('uint8')
    img_ground_mask = binary_color_filter(img_ground).astype('uint8')

    img_yolo_b = cv2.imread(base+"yolo_binary/"+filename+'.png',0).astype('uint8')

    features_red, answers_red = make_feature(boxes_red, img_ground=img_ground, img_ground_mask=img_ground_mask, state=state, img_H=img_H, img_yolo_b=img_yolo_b, filename=filename)
    features_white, answers_white = make_feature(boxes_white, img_ground=img_ground, img_ground_mask=img_ground_mask, state=state, img_H=img_H, img_yolo_b=img_yolo_b, filename=filename)

    features = features_red + features_white
    answers = answers_red + answers_white

    ### report ###

    if save:
        cv2.imwrite(save_path+'_origin.png', img)
        cv2.imwrite(save_path+'_color_intensity_with_threshold.png', img_th*255.0)
        cv2.imwrite(save_path+'_img_nakagami.png', img_nakagami)
        cv2.imwrite(save_path+'_img_nakagami_thB.png', img_nakagami_thB*255.0)

        cv2.imwrite(save_path+'_img_H.png', img_H)
        cv2.imwrite(save_path+'_img_S.png', img_S)
        cv2.imwrite(save_path+'_img_V.png', img_V)
        cv2.imwrite(save_path+'_img_red_filted.png', img_red_filted)
        cv2.imwrite(save_path+'_img_white_filted.png', img_white_filted)
        cv2.imwrite(save_path+'_img_red_filted_gray.png', img_red_filted_gray)

        #cv2.imwrite(save_path+'_img_red_nakagami.png', img_red_nakagami)
        #cv2.imwrite(save_path+'_img_white_nakagami.png', img_white_nakagami)

        cv2.imwrite(save_path+'_img_red_nakagami_cliped.png', img_red_nakagami_cliped)
        cv2.imwrite(save_path+'_img_white_nakagami_cliped.png', img_white_nakagami_cliped)

        cv2.imwrite(save_path+'_img_red_multiply.png', img_red_multiply)
        cv2.imwrite(save_path+'_img_white_multiply.png', img_white_multiply)

        cv2.imwrite(save_path+'_img_red_contour.png', img_red_contour)
        cv2.imwrite(save_path+'_img_white_contour.png', img_white_contour)

        cv2.imwrite(save_path+'_img_red_edgeboxes.png', img_red_edgeboxes)
        cv2.imwrite(save_path+'_img_white_edgeboxes.png', img_white_edgeboxes)
        cv2.imwrite(save_path+'_img_roi_combine.png', img_roi_combine)

        #cv2.imwrite(save_path+'_img_red.png', r)
    return features, answers
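# Euclidean_filter is not shown; from its call sites it keeps pixels whose
# color lies within a Euclidean distance band of a target RGB color and also
# returns their indices. A rough sketch of the core idea, with a simplified
# signature (the original also takes percent/upper/lower arguments):
import numpy as np


def euclidean_color_mask(img_bgr, target_rgb, threshold):
    target_bgr = np.array(target_rgb[::-1], dtype=np.float64)
    dist = np.linalg.norm(img_bgr.astype(np.float64) - target_bgr, axis=2)
    mask = dist < threshold
    out = np.zeros_like(img_bgr)
    out[mask] = img_bgr[mask]          # keep matching pixels, zero the rest
    return out, np.where(mask)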
Example #8
            boxes.append([r2_x1, r2_y1, r2_x2, r2_y2])
            boxes.sort()
            is_cross = rect_cross(boxes)
            if is_cross:
                x, y, w, h = mid_pos(boxes)
                name_check = data['filename'] == filename
                pos_check = data['position'] == str(n_data)
                data.loc[pos_check, 'position'] = str([x, y, w, h])
save_data = pd.DataFrame(data, columns=['filename', 'position'])
save_data.to_csv('demo-result-after.csv')

g = save_data.groupby(["filename"])

print(data.shape)

createFolder('img/demo-out/3/img_result_light_mask')
names = ['yolo', 'yolo_rect', 'light', 'light_rect', 'combine', 'combine_rect']
for i in range(len(names)):
    createFolder('img/demo-out/3/img_result_light_mask/' + names[i])

yolo_path = '/home/alanhc-school/yolo/darkflow/'
img_path = '/home/alanhc-school/Downloads/research/research-beta/architecture0714/img/demo-out/3/img/'
img_yolo_path = '/home/alanhc-school/yolo/darkflow/tlchia-dataset-v2_day/'
for filename in g.count().index:

    img = cv2.imread(img_path + str(filename) + '.png', 1)
    img_yolo = cv2.imread(img_yolo_path + str(filename) + '.jpg', 1)
    img_yolo = cv2.resize(img_yolo, (1920, 1080), interpolation=cv2.INTER_AREA)
    img_yolo_rect = np.copy(img_yolo)
    img_combine_rect = np.copy(img)
    img_light_rect = np.copy(img)
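# rect_cross and mid_pos are not defined in this fragment. Judging from the
# call sites they test whether the two [x1, y1, x2, y2] boxes overlap and
# average them into x, y, w, h; both sketches below are guesses:
def rect_cross(boxes):
    (ax1, ay1, ax2, ay2), (bx1, by1, bx2, by2) = boxes[0], boxes[1]
    return ax1 < bx2 and bx1 < ax2 and ay1 < by2 and by1 < ay2


def mid_pos(boxes):
    (ax1, ay1, ax2, ay2), (bx1, by1, bx2, by2) = boxes[0], boxes[1]
    x1, y1 = (ax1 + bx1) // 2, (ay1 + by1) // 2
    x2, y2 = (ax2 + bx2) // 2, (ay2 + by2) // 2
    return x1, y1, x2 - x1, y2 - y1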
Example #9
from utils.connect_compoent import *
from utils.symbolic import symbolic_image

names = [
    'img', 'img_basic_hsv', 'img_basic_H', 'img_basic_S', 'img_basic_V',
    'img_basic_gray', 'img_light_detection_gray_th',
    'img_light_detection_nakagami_norm',
    'img_light_detection_nakagami_norm_th',
    'img_light_detection_nakagami_norm_th_clip', 'img_color_white_filted',
    'img_color_white_mor', 'img_color_white_binary', 'img_color_white_contour',
    'img_result_white_edgeboxes', 'img_result_roi_combine'
]

datasets = glob.glob('img/demo-out/3/origin/*')
for i in range(len(names)):
    createFolder('img/demo-out/3/' + names[i])

save = True
nakagami = True
state = 'test'
yolo_b_path = '/home/alanhc-school/yolo/darkflow/out/tlchia-dataset-v2_day/binary/'


def main(frame_name):
    features = ""
    print(frame_name)
    filename, f_type = getBaseName(frame_name)

    img = cv2.imread(frame_name, 1)
    h, w, c = img.shape
    [b, g, r] = cv2.split(img)
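# clip_center(img, y_up, y_down) is used in Examples #5 and #7 to suppress
# everything outside a horizontal band (there, the lower two thirds of the
# frame, where vehicle lights appear). A plausible reconstruction:
import numpy as np


def clip_center(img, y_up, y_down):
    out = np.zeros_like(img)
    out[y_up:y_down] = img[y_up:y_down]  # keep rows y_up..y_down only
    return out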
Example #10
                print(confusion_matrix(y_test, y_pred))
                print(classification_report(y_test, y_pred))
                text_file = open(
                    test_path + "-result-model-" + model_dataset + '-' +
                    model_name + ".txt", "w+")
                text_file.write(
                    str(confusion_matrix(y_test, y_pred)) + '\n' +
                    classification_report(y_test, y_pred))
                text_file.close()
                y_pred = pd.DataFrame({'predict': y_pred})
                y_pred.to_csv(test_path + '-' + model_name + '-predict.csv')

            ### make result
            for model_name in ['rf', 'svm']:
                save_path = 'img/out/' + test_dataset + '/img_result_light_mask/'
                createFolder(save_path)
                save_path = 'img/out/' + test_dataset + '/img_result_light_mask/' + model_name + '/'
                createFolder(save_path)

                createFolder(save_path + 'yolo')
                createFolder(save_path + 'light')
                createFolder(save_path + 'combine')
                createFolder(save_path + 'yolo_rect')
                createFolder(save_path + 'light_rect')
                createFolder(save_path + 'combine_rect')

                data = pd.read_csv(test_path + '.csv')

                test_data = pd.read_csv(test_path + '-' + model_name +
                                        '-predict.csv')
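# scikit-learn's metrics expect the ground truth first; a quick reference for
# the calls used above (toy labels, not project data):
from sklearn.metrics import classification_report, confusion_matrix

y_true = [1, 0, 1, 1, 0]
y_hat = [1, 0, 0, 1, 1]
print(confusion_matrix(y_true, y_hat))   # rows = true labels, cols = predicted
print(classification_report(y_true, y_hat))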
Example #11
from utils.files import getBaseName, createFolder

try:
    shutil.rmtree(
        '/home/alanhc-school/Downloads/research/research-beta/architecture0714/img/demo-out'
    )
except FileNotFoundError:
    print('folder already clean.')

files = glob.glob('../../../dataset/highway_video/*')
video_path = files[1]
print(files)

filename = getBaseName(video_path)[0]
createFolder('../img')
createFolder('../img/demo-out')
createFolder('../img/demo-out/' + filename)
createFolder('../img/demo-out/' + filename + '/origin')

save_path = '../img/demo-out/' + filename + '/'
cyclegan_path = '/home/alanhc-school/Desktop/CycleGAN-Tensorflow-2/'

vidcap = cv2.VideoCapture(video_path)
success, image = vidcap.read()
count = 0
while success:
    try:
        # save frame as JPEG file
        success, image = vidcap.read()
        cv2.imshow('frame', image)
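# Because vidcap.read() is called once before the loop and again at the top of
# it, frame 0 is never shown. A loop shape that visits every frame (same
# video_path as above):
import cv2

vidcap = cv2.VideoCapture(video_path)
count = 0
while True:
    success, image = vidcap.read()
    if not success:
        break  # end of stream or read error
    cv2.imshow('frame', image)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    count += 1
vidcap.release()
cv2.destroyAllWindows()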
Example #12
import time
import pandas as pd
import os
import json
from sklearn.metrics import classification_report
import shutil

from utils.files import getBaseName, createFolder

base = '../../dataset/dataset_100/'
dataset = "origin/"
save_path = base + 'result_out'

if os.path.isdir(base + 'result_out'):
    shutil.rmtree(base + 'result_out')
createFolder(base + 'result_out')

data = pd.read_csv(base + dataset + 'data-5.csv')
test_data = pd.read_csv(base + dataset + 'data-5-test.csv')

print('report:')
y_test = data['answers']
y_pred = test_data['predict']
print(classification_report(y_test, y_pred))

data = data[['filename', 'position']]
test_data = test_data[['predict']]
merge = pd.concat([data, test_data], axis=1)

for i in range(merge.shape[0]):
    tStart = time.time()