Code Example #1
def enforce_maximum_profit(categorical_results):
    mp_data = {}
    thresholds = {}

    for key, value in categorical_results.items():
        max_acc = 0
        threshold = 0
        max_range_thresh = max(value, key=lambda x: x[0])
        min_range_thresh = min(value, key=lambda x: x[0])
        for thresh in np.arange(min_range_thresh[0], max_range_thresh[0],
                                0.01):
            result = utils.apply_threshold(value, thresh)
            total_num_cases = 0
            total_correct = 0
            for prediction, label in result:
                total_num_cases += 1.0
                if prediction == label:
                    total_correct += 1.0
            acc = total_correct / total_num_cases
            # print(key, thresh, acc)
            if acc > max_acc:
                max_acc = acc
                threshold = thresh
        thresholds[key] = threshold
    for key, value in categorical_results.items():
        result = utils.apply_threshold(value, thresholds[key])
        mp_data[key] = result

    # sanity check: overall accuracy after applying the per-group thresholds
    acc = utils.get_total_accuracy(mp_data)

    return mp_data, thresholds
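These fairness examples all rely on a utils.apply_threshold helper that is not shown on this page. A minimal sketch of what it presumably does for lists of (probability, label) pairs follows; the actual helper in the original utils module may differ in signature and detail.

# Hypothetical sketch of utils.apply_threshold for (probability, label) pairs;
# the real helper used by the examples on this page may differ.
def apply_threshold(prediction_label_pairs, threshold):
    """Binarize probabilistic predictions: 1 if probability >= threshold, else 0."""
    return [(1 if prob >= threshold else 0, label)
            for prob, label in prediction_label_pairs]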
Code Example #2
def enforce_single_threshold(categorical_results):
    single_threshold_data = {}
    thresholds = {}
    single_thresh = 0
    merged = []
    max_acc = 0
    for k, v in categorical_results.items():
        merged.extend(v)

    max_range_thresh = max(merged, key=lambda x: x[0])
    min_range_thresh = min(merged, key=lambda x: x[0])

    for thresh in np.arange(min_range_thresh[0], max_range_thresh[0], 0.01):
        for key, value in categorical_results.items():
            result = utils.apply_threshold(value, thresh)
            single_threshold_data[key] = result
        acc = utils.get_total_accuracy(single_threshold_data)
        if (acc > max_acc):
            max_acc = acc
            single_thresh = thresh

    for key, value in categorical_results.items():
        thresholds[key] = single_thresh
        result = utils.apply_threshold(value, thresholds[key])
        single_threshold_data[key] = result

    return single_threshold_data, thresholds
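Several of these examples also call utils.get_total_accuracy on a dict of per-group thresholded results. A plausible sketch, under the same (prediction, label) pair assumption as above; the real helper may differ.

# Hypothetical sketch of utils.get_total_accuracy; the real helper may differ.
def get_total_accuracy(categorical_data):
    """Overall accuracy across all groups of (prediction, label) pairs."""
    correct = 0
    total = 0
    for pairs in categorical_data.values():
        for prediction, label in pairs:
            correct += int(prediction == label)
            total += 1
    return correct / total if total else 0.0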
Code Example #3
def enforce_single_threshold(categorical_results):  # Shamus O'Connor
    import numpy as np
    import utils
    single_threshold_data = {}
    thresholds = {}

    full_list = []
    max_index = 0  # Index that returns max accuracy
    acc = []

    univ_thresh = 0
    thresh_num = 500  # Num of thresholds to try
    thresh = np.linspace(0, 1, thresh_num)

    for group in categorical_results.keys():
        full_list += categorical_results[group]

    for t in thresh:
        proc_data = utils.apply_threshold(full_list, t)  # thresholded predictions at threshold t
        acc.append(utils.get_num_correct(proc_data) /
                   len(proc_data))  # ACCURACY
#         acc.append(utils.apply_financials(proc_data))                 # COST

    max_index = acc.index(max(acc))
    univ_thresh = thresh[max_index]

    for race in categorical_results:
        subset = categorical_results[race]
        thresholds[race] = univ_thresh
        single_threshold_data[race] = utils.apply_threshold(
            subset, thresholds[race])

    return single_threshold_data, thresholds
Code Example #4
def enforce_maximum_profit(categorical_results):  # Shamus O'Connor
    import numpy as np
    import utils
    mp_data = {}
    thresholds = {}

    for race in categorical_results:
        subset = categorical_results[race]

        max_index = 0  # Index that returns max accuracy
        acc = []

        thresh_num = len(subset)  # Num of thresholds to try
        thresh = np.linspace(0, 1, thresh_num)

        for t in thresh:
            proc_data = utils.apply_threshold(subset, t)  # thresholded predictions at threshold t
            acc.append(utils.get_num_correct(proc_data) /
                       len(proc_data))  # ACCURACY
#             acc.append(utils.apply_financials(proc_data,True))                 # COST

        max_index = acc.index(max(acc))
        thresholds[race] = thresh[max_index]

        mp_data[race] = utils.apply_threshold(subset, thresholds[race])

    return mp_data, thresholds
Code Example #5
def enforce_equal_opportunity(categorical_results, epsilon):
    thresholds = {}
    equal_opportunity_data = {}

    tpr_data = {}

    for threshold in np.arange(0, 1, 0.01):
        for key, value in categorical_results.items():
            t_data = utils.apply_threshold(value, threshold)
            tpr = utils.get_true_positive_rate(t_data)
            if (key not in tpr_data):
                tpr_data[key] = []
            tpr_data[key].append([tpr, threshold])

    keys = [*tpr_data]
    tpr_data_refined = []
    for tpr_d_0 in tpr_data[keys[0]]:
        for tpr_d_1 in tpr_data[keys[1]]:
            if (abs(tpr_d_0[0] - tpr_d_1[0]) <= epsilon):
                tpr_data_refined.append([tpr_d_0, tpr_d_1])

    tpr_data_refined_2 = []
    for val in tpr_data_refined:
        for tpr_d_2 in tpr_data[keys[2]]:
            if (abs(tpr_d_2[0] - val[0][0]) <= epsilon):
                if (abs(tpr_d_2[0] - val[1][0]) <= epsilon):
                    tpr_data_refined_2.append([val[0], val[1], tpr_d_2])

    tpr_data_refined_3 = []
    for val in tpr_data_refined_2:
        for tpr_d_3 in tpr_data[keys[3]]:
            if (abs(tpr_d_3[0] - val[0][0]) <= epsilon):
                if (abs(tpr_d_3[0] - val[1][0]) <= epsilon):
                    if (abs(tpr_d_3[0] - val[2][0]) <= epsilon):
                        tpr_data_refined_3.append(
                            [val[0], val[1], val[2], tpr_d_3])

    #print(len(tpr_data_refined_3))
    max_acc = 0
    temp = {}
    for thresh in tpr_data_refined_3:
        temp['African-American'] = utils.apply_threshold(
            categorical_results['African-American'], thresh[0][1])
        temp['Caucasian'] = utils.apply_threshold(
            categorical_results['Caucasian'], thresh[1][1])
        temp['Hispanic'] = utils.apply_threshold(
            categorical_results['Hispanic'], thresh[2][1])
        temp['Other'] = utils.apply_threshold(categorical_results['Other'],
                                              thresh[3][1])
        acc = utils.get_total_accuracy(temp)
        if (acc > max_acc):
            max_acc = acc
            thresholds['African-American'] = thresh[0][1]
            thresholds['Caucasian'] = thresh[1][1]
            thresholds['Hispanic'] = thresh[2][1]
            thresholds['Other'] = thresh[3][1]
            equal_opportunity_data = temp.copy()

    return equal_opportunity_data, thresholds
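Equal opportunity compares true positive rates across groups, so this example depends on utils.get_true_positive_rate. A minimal sketch under the same assumptions; the real helper may differ.

# Hypothetical sketch of utils.get_true_positive_rate; the real helper may differ.
def get_true_positive_rate(prediction_label_pairs):
    """TPR = TP / (TP + FN), treating label 1 as the positive class."""
    true_positives = sum(1 for pred, label in prediction_label_pairs
                         if label == 1 and pred == 1)
    actual_positives = sum(1 for _, label in prediction_label_pairs if label == 1)
    return true_positives / actual_positives if actual_positives else 0.0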
Code Example #6
def enforce_demographic_parity(categorical_results, epsilon):
    demographic_parity_data = {}
    thresholds = {}
    npp_data = {}

    for threshold in np.arange(0, 1, 0.01):
        for key, value in categorical_results.items():
            t_data = utils.apply_threshold(value, threshold)
            npp = (utils.get_num_predicted_positives(t_data) / len(t_data))
            if (key not in npp_data):
                npp_data[key] = []
            npp_data[key].append([npp, threshold])

    keys = [*npp_data]
    npp_data_refined = []
    for npp_d_0 in npp_data[keys[0]]:
        for npp_d_1 in npp_data[keys[1]]:
            if (abs(npp_d_0[0] - npp_d_1[0]) <= epsilon):
                npp_data_refined.append([npp_d_0, npp_d_1])

    npp_data_refined_2 = []
    for val in npp_data_refined:
        for npp_d_2 in npp_data[keys[2]]:
            if (abs(npp_d_2[0] - val[0][0]) <= epsilon):
                if (abs(npp_d_2[0] - val[1][0]) <= epsilon):
                    npp_data_refined_2.append([val[0], val[1], npp_d_2])

    npp_data_refined_3 = []
    for val in npp_data_refined_2:
        for npp_d_3 in npp_data[keys[3]]:
            if (abs(npp_d_3[0] - val[0][0]) <= epsilon):
                if (abs(npp_d_3[0] - val[1][0]) <= epsilon):
                    if (abs(npp_d_3[0] - val[2][0]) <= epsilon):
                        npp_data_refined_3.append(
                            [val[0], val[1], val[2], npp_d_3])

    max_acc = 0
    temp = {}
    for thresh in npp_data_refined_3:
        temp['African-American'] = utils.apply_threshold(
            categorical_results['African-American'], thresh[0][1])
        temp['Caucasian'] = utils.apply_threshold(
            categorical_results['Caucasian'], thresh[1][1])
        temp['Hispanic'] = utils.apply_threshold(
            categorical_results['Hispanic'], thresh[2][1])
        temp['Other'] = utils.apply_threshold(categorical_results['Other'],
                                              thresh[3][1])
        acc = utils.get_total_accuracy(temp)
        if (acc > max_acc):
            max_acc = acc
            thresholds['African-American'] = thresh[0][1]
            thresholds['Caucasian'] = thresh[1][1]
            thresholds['Hispanic'] = thresh[2][1]
            thresholds['Other'] = thresh[3][1]
            demographic_parity_data = temp.copy()

    return demographic_parity_data, thresholds
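Demographic parity instead compares the rate of predicted positives, via utils.get_num_predicted_positives. A plausible sketch; the real helper may differ.

# Hypothetical sketch of utils.get_num_predicted_positives; the real helper may differ.
def get_num_predicted_positives(prediction_label_pairs):
    """Count pairs whose binarized prediction is the positive class (1)."""
    return sum(1 for prediction, _ in prediction_label_pairs if prediction == 1)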
Code Example #7
def enforce_maximum_profit(categorical_results):
    import itertools          # assumed module-level imports in the original file
    import utils as u

    mp_data = {}
    thresholds = {}

    a = categorical_results['African-American']
    b = categorical_results['Caucasian']
    c = categorical_results['Hispanic']
    d = categorical_results['Other']

    best_accuracy = 0
    best_threshold = None

    # candidate thresholds 0.0, 0.1, ..., 0.9, shared by every group
    l1 = [i / 10 for i in range(10)]
    l2 = l3 = l4 = l1

    for i in itertools.product(l1, l2, l3, l4):
        arr1 = u.apply_threshold(a, i[0])
        arr2 = u.apply_threshold(b, i[1])
        arr3 = u.apply_threshold(c, i[2])
        arr4 = u.apply_threshold(d, i[3])

        d9 = {'African-American': arr1, 'Caucasian': arr2,
              'Hispanic': arr3, 'Other': arr4}
        acc = u.get_total_accuracy(d9)

        if best_accuracy < acc:
            best_accuracy = acc
            best_threshold = [i[0], i[1], i[2], i[3]]
            mp_data = d9
        # optionally stop early once a target accuracy is reached, e.g. acc >= 0.63

    thresholds = {'African-American': best_threshold[0],
                  'Caucasian': best_threshold[1],
                  'Hispanic': best_threshold[2],
                  'Other': best_threshold[3]}

    return mp_data, thresholds
Code Example #8
def enforce_single_threshold(categorical_results):
    import utils as u         # assumed module-level import in the original file

    single_threshold_data = {}
    thresholds = {}

    a = categorical_results['African-American']
    b = categorical_results['Caucasian']
    c = categorical_results['Hispanic']
    d = categorical_results['Other']

    best_accuracy = 0
    best_threshold = None

    # try the same threshold 0.00, 0.01, ..., 0.99 for every group
    for i in range(100):
        threshold = i / 100
        arr1 = u.apply_threshold(a, threshold)
        arr2 = u.apply_threshold(b, threshold)
        arr3 = u.apply_threshold(c, threshold)
        arr4 = u.apply_threshold(d, threshold)

        d9 = {'African-American': arr1,
              'Caucasian': arr2,
              'Hispanic': arr3,
              'Other': arr4}

        acc = u.get_total_accuracy(d9)

        if best_accuracy < acc:
            best_accuracy = acc
            best_threshold = threshold
            single_threshold_data = d9

    thresholds = {'African-American': best_threshold,
                  'Caucasian': best_threshold,
                  'Hispanic': best_threshold,
                  'Other': best_threshold}

    return single_threshold_data, thresholds
Code Example #9
    def heat_and_threshold(self,
                           image,
                           box_list,
                           rolling_threshold=1,
                           current_threshold=1):
        heat = np.zeros_like(image[:, :, 0]).astype(np.float)

        # Add heat to each box in box list
        raw_heat = add_heat(heat, box_list)

        # Smoothen out heated windows based on time-averaging
        avg_heat = self.rolling_sum([heat])['heat']

        # Apply threshold to help remove false positives
        raw_heat = apply_threshold(raw_heat, CURRENT_FRAME_HEAT_THRESHOLD
                                   )  # SETTINGS.CURRENT_FRAME_HEAT_THRESHOLD
        avg_heat = apply_threshold(
            avg_heat,
            ROLLING_SUM_HEAT_THRESHOLD)  # SETTINGS.ROLLING_SUM_HEAT_THRESHOLD

        # Visualize the heatmap when displaying
        # TODO: if VideoMode; else (255)
        raw_heatmap = np.clip(raw_heat, 0, 255)
        avg_heatmap = np.clip(avg_heat, 0, 255)

        image = self.add_to_debugbar(image,
                                     avg_heatmap,
                                     'Rolling Sum Heatmap',
                                     position='right')
        image = self.add_to_debugbar(image,
                                     raw_heatmap,
                                     'Current Fr. Heatmap',
                                     position='left')

        # Find final boxes from heatmap using label function
        raw_labels = label(raw_heatmap)
        avg_labels = label(avg_heatmap)

        # Overlap Raw with Avg
        draw_img = draw_labeled_bboxes(image,
                                       raw_labels,
                                       color=(1, 0, 0),
                                       thickness=2,
                                       meta=False)  # red
        draw_img = draw_labeled_bboxes(draw_img,
                                       avg_labels,
                                       meta=HEATMAP_METRICS)
        return draw_img, avg_heatmap, avg_labels
Code Example #10
def process(image, svc, X_scaler):
	# Test the result on one single image
	image = mpimg.imread(image)
	draw_image = np.copy(image)

	windows = utils.slide_window(image, x_start_stop=x_start_stop, y_start_stop=y_start_stop, xy_window=(96, 96), xy_overlap=(0.75, 0.75))

	hot_windows = utils.search_windows(image, windows, svc, X_scaler, color_space=color_space, 
							spatial_size=spatial_size, hist_bins=hist_bins, 
							orient=orient, pix_per_cell=pix_per_cell, 
							cell_per_block=cell_per_block, 
							hog_channel=hog_channel, spatial_feat=spatial_feat, 
							hist_feat=hist_feat, hog_feat=hog_feat)                       

	window_img = utils.draw_boxes(draw_image, hot_windows, color=(0, 0, 255), thick=6)

	# Find where the boxes overlap the most by drawing a heatmap
	heat = np.zeros_like(window_img[:,:,0]).astype(np.float)
	heat = utils.add_heat(heat, hot_windows)
	heat = utils.apply_threshold(heat, 1)
	heatmap = np.clip(heat, 0, 255)
	labels = label(heatmap)
	draw_img = utils.draw_labeled_bboxes(image, labels)

	return draw_img
Code Example #11
def process_video(img):
    global previous_heatmap
    global previous_states

    windows1, _ = utils.find_cars(img, 400, 656, 2.0, clf, scaler, p['orient'],
                                  p['pix_per_cell'], p['cell_per_block'],
                                  p['spatial_size'], p['hist_bins'])
    hot_windows = windows1
    windows2, _ = utils.find_cars(img, 350, 550, 1.2, clf, scaler, p['orient'],
                                  p['pix_per_cell'], p['cell_per_block'],
                                  p['spatial_size'], p['hist_bins'])
    # hot_windows = windows1 + windows2
    # windows3, _ = utils.find_cars(img, 350, 500, 0.8, clf, scaler, p['orient'], p['pix_per_cell'], p['cell_per_block'],
    #                               p['spatial_size'], p['hist_bins'])
    # hot_windows = windows1 + windows2 + windows3
    # hot_windows = windows1 + windows2

    heatmap = utils.add_heat(previous_heatmap, hot_windows)
    previous_heatmap = heatmap * 0.5

    heatmap = utils.apply_threshold(heatmap, 10)
    img, states = utils.draw_labeled_bboxes(img, heatmap, previous_states)

    # Add new state, and remove last if bigger than 3
    previous_states.append(states)
    number_of_states_to_keep = 10
    if len(previous_states) > number_of_states_to_keep:
        previous_states = previous_states[-number_of_states_to_keep:]

    return img
Code Example #12
    def getThreshBuffHeatMap(self, thresh=2):
        """
        Return thresholded buffered heat map results.
        Existing maps are added together, then the threshold is applied.
        :param thresh: threshold to use
        :return: thresholded cumulative heat map
        """
        heatmap_cum = np.sum(self.heatmaps, axis=0)
        return apply_threshold(heatmap_cum, threshold=thresh)
Code Example #13
    def draw_heatmap_labels(self, shape):
        all_windows = []
        for w in self.windows:
            all_windows.extend(w)

        heatmap = np.zeros(shape)
        heatmap = add_heat(heatmap, all_windows)
        heatmap = apply_threshold(heatmap, self.threshold)
        labels = label(heatmap)
        return labels, heatmap
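This example and the other vehicle-detection snippets use a different apply_threshold: it operates on a 2-D heat map of window counts rather than on prediction/label pairs. Plausible sketches of the heat-map helpers in the Udacity-style form these projects appear to follow; the projects' own implementations may differ slightly.

import numpy as np

# Plausible Udacity-style heat-map helpers; assumptions, not the projects' exact code.
def add_heat(heatmap, bbox_list):
    """Add +1 heat inside every detection box ((x1, y1), (x2, y2))."""
    for box in bbox_list:
        heatmap[box[0][1]:box[1][1], box[0][0]:box[1][0]] += 1
    return heatmap

def apply_threshold(heatmap, threshold):
    """Zero out heat-map pixels at or below the given count to suppress false positives."""
    heatmap[heatmap <= threshold] = 0
    return heatmap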
Code Example #14
def func(prediction_label_pairs):
    # Sweep thresholds 0.01 .. 1.00 and record the positive predictive value
    # at each one (despite the true_positives/TPR variable names).
    true_positives = []

    for i in range(1, 101):
        threshold = float(i) / 100.0
        eval_copy = list.copy(prediction_label_pairs)
        eval_copy = u.apply_threshold(eval_copy, threshold)
        TPR = u.get_positive_predictive_value(eval_copy)
        true_positives.append(TPR)

    return true_positives
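Example #22 below reuses this helper: it calls func once per group and then searches for threshold combinations whose positive predictive values agree within a small tolerance. Note that the sweep starts at 0.01, so index i of the returned list corresponds to the threshold (i + 1) / 100.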
Code Example #15
def pipeline(img):
    img_draw_search = img.copy()
    img_draw_cars = img.copy()
    img_processed = process_image(img)
    img_search = img_processed[p_search.ystart:p_search.ystop, :, :]
    shape = img_search.shape
    img_search = cv2.resize(
        img_search,
        (np.int(shape[1] / p_search.scale), np.int(shape[0] / p_search.scale)))
    hog_features = get_hog_features(img_search, p_features)[0]
    heatmap = slide_and_search(img_search, img_draw_search, hog_features,
                               classifier, p_search, p_features)

    heatmaps.update(heatmap)
    heatmap_sum = heatmaps.get_sum()
    heatmap_thresh = heatmap_sum.copy()
    apply_threshold(heatmap_thresh, 25)
    boxes = scipy_label(heatmap_thresh)

    draw_car_boxes(img_draw_cars, boxes)

    return img_draw_search, img_draw_cars, heatmap_sum
Code Example #16
    def detect(self, image, do_hog_once=True):
        out_windows = []
        if do_hog_once:
            out_windows = self.find_multiscale(image)
            heatmap = utils.get_heatmap(image, out_windows)
            heatmap = self.smoother(heatmap)
            heatmap = utils.apply_threshold(heatmap,3)
            boxes = utils.get_labeled_boxes(heatmap)
        else:
            windows = utils.slide_window(image, x_start_stop=[None, None], y_start_stop=self.y_start_stop,
                        xy_window=(64, 64), xy_overlap=(0.85, 0.85))
            boxes = self.search_windows(image, windows)

        final_image = utils.draw_boxes(image, boxes, color=(0, 0, 255), thick=6)
        return final_image
Code Example #17
def enforce_maximum_profit(categorical_results):
    # Must complete this function!
    mp_data = {}
    thresholds = {}
    for key in categorical_results.keys():
        max_profit = float("-inf")
        data = categorical_results[key]
        thresholdList = [round(x[0], 2) for x in data]
        thresholdList = set(thresholdList)
        for t in thresholdList:
            threshed = apply_threshold(data, t)
            profit = apply_financials(threshed, True)
            if max_profit < profit:
                max_profit = profit
                mp_data[key] = threshed
                thresholds[key] = t

    return mp_data, thresholds
Code Example #18
def enforce_predictive_parity(categorical_results,
                              epsilon):  # Kedaar Raghavendra Rao
    import utils as u
    predictive_parity_data = {}
    thresholds = {}

    max_total_acc = 0
    temp_thresh = {}  # {race, [thresh]}
    max_thresh = {}  # {race, [thresh]}
    max_thresh_pred = {}  # {race, [thresholded_pred]}
    temp_thresh_pred = {}  # {race, [thresholded_pred]}

    race_cases = categorical_results  #categorical_results contains [predicted value, actual label]

    for p in range(1, 100):
        prob = p / 100
        temp_thresh_pred = {}
        temp_thresh = {}
        for race in race_cases:
            for thresh in range(1, 100):
                t = thresh / 100
                x = {}
                x[race] = u.apply_threshold(race_cases[race],
                                            t)  #thresholded_pred
                r_prob = u.get_positive_predictive_value(
                    x[race]
                )  # u.get_num_predicted_positives(x[race]) / len(x[race])
                if (compare_probs(r_prob, prob, epsilon)):
                    temp_thresh[race] = [t]
                    temp_thresh_pred[race] = x[race]
                    break

        if len(temp_thresh_pred) == len(race_cases):
            total_accuracy = u.get_total_accuracy(temp_thresh_pred)
            if total_accuracy > max_total_acc:
                max_total_acc = total_accuracy
                max_thresh_pred = temp_thresh_pred
                max_thresh = temp_thresh

    predictive_parity_data = max_thresh_pred
    thresholds = max_thresh

    return predictive_parity_data, thresholds
Code Example #19
def process_image(img):
    '''Pipeline to prepare video images with vehicle detection'''
    boxes = find_cars_multiscale(img, multiscale, clf, X_scaler, cspace, spatial_size,
                                 hist_bins, orient, pix_per_cell, cell_per_block, hog_channel)
    if len(Box_mem.boxes) >= n:
        Box_mem.boxes.pop(0)
    Box_mem.boxes.append(boxes)
    box_with_mem = []
    for box in Box_mem.boxes:
        box_with_mem.extend(box)
    heat = np.zeros_like(img[:, :, 0]).astype(np.float)
    heat = add_heat(heat, box_with_mem)
    heat = apply_threshold(heat, 5)
    heatmap = np.clip(heat, 0, 255)
    labels = label(heatmap)
    heatmap_small = cv2.resize(heatmap, (320, 180)).astype(np.float32)
    norm = Normalize(vmin=0, vmax=24)
    heatmap_small = np.delete(cm.hot(norm(heatmap_small))*255.0, 3, 2)
    draw_img = draw_labeled_bboxes(np.copy(img), labels)
    draw_img[50:50+180, 50:50+320] = heatmap_small
    return draw_img  # , heat
Code Example #20
def enforce_equal_opportunity(categorical_results, epsilon):  # Rishabh Sharma
    import utils as u
    thresholds = {}
    equal_opportunity_data = {}

    max_total_acc = 0
    temp_thresh = {}  # {race, [thresh]}
    max_thresh = {}  # {race, [thresh]}
    max_thresh_pred = {}  # {race, [thresholded_pred]}
    temp_thresh_pred = {}  # {race, [thresholded_pred]}

    race_cases = categorical_results  # categorical_results contains [predicted value, actual label]

    for p in range(1, 100):
        prob = p / 100
        temp_thresh_pred = {}
        temp_thresh = {}
        for race in race_cases:
            for thresh in range(1, 100):
                t = thresh / 100
                x = {}
                x[race] = u.apply_threshold(race_cases[race],
                                            t)  #thresholded_pred
                r_prob = u.get_true_positive_rate(x[race])
                if (compare_probs(r_prob, prob, epsilon)):
                    temp_thresh[race] = t
                    temp_thresh_pred[race] = x[race]
                    break

        if len(temp_thresh_pred) == len(race_cases):
            total_accuracy = u.get_total_accuracy(temp_thresh_pred)
            if total_accuracy > max_total_acc:
                max_total_acc = total_accuracy
                max_thresh_pred = temp_thresh_pred
                max_thresh = temp_thresh

    equal_opportunity_data = max_thresh_pred
    thresholds = max_thresh

    return equal_opportunity_data, thresholds
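Both this example and #18 call a compare_probs helper that is not shown on this page. A minimal sketch of what it presumably checks:

# compare_probs is not defined in these snippets; a likely minimal version.
def compare_probs(prob_a, prob_b, epsilon):
    """True when the two rates differ by at most epsilon."""
    return abs(prob_a - prob_b) <= epsilon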
Code Example #21
def enforce_single_threshold(categorical_results):
    single_threshold_data = {}
    thresholds = {}

    thresholdList = []
    for key in categorical_results.keys():
        rounded = [round(x[0], 2) for x in categorical_results[key]]
        thresholdList += rounded
    thresholdList = set(thresholdList)
    max_profit = float("-inf")
    for t in thresholdList:
        dummy_data = {}
        for key in categorical_results.keys():
            data = categorical_results[key]
            threshed = apply_threshold(data, t)
            dummy_data[key] = threshed
        profit = apply_financials(dummy_data)
        if max_profit < profit:
            max_profit = profit
            single_threshold_data = dummy_data
            for key in categorical_results.keys():
                thresholds[key] = t

    return single_threshold_data, thresholds
Code Example #22
def enforce_predictive_parity(categorical_results, epsilon):
    import numpy as np        # assumed module-level imports in the original file
    import utils as u

    predictive_parity_data = {}
    thresholds = {}

    a = categorical_results['African-American']
    b = categorical_results['Caucasian']
    c = categorical_results['Hispanic']
    d = categorical_results['Other']

    # positive predictive value at thresholds 0.01 .. 1.00 for each group (see func above)
    a1 = func(a)
    b1 = func(b)
    c1 = func(c)
    d1 = func(d)

    best_accuracy = 0
    best_threshold = None

    # collect threshold combinations whose PPVs agree within +/- 0.01
    # (note: the tolerance is hard-coded; epsilon is not used)
    arr = []
    for i in range(100):
        n1 = a1[i] - 0.01
        n2 = a1[i] + 0.01
        for j in range(100):
            if b1[j] >= n1 and b1[j] <= n2:
                for k in range(100):
                    if c1[k] >= n1 and c1[k] <= n2:
                        for l in range(100):
                            if d1[l] >= n1 and d1[l] <= n2:
                                arr.append([i / 100, j / 100, k / 100, l / 100])

    # cache the thresholded predictions once per distinct threshold value
    uniqueValues = np.unique(np.asarray(arr))
    unique_dict_a = {}
    unique_dict_b = {}
    unique_dict_c = {}
    unique_dict_d = {}
    for i1 in uniqueValues:
        unique_dict_a[i1] = u.apply_threshold(a, i1)
        unique_dict_b[i1] = u.apply_threshold(b, i1)
        unique_dict_c[i1] = u.apply_threshold(c, i1)
        unique_dict_d[i1] = u.apply_threshold(d, i1)

    for combo in arr:
        arr1 = unique_dict_a[combo[0]]
        arr2 = unique_dict_b[combo[1]]
        arr3 = unique_dict_c[combo[2]]
        arr4 = unique_dict_d[combo[3]]

        d9 = {'African-American': arr1,
              'Caucasian': arr2,
              'Hispanic': arr3,
              'Other': arr4}

        acc = u.get_total_accuracy(d9)

        if best_accuracy < acc:
            best_accuracy = acc
            best_threshold = combo
            predictive_parity_data = d9

    thresholds = {'African-American': best_threshold[0],
                  'Caucasian': best_threshold[1],
                  'Hispanic': best_threshold[2],
                  'Other': best_threshold[3]}

    return predictive_parity_data, thresholds
Code Example #23
def search_car(img, dist_pickle):
    svc = dist_pickle["clf"]
    X_scaler = dist_pickle["scaler"]
    orient = dist_pickle["orient"]
    pix_per_cell = dist_pickle["pix_per_cell"]
    cell_per_block = dist_pickle["cell_per_block"]
    spatial_size = dist_pickle["spatial_size"]
    hist_bins = dist_pickle["hist_bins"]

    ystart = 400
    ystop = 656
    scale = 1.5


    draw_img = np.copy(img)

    windows = []

    colorspace = 'YUV'  # Can be RGB, HSV, LUV, HLS, YUV, YCrCb
    orient = 11
    pix_per_cell = 16
    cell_per_block = 2
    hog_channel = 'ALL'  # Can be 0, 1, 2, or "ALL"

    ystart = 400
    ystop = 464
    scale = 1.0
    windows += (find_cars(img, ystart, ystop, scale, colorspace, hog_channel,
                          svc, None, orient, pix_per_cell, cell_per_block,
                          None, None))
    ystart = 416
    ystop = 480
    scale = 1.0
    windows += (find_cars(img, ystart, ystop, scale, colorspace, hog_channel,
                          svc, None, orient, pix_per_cell, cell_per_block,
                          None, None))
    ystart = 400
    ystop = 496
    scale = 1.5
    windows += (find_cars(img, ystart, ystop, scale, colorspace, hog_channel,
                          svc, None, orient, pix_per_cell, cell_per_block,
                          None, None))
    ystart = 432
    ystop = 528
    scale = 1.5
    windows += (find_cars(img, ystart, ystop, scale, colorspace, hog_channel,
                          svc, None, orient, pix_per_cell, cell_per_block,
                          None, None))
    ystart = 400
    ystop = 528
    scale = 2.0
    windows += (find_cars(img, ystart, ystop, scale, colorspace, hog_channel,
                          svc, None, orient, pix_per_cell, cell_per_block,
                          None, None))
    ystart = 432
    ystop = 560
    scale = 2.0
    windows += (find_cars(img, ystart, ystop, scale, colorspace, hog_channel,
                          svc, None, orient, pix_per_cell, cell_per_block,
                          None, None))
    ystart = 400
    ystop = 596
    scale = 3.5
    windows += (find_cars(img, ystart, ystop, scale, colorspace, hog_channel,
                          svc, None, orient, pix_per_cell, cell_per_block,
                          None, None))
    ystart = 464
    ystop = 660
    scale = 3.5
    windows += (find_cars(img, ystart, ystop, scale, colorspace, hog_channel,
                          svc, None, orient, pix_per_cell, cell_per_block,
                          None, None))

    #    window_list = utils.slide_window(img)

    heat_map = np.zeros(img.shape[:2])
    heat_map = utils.add_heat(heat_map, windows)
    heat_map_thresholded = utils.apply_threshold(heat_map, 1)
    labels = label(heat_map_thresholded)
    draw_img = utils.draw_labeled_bboxes(draw_img, labels)

    return draw_img
Code Example #24
File: pipeline.py  Project: YeSei/DL_vehicle-detector
def search_car(img):
    draw_img = np.copy(img)

    windows = []

    colorspace = 'YUV'  # Can be RGB, HSV, LUV, HLS, YUV, YCrCb
    orient = 11
    pix_per_cell = 16
    cell_per_block = 2
    hog_channel = 'ALL'  # Can be 0, 1, 2, or "ALL"

    ystart = 400
    ystop = 464
    scale = 1.0
    windows += (find_cars(img, ystart, ystop, scale, colorspace, hog_channel,
                          svc, None, orient, pix_per_cell, cell_per_block,
                          None, None))
    ystart = 416
    ystop = 480
    scale = 1.0
    windows += (find_cars(img, ystart, ystop, scale, colorspace, hog_channel,
                          svc, None, orient, pix_per_cell, cell_per_block,
                          None, None))
    ystart = 400
    ystop = 496
    scale = 1.5
    windows += (find_cars(img, ystart, ystop, scale, colorspace, hog_channel,
                          svc, None, orient, pix_per_cell, cell_per_block,
                          None, None))
    ystart = 432
    ystop = 528
    scale = 1.5
    windows += (find_cars(img, ystart, ystop, scale, colorspace, hog_channel,
                          svc, None, orient, pix_per_cell, cell_per_block,
                          None, None))
    ystart = 400
    ystop = 528
    scale = 2.0
    windows += (find_cars(img, ystart, ystop, scale, colorspace, hog_channel,
                          svc, None, orient, pix_per_cell, cell_per_block,
                          None, None))
    ystart = 432
    ystop = 560
    scale = 2.0
    windows += (find_cars(img, ystart, ystop, scale, colorspace, hog_channel,
                          svc, None, orient, pix_per_cell, cell_per_block,
                          None, None))
    ystart = 400
    ystop = 596
    scale = 3.5
    windows += (find_cars(img, ystart, ystop, scale, colorspace, hog_channel,
                          svc, None, orient, pix_per_cell, cell_per_block,
                          None, None))
    ystart = 464
    ystop = 660
    scale = 3.5
    windows += (find_cars(img, ystart, ystop, scale, colorspace, hog_channel,
                          svc, None, orient, pix_per_cell, cell_per_block,
                          None, None))

    #    window_list = utils.slide_window(img)

    heat_map = np.zeros(img.shape[:2])
    heat_map = utils.add_heat(heat_map, windows)
    heat_map_thresholded = utils.apply_threshold(heat_map, 1)
    labels = label(heat_map_thresholded)
    draw_img = utils.draw_labeled_bboxes(draw_img, labels)

    return draw_img
Code Example #25
                     vis=True, feature_vec=True)
    plt.imsave(r"./output_images/" + str(i) + "_3_hog.jpg", hog_im, cmap='gray')


for fname in images:
    print('processing ', fname, '...')
    img = mpimg.imread(fname)
    boxes = find_cars_multiscale(img, multiscale, clf, X_scaler, cspace, spatial_size,
                                 hist_bins, orient, pix_per_cell, cell_per_block, hog_channel)
    out_img = draw_boxes(img, boxes)
    plt.imsave(r"./output_images/" + fname.split('\\')[-1].split('.')[0] + "_5_bbox.jpg", out_img)
    heat = np.zeros_like(img[:, :, 0]).astype(np.float)
    # Add heat to each box in box list
    heat = add_heat(heat, boxes)
    # Apply threshold to help remove false positives
    heat = apply_threshold(heat, 2)
    # Visualize the heatmap when displaying
    heatmap = np.clip(heat, 0, 255)
    # Find final boxes from heatmap using label function
    labels = label(heatmap)
    # Prepare heatmap image overlay
    heatmap_small = cv2.resize(heatmap, (320, 180)).astype(np.float32)
    norm = Normalize(vmin=0, vmax=12)
    heatmap_small = np.delete(cm.hot(norm(heatmap_small))*255.0, 3, 2)
    draw_img = draw_labeled_bboxes(np.copy(img), labels)
    # Insert heatmap image overlay
    draw_img[50:50+180, 50:50+320] = heatmap_small
    plt.imsave(r"./output_images/" + fname.split('\\')[-1].split('.')[0] + "_6_heat.jpg", draw_img)


# class to contain last n batch of boxes
Code Example #26
def search_car(img):
    draw_img = np.copy(img)
    # img = img.astype(np.float32) / 255
    
    # all_windows = []

    # X_start_stop = [[None, None], [None, None], [None, None], [None, None]]
    # w0, w1, w2, w3 = 64, 96, 128, 196
    # o0, o1, o2, o3 = 0.75, 0.75, 0.75, 0.75
    # XY_window = [(w0, w0), (w1, w1), (w2, w2), (w3, w3)]
    # XY_overlap = [(o0, o0), (o1, o1), (o2, o2), (o3, o3)]
    # yi0, yi1, yi2, yi3 = 400, 400, 400, 400
    # Y_start_stop = [[yi0, yi0 + w0 * 1.25], [yi1, yi1 + w1 * 1.25], [yi2, yi2 + w2 * 1.25], [yi3, yi3 + w3 * 1.25]]
    #
    #
    #
    # for i in range(len(Y_start_stop)):
    #     windows = utils.slide_window(img, x_start_stop=X_start_stop[i], y_start_stop=Y_start_stop[i],
    #                         xy_window=XY_window[i], xy_overlap=XY_overlap[i])
    #
    #     all_windows += windows

    # on_windows = utils.search_windows(img, all_windows, svc, X_scaler, spatial_feat=True, hist_feat=True,
    #                                   hog_channel='ALL')
    windows = []

    colorspace = 'YUV'  # Can be RGB, HSV, LUV, HLS, YUV, YCrCb
    orient = 11
    pix_per_cell = 16
    cell_per_block = 2
    hog_channel = 'ALL'  # Can be 0, 1, 2, or "ALL"

    ystart = 400
    ystop = 464
    scale = 1.0
    windows+=(find_cars(img, ystart, ystop, scale, colorspace, hog_channel, svc, None,
                                orient, pix_per_cell, cell_per_block, None, None))
    ystart = 416
    ystop = 480
    scale = 1.0
    windows+=(find_cars(img, ystart, ystop, scale, colorspace, hog_channel, svc, None,
                                orient, pix_per_cell, cell_per_block, None, None))
    ystart = 400
    ystop = 496
    scale = 1.5
    windows+=(find_cars(img, ystart, ystop, scale, colorspace, hog_channel, svc, None,
                                orient, pix_per_cell, cell_per_block, None, None))
    ystart = 432
    ystop = 528
    scale = 1.5
    windows += (find_cars(img, ystart, ystop, scale, colorspace, hog_channel, svc, None,
                          orient, pix_per_cell, cell_per_block, None, None))
    ystart = 400
    ystop = 528
    scale = 2.0
    windows += (find_cars(img, ystart, ystop, scale, colorspace, hog_channel, svc, None,
                          orient, pix_per_cell, cell_per_block, None, None))
    ystart = 432
    ystop = 560
    scale = 2.0
    windows += (find_cars(img, ystart, ystop, scale, colorspace, hog_channel, svc, None,
                          orient, pix_per_cell, cell_per_block, None, None))
    ystart = 400
    ystop = 596
    scale = 3.5
    windows += (find_cars(img, ystart, ystop, scale, colorspace, hog_channel, svc, None,
                          orient, pix_per_cell, cell_per_block, None, None))
    ystart = 464
    ystop = 660
    scale = 3.5
    windows += (find_cars(img, ystart, ystop, scale, colorspace, hog_channel, svc, None,
                          orient, pix_per_cell, cell_per_block, None, None))
    
#    window_list = utils.slide_window(img)

    heat_map = np.zeros(img.shape[:2])
    heat_map = utils.add_heat(heat_map, windows)
    heat_map_thresholded = utils.apply_threshold(heat_map, 1)
    labels = label(heat_map_thresholded)
    draw_img = utils.draw_labeled_bboxes(draw_img, labels)

#    draw_img = utils.draw_windows(draw_img, on_windows)
    return draw_img
Code Example #27
def enforce_predictive_parity(categorical_results, epsilon):
    predictive_parity_data = {}
    thresholds = {}
    ppv_data = {}

    for threshold in np.arange(0, 1, 0.01):
        for key, value in categorical_results.items():
            t_data = utils.apply_threshold(value, threshold)
            ppv = utils.get_positive_predictive_value(t_data)
            if (key not in ppv_data):
                ppv_data[key] = []
            ppv_data[key].append([ppv, threshold])

    keys = [*ppv_data]
    ppv_data_refined = []
    for ppv_d_0 in ppv_data[keys[0]]:
        for ppv_d_1 in ppv_data[keys[1]]:
            if (abs(ppv_d_0[0] - ppv_d_1[0]) <= epsilon):
                ppv_data_refined.append([ppv_d_0, ppv_d_1])

    ppv_data_refined_2 = []
    for val in ppv_data_refined:
        for ppv_d_2 in ppv_data[keys[2]]:
            if (abs(ppv_d_2[0] - val[0][0]) <= epsilon):
                if (abs(ppv_d_2[0] - val[1][0]) <= epsilon):
                    ppv_data_refined_2.append([val[0], val[1], ppv_d_2])

    ppv_data_refined_3 = []
    for val in ppv_data_refined_2:
        for ppv_d_3 in ppv_data[keys[3]]:
            if (abs(ppv_d_3[0] - val[0][0]) <= epsilon):
                if (abs(ppv_d_3[0] - val[1][0]) <= epsilon):
                    if (abs(ppv_d_3[0] - val[2][0]) <= epsilon):
                        ppv_data_refined_3.append(
                            [val[0], val[1], val[2], ppv_d_3])

    #print(len(ppv_data_refined_3))
    max_acc = 0
    temp = {}
    for thresh in ppv_data_refined_3:
        temp['African-American'] = utils.apply_threshold(
            categorical_results['African-American'], thresh[0][1])
        temp['Caucasian'] = utils.apply_threshold(
            categorical_results['Caucasian'], thresh[1][1])
        temp['Hispanic'] = utils.apply_threshold(
            categorical_results['Hispanic'], thresh[2][1])
        temp['Other'] = utils.apply_threshold(categorical_results['Other'],
                                              thresh[3][1])
        acc = utils.get_total_accuracy(temp)
        if (acc > max_acc):
            max_acc = acc
            thresholds['African-American'] = thresh[0][1]
            thresholds['Caucasian'] = thresh[1][1]
            thresholds['Hispanic'] = thresh[2][1]
            thresholds['Other'] = thresh[3][1]
            predictive_parity_data = temp.copy()

    return predictive_parity_data, thresholds
Code Example #28
def search_car(img, framecount):
    draw_img = np.copy(img)

    windows = []

    colorspace = 'YUV'  # Can be RGB, HSV, LUV, HLS, YUV, YCrCb
    orient = 11
    pix_per_cell = 16
    cell_per_block = 2
    hog_channel = 'ALL'  # Can be 0, 1, 2, or "ALL"

    ystart = 400
    ystop = 464
    scale = 1.0
    windows += (find_cars(img, ystart, ystop, scale, colorspace, hog_channel,
                          svc, None, orient, pix_per_cell, cell_per_block,
                          None, None))
    ystart = 416
    ystop = 480
    scale = 1.0
    windows += (find_cars(img, ystart, ystop, scale, colorspace, hog_channel,
                          svc, None, orient, pix_per_cell, cell_per_block,
                          None, None))
    ystart = 400
    ystop = 496
    scale = 1.5
    windows += (find_cars(img, ystart, ystop, scale, colorspace, hog_channel,
                          svc, None, orient, pix_per_cell, cell_per_block,
                          None, None))
    ystart = 432
    ystop = 528
    scale = 1.5
    windows += (find_cars(img, ystart, ystop, scale, colorspace, hog_channel,
                          svc, None, orient, pix_per_cell, cell_per_block,
                          None, None))
    ystart = 400
    ystop = 528
    scale = 2.0
    windows += (find_cars(img, ystart, ystop, scale, colorspace, hog_channel,
                          svc, None, orient, pix_per_cell, cell_per_block,
                          None, None))
    ystart = 432
    ystop = 560
    scale = 2.0
    windows += (find_cars(img, ystart, ystop, scale, colorspace, hog_channel,
                          svc, None, orient, pix_per_cell, cell_per_block,
                          None, None))
    ystart = 400
    ystop = 596
    scale = 3.5
    windows += (find_cars(img, ystart, ystop, scale, colorspace, hog_channel,
                          svc, None, orient, pix_per_cell, cell_per_block,
                          None, None))
    ystart = 464
    ystop = 660
    scale = 3.5
    windows += (find_cars(img, ystart, ystop, scale, colorspace, hog_channel,
                          svc, None, orient, pix_per_cell, cell_per_block,
                          None, None))

    #    window_list = utils.slide_window(img)

    heat_map = np.zeros(img.shape[:2])
    heat_map = utils.add_heat(heat_map, windows)
    heat_map_thresholded = utils.apply_threshold(heat_map, 1)
    labels = label(heat_map_thresholded)
    i = 0
    VehicalPath = "./capture"
    if not os.path.exists(VehicalPath):
        os.makedirs(VehicalPath)
    draw_img, bboxlist = utils.draw_labeled_bboxes(draw_img, labels)
    for bbox in bboxlist:
        x1, y1 = bbox[0]
        x2, y2 = bbox[1]
        i = i + 1
        if (x2 - x1 > 96) and (y2 - y1) > 96:
            # numpy images are indexed [row, col], i.e. [y, x]
            cv2.imwrite("./capture/Vehical{}_{}.jpg".format(framecount, i),
                        draw_img[y1:y2, x1:x2])
    return VehicalPath
Code Example #29
ystart = 300
ystop = 680
y_start_stop = [(350, 500), (400, 550), (350, 650)]
scale = [1, 2, 3]
cells_per_step = [1, 1, 1]

t = time.time()
hot_windows = detector.search_windows(image,
                                      y_start_stop=y_start_stop,
                                      scale=scale,
                                      cells_per_step=cells_per_step)
t2 = time.time()
print(round(t2 - t, 2),
      'Seconds to search and identify {} windows'.format(len(hot_windows)))
draw_heatmap = False
if draw_heatmap:
    heatmap = np.zeros(draw_image.shape[:2])
    heatmap = add_heat(heatmap, hot_windows)
    heatmap = apply_threshold(heatmap, 2)
    labels = label(heatmap)
    window_img = draw_labeled_bboxes(draw_image, labels)
else:
    window_img = draw_boxes(draw_image,
                            hot_windows,
                            color=(0, 0, 255),
                            thick=6)

plt.imshow(window_img)
# plt.savefig(ROOT + 'output_images/car/{}_labeled.jpg'.format(test_img))
plt.show()
Code Example #30
def enforce_equal_opportunity(categorical_results, epsilon):
    import utils as u         # assumed module-level import in the original file

    thresholds = {}
    equal_opportunity_data = {}

    a = categorical_results['African-American']
    b = categorical_results['Caucasian']
    c = categorical_results['Hispanic']
    d = categorical_results['Other']

    # per-group ROC data; the first element of each tuple holds the
    # per-threshold true positive rates used below
    (c1, d1, b1) = u.get_ROC_data(a, 'African-American')
    (c2, d2, b2) = u.get_ROC_data(b, 'Caucasian')
    (c3, d3, b3) = u.get_ROC_data(c, 'Hispanic')
    (c4, d4, b4) = u.get_ROC_data(d, 'Other')

    # collect threshold combinations whose TPRs agree within +/- 0.01
    # (note: the tolerance is hard-coded; epsilon is not used)
    arr = []
    for i in range(100):
        n1 = c1[i] - 0.01
        n2 = c1[i] + 0.01
        for j in range(100):
            if c2[j] >= n1 and c2[j] <= n2:
                for k in range(100):
                    if c3[k] >= n1 and c3[k] <= n2:
                        for l in range(100):
                            if c4[l] >= n1 and c4[l] <= n2:
                                arr.append([i / 100., j / 100., k / 100., l / 100.])

    accValue = 0
    thresholdList = None
    for combo in arr:
        arr1 = u.apply_threshold(a, combo[0])
        arr2 = u.apply_threshold(b, combo[1])
        arr3 = u.apply_threshold(c, combo[2])
        arr4 = u.apply_threshold(d, combo[3])

        d9 = {'African-American': arr1,
              'Caucasian': arr2,
              'Hispanic': arr3,
              'Other': arr4}

        acc = u.get_total_accuracy(d9)

        if accValue < acc:
            accValue = acc
            thresholdList = combo
            equal_opportunity_data = d9

    thresholds = {'African-American': thresholdList[0],
                  'Caucasian': thresholdList[1],
                  'Hispanic': thresholdList[2],
                  'Other': thresholdList[3]}

    return equal_opportunity_data, thresholds