def calculateHogEstadistic(path, tipo):
    with open(
            "/home/cristina/Documentos/TFG/TFG-EndoscopySegementation/Results/estadisticHog.csv",
            'a') as csvfile:
        fieldnames = ['Tipo', 'Media', 'Mediana', 'Mínimo', 'Máximo']
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writeheader()

        # Compute the HOG descriptor of every frame in the directory
        hog = []
        i = 0
        for file in os.listdir(path):
            f = frame(path, file)
            hog.append(f.descriptorHOG())
            # i += 1
            # if i >= 10: break

        # Euclidean distance between the HOG descriptors of consecutive frames
        statsH = []
        indice = 0
        while indice < len(hog) - 1:
            statsH.append(distancia_euclidea(hog[indice], hog[indice + 1]))
            indice += 1

        # Write the summary statistics as a single CSV row
        writer.writerow({
            'Tipo': tipo,
            'Media': stats.mean(statsH),
            'Mediana': stats.median(statsH),
            'Mínimo': min(statsH),
            'Máximo': max(statsH)
        })
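# distancia_euclidea() is called above but not defined in this snippet. The
# sketch below shows what it is assumed to do (Euclidean distance between two
# HOG descriptor vectors); the name and signature come from the call sites,
# the body is an assumption, not the original implementation.
import numpy as np

def distancia_euclidea(v1, v2):
    # Assumed helper: L2 norm of the difference between two descriptor vectors.
    v1 = np.asarray(v1, dtype=np.float64)
    v2 = np.asarray(v2, dtype=np.float64)
    return float(np.linalg.norm(v1 - v2))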
def mouse_click(event, x, y, flags, param):
    global mouseX, mouseY
    global histo_img
    global crop_img
    if event == cv2.EVENT_LBUTTONDOWN:
        console.print("MouseX: ", x, "\t| MouseY: ", y)
        mouseX, mouseY = x, y
        # Crop a 12x12 patch centred on the clicked pixel
        crop_img = car[y - 6:y + 6, x - 6:x + 6]
        if crop_img.shape != (12, 12):
            console.print(
                ":warning: Selected area isn't a 12x12 area, please select a valid zone :warning:",
                style="red on white")
        else:
            # Gradient orientation (in degrees) at the centre of each 3x3 cell
            hog = []
            for cx in range(4):
                for cy in range(4):
                    hog.append(gradiant(crop_img, (cx * 3) + 1, (cy * 3) + 1) *
                               (180 / np.pi))
            console.print("HOG angles: ", hog)

            fig = plt.figure("Selected_Output")
            ax = fig.add_subplot(1, 2, 1)
            plt.imshow(crop_img, cmap='gray')
            ax.set_title('Cropped Image')

            ax = fig.add_subplot(1, 2, 2)
            bins = [
                -22.5, 22.5, 67.5, 112.5, 157.5, 202.5, 247.5, 292.5, 337.5
            ]
            plt.hist(hog, bins=bins, rwidth=0.5)
            xticks = [(bins[idx + 1] + value) / 2
                      for idx, value in enumerate(bins[:-1])]
            plt.xticks(xticks, labels=[1, 2, 3, 4, 5, 6, 7, 8])
            ax.set_title('Histogram')
            plt.show()
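# gradiant() is used above but not defined here. A minimal sketch under the
# assumption (inferred from the call site, which multiplies by 180/pi) that it
# returns the gradient orientation in radians at pixel (px, py) of a grayscale
# patch, computed with central differences; the index order and the [0, 2*pi)
# range are assumptions.
import numpy as np

def gradiant(patch, px, py):
    # Assumed helper: central-difference gradient orientation at (px, py).
    gx = float(patch[py, px + 1]) - float(patch[py, px - 1])
    gy = float(patch[py + 1, px]) - float(patch[py - 1, px])
    # Map the angle into [0, 2*pi) so the degree histogram spans 0-360.
    return np.mod(np.arctan2(gy, gx), 2 * np.pi)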
def calculateHogEstadisticComparation(path1, path2, tipo):
    with open(
            "/home/cristina/Documentos/TFG/TFG-EndoscopySegementation/Results/estadisticHog.csv",
            'a') as csvfile:
        fieldnames = ['Tipo', 'Media', 'Mediana', 'Mínimo', 'Máximo']
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        inside = os.listdir(path1)
        outside = os.listdir(path2)
        writer.writeheader()

        # Euclidean distance between the HOG descriptors of paired frames
        hog = []
        i = 0
        while i < len(inside) and i < len(outside):
            f1 = frame(path1, inside[i])
            f2 = frame(path2, outside[i])
            hog.append(
                distancia_euclidea(f1.descriptorHOG(), f2.descriptorHOG()))
            i += 1

        # Write the summary statistics as a single CSV row
        writer.writerow({
            'Tipo': tipo,
            'Media': stats.mean(hog),
            'Mediana': stats.median(hog),
            'Mínimo': min(hog),
            'Máximo': max(hog)
        })
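# A minimal usage sketch for the two statistics helpers above. The directory
# paths and the 'tipo' labels are hypothetical placeholders, not taken from the
# original project.
if __name__ == '__main__':
    calculateHogEstadistic('/path/to/frames/inside', 'inside')
    calculateHogEstadisticComparation('/path/to/frames/inside',
                                      '/path/to/frames/outside',
                                      'inside-vs-outside')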
def find_cars(img, ystart, ystop, scale, svc, orient, pix_per_cell,
              cell_per_block, show_all_rectangles=False):
    # array of rectangles where cars were detected
    rectangles = []

    img = img.astype(np.float32) / 255
    img_tosearch = img[ystart:ystop, :, :]
    ctrans_tosearch = cv2.cvtColor(img_tosearch, cv2.COLOR_RGB2YUV)

    # rescale image if other than 1.0 scale
    if scale != 1:
        imshape = ctrans_tosearch.shape
        ctrans_tosearch = cv2.resize(ctrans_tosearch, None,
                                     fx=1 / scale, fy=1 / scale)

    # Define blocks and steps as above
    nxblocks = (ctrans_tosearch[:, :, 0].shape[1] // pix_per_cell) + 1  # -1
    nyblocks = (ctrans_tosearch[:, :, 0].shape[0] // pix_per_cell) + 1  # -1
    nfeat_per_block = orient * cell_per_block ** 2

    # 64 was the original sampling rate, with 8 cells and 8 pix per cell
    window = 64
    nblocks_per_window = (window // pix_per_cell) - 1
    cells_per_step = 2  # Instead of overlap, define how many cells to step
    nxsteps = (nxblocks - nblocks_per_window) // cells_per_step
    nysteps = (nyblocks - nblocks_per_window) // cells_per_step

    # Compute individual channel HOG features for the entire image
    hog = []
    for i in range(0, 3):
        hog.append(
            get_hog_features(ctrans_tosearch[:, :, i], orient, pix_per_cell,
                             cell_per_block, feature_vec=False))

    for xb in range(nxsteps):
        for yb in range(nysteps):
            ypos = yb * cells_per_step
            xpos = xb * cells_per_step

            # Extract HOG for this patch
            hog_feat1 = hog[0][ypos:ypos + nblocks_per_window,
                               xpos:xpos + nblocks_per_window].ravel()
            hog_feat2 = hog[1][ypos:ypos + nblocks_per_window,
                               xpos:xpos + nblocks_per_window].ravel()
            hog_feat3 = hog[2][ypos:ypos + nblocks_per_window,
                               xpos:xpos + nblocks_per_window].ravel()
            hog_features = np.hstack((hog_feat1, hog_feat2, hog_feat3))

            xleft = xpos * pix_per_cell
            ytop = ypos * pix_per_cell

            test_prediction = svc.predict([hog_features])

            if test_prediction[0] == 1 or show_all_rectangles:
                # np.int was removed in NumPy 1.24; plain int does the same here
                xbox_left = int(xleft * scale)
                ytop_draw = int(ytop * scale)
                win_draw = int(window * scale)
                rectangles.append(
                    ((xbox_left, ytop_draw + ystart),
                     (xbox_left + win_draw, ytop_draw + win_draw + ystart)))

    return rectangles
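# get_hog_features() is used by find_cars() and the snippets below but is not
# defined here. The sketch is an assumed implementation: a thin wrapper around
# skimage.feature.hog matching the parameter names seen at the call sites
# (orient, pix_per_cell, cell_per_block, vis, feature_vec).
from skimage.feature import hog as skimage_hog

def get_hog_features(img, orient, pix_per_cell, cell_per_block,
                     vis=False, feature_vec=True):
    # Assumed wrapper: returns (features, hog_image) when vis=True,
    # otherwise only the features.
    return skimage_hog(img,
                       orientations=orient,
                       pixels_per_cell=(pix_per_cell, pix_per_cell),
                       cells_per_block=(cell_per_block, cell_per_block),
                       block_norm='L2-Hys',
                       visualize=vis,
                       feature_vector=feature_vec)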
def dataset_prepare(hfile):
    # Start of work
    print("Preparing dataset...")
    i = 0
    img = []
    hog = []
    lbl = []
    for fname in os.listdir(train_dir):
        # Check the input files
        if not (fname.endswith(".png") or fname.endswith(".bmp")
                or fname.endswith(".jpg")):
            continue
        # Read the file
        fl = os.path.join(train_dir, fname)
        src = io.imread(fl)
        # Resize
        src = transform.resize(src, (64, 64))
        # What about a Gaussian filter here?
        #src = gaussian_filter(src, sigma = 1.0)
        # Convert to LAB - it reproduces human perception better
        src = color.rgb2lab(src)
        l = src[:, :, 0]
        a = src[:, :, 1]
        b = src[:, :, 2]
        # Normalisation
        l = exposure.rescale_intensity(l)
        # Convert to arrays
        l = np.asarray(l, 'float64')
        a = np.asarray(a, 'float64')
        b = np.asarray(b, 'float64')
        # Get the HOG features
        l_ftr = get_hog(l)
        # Get the colour features (integer division keeps the slice indices int)
        H, W = l.shape
        a_ftr = a[hog_sz // 2:H:hog_sz, hog_sz // 2:W:hog_sz].copy().flatten()
        b_ftr = b[hog_sz // 2:H:hog_sz, hog_sz // 2:W:hog_sz].copy().flatten()
        # Build the feature vector
        src_ftr = np.array(list(l_ftr) + list(a_ftr) + list(b_ftr), 'float64')
        # Correction, just in case
        s = np.sum(src_ftr)
        if np.isnan(s) or np.isinf(s) or np.isneginf(s):
            print('Sample is grayscaled:', i, 'Will fix feature vector!')
            for j in range(0, len(src_ftr)):
                s = src_ftr[j]
                if np.isnan(s) or np.isinf(s) or np.isneginf(s):
                    src_ftr[j] = 0.0
        # Append the image
        img.append(l)
        # Append the features
        hog.append(src_ftr)
        # Append the label
        if 'dog' in fname:
            lbl.append(0)
        else:
            lbl.append(1)
        # Progress indication
        if i % 100 == 0:
            print('File number:', i, 'File name:', fname)
        i = i + 1
    # Assemble the dataset
    dataset = (np.array(img), np.array(hog), np.array(lbl))
    # Save to the cache file
    print("Save to file...")
    np.savez(hfile, img=dataset[0], hog=dataset[1], lbl=dataset[2])
    return dataset
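# get_hog() and hog_sz are referenced above but not defined in this snippet.
# A minimal sketch, assuming hog_sz is the HOG cell size in pixels and
# get_hog() wraps skimage.feature.hog over the L channel; both the value of
# hog_sz and the HOG parameters below are assumptions.
from skimage.feature import hog as skimage_hog

hog_sz = 8  # assumed cell size in pixels

def get_hog(channel):
    # Assumed helper: flat HOG feature vector with hog_sz x hog_sz cells.
    return skimage_hog(channel,
                       orientations=9,
                       pixels_per_cell=(hog_sz, hog_sz),
                       cells_per_block=(2, 2),
                       feature_vector=True)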
def find_cars(img, ystart, ystop, scale, svc, X_scaler, colorspace,
              orient, pix_per_cell, cell_per_block, hog_channel,
              spatial_size, hist_bins):
    if not ystart:
        ystart = 0
    if not ystop:
        ystop = img.shape[0]

    draw_img = np.copy(img)

    img_tosearch = img[ystart:ystop, :, :]
    ctrans_tosearch = convert_color(img_tosearch, conv=colorspace)
    ctrans_tosearch = ctrans_tosearch.astype(np.float64)
    if scale != 1:
        imshape = ctrans_tosearch.shape
        ctrans_tosearch = cv2.resize(
            ctrans_tosearch,
            (int(imshape[1] / scale), int(imshape[0] / scale)))

    # Define blocks and steps as above
    nxblocks = (ctrans_tosearch.shape[1] // pix_per_cell) - 1
    nyblocks = (ctrans_tosearch.shape[0] // pix_per_cell) - 1
    nfeat_per_block = orient * cell_per_block ** 2

    # 64 was the original sampling rate, with 8 cells and 8 pix per cell
    window = 64
    nblocks_per_window = (window // pix_per_cell) - 1
    cells_per_step = 2  # Instead of overlap, define how many cells to step
    nxsteps = (nxblocks - nblocks_per_window) // cells_per_step
    nysteps = (nyblocks - nblocks_per_window) // cells_per_step

    # Compute individual channel HOG features for the entire image
    if hog_channel == 'ALL':
        hog = []
        for channel in range(ctrans_tosearch.shape[2]):
            hog.append(get_hog_features(ctrans_tosearch[:, :, channel], orient,
                                        pix_per_cell, cell_per_block,
                                        vis=False, feature_vec=False))
    else:
        hog = get_hog_features(ctrans_tosearch[:, :, hog_channel], orient,
                               pix_per_cell, cell_per_block,
                               vis=False, feature_vec=False)

    bounding_boxes = list()
    for xb in range(nxsteps):
        for yb in range(nysteps):
            ypos = yb * cells_per_step
            xpos = xb * cells_per_step

            # Extract HOG for this patch
            if hog_channel == 'ALL':
                hog_feat0 = hog[0][ypos:ypos + nblocks_per_window,
                                   xpos:xpos + nblocks_per_window].ravel()
                hog_feat1 = hog[1][ypos:ypos + nblocks_per_window,
                                   xpos:xpos + nblocks_per_window].ravel()
                hog_feat2 = hog[2][ypos:ypos + nblocks_per_window,
                                   xpos:xpos + nblocks_per_window].ravel()
                hog_features = np.hstack((hog_feat0, hog_feat1, hog_feat2))
            else:
                hog_features = hog[ypos:ypos + nblocks_per_window,
                                   xpos:xpos + nblocks_per_window].ravel()

            xleft = xpos * pix_per_cell
            ytop = ypos * pix_per_cell

            # Extract the image patch
            subimg = cv2.resize(
                ctrans_tosearch[ytop:ytop + window, xleft:xleft + window],
                (64, 64))

            # Get color features
            spatial_features = bin_spatial(subimg, size=spatial_size)
            hist_features = color_hist(subimg, nbins=hist_bins)

            # Scale features and make a prediction
            test_features = X_scaler.transform(
                np.hstack((spatial_features, hist_features,
                           hog_features)).reshape(1, -1))
            test_prediction = svc.predict(test_features)

            if test_prediction == 1:
                xbox_left = int(xleft * scale)
                ytop_draw = int(ytop * scale)
                win_draw = int(window * scale)
                cv2.rectangle(
                    draw_img, (xbox_left, ytop_draw + ystart),
                    (xbox_left + win_draw, ytop_draw + win_draw + ystart),
                    (0, 0, 255), 6)
                bounding_boxes.append(
                    ((xbox_left, ytop_draw + ystart),
                     (xbox_left + win_draw, ytop_draw + win_draw + ystart)))

    return draw_img, bounding_boxes
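# convert_color() is called by the find_cars() variant above but not defined in
# these snippets. A minimal sketch, assuming 'conv' names the target colorspace
# of an RGB input; the mapping below is an assumption based on the colorspaces
# used elsewhere in this file, not the original implementation.
import cv2
import numpy as np

def convert_color(img, conv='YCrCb'):
    conversions = {
        'YCrCb': cv2.COLOR_RGB2YCrCb,
        'YUV': cv2.COLOR_RGB2YUV,
        'LUV': cv2.COLOR_RGB2LUV,
        'HLS': cv2.COLOR_RGB2HLS,
        'HSV': cv2.COLOR_RGB2HSV,
    }
    if conv in conversions:
        return cv2.cvtColor(img, conversions[conv])
    return np.copy(img)  # 'RGB' or unknown conversion: return a copy unchanged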
def single_img_features(img, color_space='RGB', spatial_size=(32, 32),
                        hist_bins=32, orient=9, pix_per_cell=8,
                        cell_per_block=2, hog_channel=0, spatial_feat=True,
                        hist_feat=True, hog_feat=True, img_name='unknown'):
    #1) Define an empty list to receive features
    img_features = []
    #2) Apply color conversion if other than 'RGB'
    if color_space != 'RGB':
        if color_space == 'HSV':
            feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
        elif color_space == 'LUV':
            feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2LUV)
        elif color_space == 'HLS':
            feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
        elif color_space == 'YUV':
            feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2YUV)
        elif color_space == 'YCrCb':
            feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2YCrCb)
    else:
        feature_image = np.copy(img)
    #3) Compute spatial features if flag is set
    if spatial_feat == True:
        spatial_features = bin_spatial(feature_image, size=spatial_size)
        #4) Append features to list
        img_features.append(spatial_features)
    #5) Compute histogram features if flag is set
    if hist_feat == True:
        hist_features = color_hist(feature_image, nbins=hist_bins)
        #6) Append features to list
        img_features.append(hist_features)
    #7) Compute HOG features if flag is set
    hog = []
    ch1 = feature_image[:, :, 0]
    ch2 = feature_image[:, :, 1]
    ch3 = feature_image[:, :, 2]
    if hog_feat == True:
        if hog_channel == 'ALL':
            hog_features = []
            for channel in range(feature_image.shape[2]):
                print(channel)
                featurehog, hog_img = get_hog_features(
                    feature_image[:, :, channel], orient, pix_per_cell,
                    cell_per_block, vis=True, feature_vec=True)
                hog.append(hog_img)
                hog_features.extend(
                    get_hog_features(feature_image[:, :, channel], orient,
                                     pix_per_cell, cell_per_block,
                                     vis=False, feature_vec=True))
        else:
            hog_features = get_hog_features(feature_image[:, :, hog_channel],
                                            orient, pix_per_cell,
                                            cell_per_block, vis=False,
                                            feature_vec=True)
        #8) Append features to list
        img_features.append(hog_features)
    # Only report the HOG visualisation shape when it was actually computed
    if hog:
        print('hog length', hog[0].shape)
    #9) Return concatenated array of features
    if 0:
        # Debug plot: each channel next to its HOG visualisation
        # (only meaningful when hog_channel == 'ALL')
        f, axarr2 = plt.subplots(3, 2)
        f.tight_layout()
        axarr2[0, 0].imshow(ch1, cmap='gray')
        axarr2[0, 0].set_title(img_name + ' CH1', fontsize=10)
        axarr2[0, 1].imshow(hog[0], cmap='gray')
        axarr2[0, 1].set_title(img_name + ' CH1 HOG', fontsize=10)
        axarr2[1, 0].imshow(ch2, cmap='gray')
        axarr2[1, 0].set_title(img_name + ' CH2', fontsize=10)
        axarr2[1, 1].imshow(hog[1], cmap='gray')
        axarr2[1, 1].set_title(img_name + ' CH2 HOG', fontsize=10)
        axarr2[2, 0].imshow(ch3, cmap='gray')
        axarr2[2, 0].set_title(img_name + ' CH3', fontsize=10)
        axarr2[2, 1].imshow(hog[2], cmap='gray')
        axarr2[2, 1].set_title(img_name + ' CH3 HOG', fontsize=10)
        plt.show()
    return np.concatenate(img_features)
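# bin_spatial() and color_hist() are used above and in find_cars() but are not
# defined in these snippets. The sketches below follow the usual
# spatial-binning and colour-histogram feature extractors implied by the call
# sites; treat them as assumptions, not the original implementations.
import cv2
import numpy as np

def bin_spatial(img, size=(32, 32)):
    # Downsample the image and flatten it into a feature vector.
    return cv2.resize(img, size).ravel()

def color_hist(img, nbins=32, bins_range=(0, 256)):
    # Histogram of each colour channel, concatenated into one vector.
    hist1 = np.histogram(img[:, :, 0], bins=nbins, range=bins_range)[0]
    hist2 = np.histogram(img[:, :, 1], bins=nbins, range=bins_range)[0]
    hist3 = np.histogram(img[:, :, 2], bins=nbins, range=bins_range)[0]
    return np.concatenate((hist1, hist2, hist3))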
def hog_feature(orig_img):
    img = orig_img
    if len(img.shape) > 2:
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    print('Shape of image = ', img.shape)

    # Resize to the canonical 64x128 HOG detection window
    img = cv2.resize(img, (64, 128), interpolation=cv2.INTER_AREA)
    print('Resize shape of image = ', img.shape)

    # Crop so both dimensions are multiples of 16 (2x2 blocks of 8x8 cells)
    if len(img) % 16 == 0:
        l = len(img)
    else:
        l = len(img) - (len(img) % 16)
    if len(img[0]) % 16 == 0:
        w = len(img[0])
    else:
        w = len(img[0]) - (len(img[0]) % 16)
    lm = len(img) % 16
    wm = len(img[0]) % 16
    reshaped = img[lm // 2:len(img) - lm // 2, wm // 2:len(img[0]) - wm // 2]

    # Gradient magnitude and orientation via 3x3 Sobel kernels
    grx = np.zeros([l, w])
    gry = np.zeros([l, w])
    mag = np.zeros([l, w])
    ang = np.zeros([l, w])
    Fx = [[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]    # horizontal Sobel kernel
    Fy = [[-1, -2, -1], [0, 0, 0], [1, 2, 1]]    # vertical Sobel kernel
    for i in range(1, l - 1):
        for j in range(1, w - 1):
            tempx = 0
            tempy = 0
            for k1 in range(-1, 2):
                for l1 in range(-1, 2):
                    tempx = tempx + reshaped[i + k1][j + l1] * Fx[k1 + 1][l1 + 1]
                    tempy = tempy + reshaped[i + k1][j + l1] * Fy[k1 + 1][l1 + 1]
            grx[i][j] = tempx
            gry[i][j] = tempy
            mag[i][j] = math.sqrt(grx[i][j] ** 2 + gry[i][j] ** 2)
            # Unsigned orientation in [0, 180) degrees
            ang[i][j] = math.degrees(math.atan2(gry[i][j], grx[i][j])) % 180

    # 9-bin orientation histogram (20 degrees per bin) for every 8x8 cell,
    # with linear interpolation between neighbouring bins
    hog = []
    for i in range(0, l, 8):
        ro = []
        for j in range(0, w, 8):
            bini = [0.0] * 9
            for i1 in range(i, i + 8):
                for j1 in range(j, j + 8):
                    lb = int(ang[i1][j1] / 20)
                    ab = ang[i1][j1] % 20
                    if lb >= 8:
                        # last bin: no upper neighbour to interpolate into
                        bini[8] = bini[8] + mag[i1][j1]
                    else:
                        ba = 20 - ab
                        bini[lb] = bini[lb] + (ba * mag[i1][j1]) / 20
                        bini[lb + 1] = bini[lb + 1] + (ab * mag[i1][j1]) / 20
            ro.append(bini)
        hog.append(ro)

    # Block normalisation: concatenate 2x2 neighbouring cells and L2-normalise
    out = []
    for i in range(0, len(hog) - 1):
        for j in range(0, len(hog[0]) - 1):
            temp = hog[i][j] + hog[i + 1][j] + hog[i][j + 1] + hog[i + 1][j + 1]
            s = 0
            for i1 in range(0, len(temp)):
                s = s + temp[i1] * temp[i1]
            sq = math.sqrt(s) + 1e-6  # epsilon avoids division by zero
            for i1 in range(0, len(temp)):
                temp[i1] = temp[i1] / sq
            out = out + temp

    print('Length of HOG vector =', len(out))
    return out
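# A minimal usage sketch for hog_feature(). The image path is a hypothetical
# placeholder. With a 64x128 window, 8x8 cells and 2x2 block grouping, the
# expected descriptor length is 7 * 15 * 36 = 3780 values.
import cv2

if __name__ == '__main__':
    sample = cv2.imread('person.png')   # hypothetical input image
    descriptor = hog_feature(sample)
    print(len(descriptor))              # expect 3780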