Code Example #1
 def hog_features(im,
                  orients=8,
                  cell_size=8,
                  block_size=2,
                  vector=True,
                  show=False):
     if show:
         features, im_hog = hog(im,
                                orientations=orients,
                                pixels_per_cell=(cell_size, ) * 2,
                                cells_per_block=(block_size, ) * 2,
                                feature_vector=vector,
                                transform_sqrt=True,
                                visualise=show)
         show_image([im, im_hog],
                    ncols=2,
                    window_title='HOG',
                    titles=['original', 'hog'],
                    cmaps=['gray', 'gray'])
     else:
         features = hog(im,
                        orientations=orients,
                        pixels_per_cell=(cell_size, ) * 2,
                        cells_per_block=(block_size, ) * 2,
                        feature_vector=vector,
                        transform_sqrt=True,
                        visualise=show)
     return features
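A minimal usage sketch for hog_features, assuming the function above is in scope together with the hog it calls (presumably skimage.feature.hog; note the visualise spelling belongs to older scikit-image releases, which newer releases spell visualize). The 64x64 patch below is a placeholder:

import numpy as np

patch = np.random.randint(0, 256, (64, 64), dtype=np.uint8)   # hypothetical grayscale crop
feats = hog_features(patch, orients=8, cell_size=8, block_size=2)
print(feats.shape)   # a flat descriptor, since vector=True maps to feature_vector=True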
Code Example #2
 def binspatial_features(im, dst_size=32, show=False):
     features = cv.resize(im, (dst_size, ) * 2)
     if show:
         show_image([im, features],
                    ncols=2,
                    window_title='Bin Spatial',
                    titles=['original', 'resized'])
     return features.ravel()
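The flattened pixel values returned here are typically concatenated with other descriptors into a single feature vector. A hedged sketch combining it with hog_features from Code Example #1, assuming cv is OpenCV and im is an RGB image; the image path is a placeholder:

import numpy as np
import cv2 as cv

im = cv.cvtColor(cv.imread('car.png'), cv.COLOR_BGR2RGB)        # placeholder path
spatial = binspatial_features(im, dst_size=32)                  # 32 * 32 * 3 values
hog_vec = hog_features(cv.cvtColor(im, cv.COLOR_RGB2GRAY))      # from Code Example #1
combined = np.concatenate([spatial, hog_vec])                   # one feature vector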
Code Example #3
def single_image(filename, model_path='./data/model.p'):
    if not clb.initialized():
        clb.find_pictures(directory='./camera_cal/')
        clb.calibrate_camera(9, 6)
    im = common.load_image(filename, color='RGB')
    common.show_image(im)
    m = model.CarModel()
    m.load(filename=model_path)
    t = track.FrameVehiclePipeline(m, shape=im.shape[:2])
    t.process(im, show=True)
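A hedged invocation sketch for the pipeline above; the test image path is a placeholder, and ./data/model.p is assumed to already hold a trained, pickled CarModel:

single_image('./test_images/test1.jpg', model_path='./data/model.p')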
Code Example #4
File: track.py Project: awav/carnd-p5
 def _draw_car_boxes(self, im, show=False):
     n = self._labels[1] + 1
     for car in range(1, n):
         # x and y coordinates of the pixels assigned to this car label
         y, x = (self._labels[0] == car).nonzero()
         nw, se = (np.min(x), np.min(y)), (np.max(x), np.max(y))
         cv.rectangle(im, nw, se, (255, 255, 0), 2)
     if show:
         common.show_image(im,
                           window_title='Cars Heat Map',
                           titles='Detected cars')
     return im
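self._labels has the shape of scipy.ndimage.label output, i.e. a (label_array, num_labels) pair. A standalone sketch of the same box-drawing idea with the labeling step made explicit; the heatmap values and threshold are illustrative assumptions:

import numpy as np
import cv2 as cv
from scipy import ndimage

heatmap = np.zeros((720, 1280), dtype=np.int32)
heatmap[400:470, 800:920] += 2                     # fake detection votes
labels = ndimage.label(heatmap > 1)                # (label_array, num_labels)
im = np.zeros((720, 1280, 3), dtype=np.uint8)
for car in range(1, labels[1] + 1):
    y, x = (labels[0] == car).nonzero()
    nw = (int(np.min(x)), int(np.min(y)))
    se = (int(np.max(x)), int(np.max(y)))
    cv.rectangle(im, nw, se, (255, 255, 0), 2)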
Code Example #5
File: track.py Project: awav/carnd-p5
 def _find_cars_heatmap(self, im, show=False):
     shape = self._model.input_shape
     #_show = show if show == True else False
     _show = False
     for nw, se in self._slicer.wins:
         ys, ye = nw[1], se[1]
         xs, xe = nw[0], se[0]
         #print(nw, se)
         car = self._model.predict(cv.resize(im[ys:ye, xs:xe, :],
                                             shape[:2]),
                                   show=_show)
         #_show = False
         if car == 1:
             self._heatmap[ys:ye, xs:xe] += 1
             if show:
                 cv.rectangle(im, nw, se, (0, 0, 255), 2)
                 #common.show_image(im[ys:ye,xs:xe,:], titles='resized-car')
     if show:
         common.show_image([im, self._heatmap],
                           ncols=2,
                           window_title='Cars Heat Map',
                           titles=['original', 'heatmap'])
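The windows iterated above (self._slicer.wins) are ((x1, y1), (x2, y2)) corner pairs. A hedged sketch of how such a window list could be generated; the window size, stride, and y-range are illustrative values, not taken from the project:

def sliding_windows(width, win=64, stride=32, y_start=400, y_stop=656):
    # enumerate top-left / bottom-right corners of overlapping square windows
    wins = []
    for y in range(y_start, y_stop - win + 1, stride):
        for x in range(0, width - win + 1, stride):
            wins.append(((x, y), (x + win, y + win)))
    return wins

# e.g. wins = sliding_windows(1280)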
Code Example #6
    data_set['low_texture_row'] = low_texture_row
    data_set['low_texture_column'] = low_texture_column

    stereo.compute_cost()
    t_diff = stereo.aggregate_cost()
    my_result = stereo.get_result()
    my_result = my_result * (255.0 / d_max / 16)
    data_set['my_result_7'] = my_result

    diff_result = stereo.left_right_check()
    diff_result = diff_result * (255.0 / d_max / 16)
    data_set['diff_result'] = diff_result

    post_result = stereo.post_processing()
    post_result = post_result * (255.0 / d_max / 16)
    data_set['post_result'] = post_result

    post_result2 = stereo.fix_low_texture()
    post_result2 = post_result2 * (255.0 / d_max / 16)
    post_result2 = filters.median_filter(post_result2, 5)
    data_set['post_result2'] = post_result2
    print time.time() - tt
    show_image(data_set)

    save_image(diff_result, 'diff_result')
    save_image(post_result, 'post_result')
    save_image(post_result2, 'post_result_2')
    save_image(low_texture_column, 'low_texture_column')
    save_image(low_texture_row, 'low_texture_row')
    save_image(my_result, 'window method 7')
Code Example #7
    :param d_max: maximum depth
    :return: disparity value
    """
    start_pos = (pixel_pos - d_max) if (pixel_pos - d_max) > 0 else 0
    row_right = row_right[start_pos:pixel_pos]
    diff = map(lambda value: abs(value - pixel_value), row_right)
    diff = diff[::-1]  # reverse order
    data_min = 0
    for depth in range(len(diff)):
        if diff[data_min] == 0:
            break
        if diff[depth] < diff[data_min]:
            data_min = depth
    return data_min


# scan every pixel of the left image
for row_pos in range(len(left)):
    row_left = left[row_pos]
    row_right = right[row_pos]
    for pixel_pos in range(len(row_left)):
        pixel = row_left[pixel_pos]
        depth = calculate_diff_naive(pixel, row_right, pixel_pos)
        my_result[row_pos][pixel_pos] = depth * 255 / 10

data_set['my_result'] = my_result
show_image(data_set)
save_image(my_result, 'pixel naive method')
if __name__ == '__main__':
    pass
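For reference, the per-pixel search in calculate_diff_naive can be written compactly with numpy. A hedged re-implementation sketch, not code from the original file; the d_max default of 15 is illustrative, and the int casts also guard against uint8 wrap-around in the subtraction:

import numpy as np

def naive_disparity(pixel_value, row_right, pixel_pos, d_max=15):
    start = max(pixel_pos - d_max, 0)
    window = np.asarray(row_right[start:pixel_pos], dtype=np.int64)
    if window.size == 0:
        return 0
    diff = np.abs(window - int(pixel_value))[::-1]   # index 0 = candidate just left of pixel_pos
    return int(np.argmin(diff))                      # first (smallest-disparity) minimum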
Code Example #8
# Sobel gradients; |Gx| + |Gy| below approximates the gradient magnitude
sobel_x = np.array([[-1, -2, -1], [0, 0, 0], [1, 2, 1]])
sobel_y = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]])
output_1 = cv2.filter2D(after_lap, -1, sobel_x)
output_2 = cv2.filter2D(after_lap, -1, sobel_y)

output_1 = np.abs(output_1)
output_2 = np.abs(output_2)

output = output_1 + output_2
output = co.shrink(output, 255, 6)
#co.show_image(output)
output = after_lap - output

# remove noise points
co.show_image(output)
output = cv2.medianBlur(output, 5)
#co.show_image(output)
imsave(dic + "sobel.png", output)

# morphological opening
image = morphology.opening(output[:, :, 0], morphology.disk(12))
image_g = co.gray_rgb(image)
imsave(dic + "open_grey.png", image_g)

#laplacian
image_g = cv.imread(dic + "open_grey.png")
gray_lap = np.abs(cv2.Laplacian(image_g, cv2.CV_16S, ksize=3))
gray_lap = co.shrink(gray_lap, 255, 5)
#co.show_image(gray_lap)
after_lap = image_g + gray_lap
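The hand-built kernels above can also be expressed with OpenCV's built-in Sobel operator. A hedged equivalent sketch for the gradient-magnitude step only; the input name is a placeholder:

import numpy as np
import cv2

def gradient_magnitude(img):
    gx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=3)   # derivative along x
    gy = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=3)   # derivative along y
    mag = np.abs(gx) + np.abs(gy)                    # same L1 combination as above
    return np.clip(mag, 0, 255).astype(np.uint8)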
Code Example #9
from skimage import morphology
from skimage.segmentation import random_walker
import matplotlib.pyplot as plt
from scipy import ndimage
from skimage import io
from matplotlib.image import imsave
import cv2
import cv2.cv2 as cv
from skimage.filters import thresholding, _rank_order
import time
from skimage.morphology import rectangle
import common as co

dic = "data/tree/"
pic = "img_close.png"
'''
img_contrasted = cv.imread(dic + "DJI_0330.JPG")
print img_contrasted.shape
#img_contrasted = co.green_digree(img_contrasted)
#img_contrasted = img_contrasted[:,:,0]
img_contrasted = np.array(cv2.cvtColor(img_contrasted, cv2.COLOR_BGR2GRAY))
img_contrasted = co.get_derode(img_contrasted)
print img_contrasted.shape
img_close = morphology.closing(img_contrasted, morphology.disk(20)) #close
co.show_image(img_close)
'''

img_close = cv.imread(dic + pic)
img_close = img_close[:, :, 0]

# define region of interest
Code Example #10
                item_path = self.imagesTop[idx]
            elif self.view == 'Side':
                item_path = self.imagesSide[idx]
            elif self.view == 'Front':
                item_path = self.imagesFront[idx]

        data_input = Image.open(item_path).convert('L')

        # data augmentation
        if self.mode == 'train':
            data_input = self.augment_data(data_input)
        else:
            data_input = self.pre_process(data_input)

        data_input = 1. - data_input / 255.

        return (data_input, data_truth)


if __name__ == '__main__':
    loader = DataLoader('../../../../Courses_data/LeChairs/chairs-data/',
                        mode='train',
                        view='Top')
    train_data_loader = torch.utils.data.DataLoader(loader,
                                                    batch_size=1,
                                                    shuffle=True,
                                                    num_workers=0)
    idx, (image, label) = next(enumerate(train_data_loader))

    show_image(image, is_tensor=True)
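The image handed to show_image here is a batched torch tensor. A hedged sketch of converting such a batch back to a displayable numpy array; the helper name is illustrative and not part of the project:

import numpy as np

def to_numpy_image(batch):
    arr = batch[0].detach().cpu().numpy()   # first sample in the batch
    if arr.ndim == 3:                       # (C, H, W) -> (H, W, C)
        arr = np.transpose(arr, (1, 2, 0))
    return arr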
Code Example #11
File: 1.2.py Project: wuhu/mohbf
import numpy as np
from scipy.signal import convolve2d

from common import import_pics, show_image

# 1.2
# import an image
im = import_pics("imk01765.tiff")

# a)
# add white noise using different standard deviations
for i in (100, 1000, 5000, 10000, 30000):
    # generate white noise
    whiten = np.random.normal(0, i, (1020, 1532))
    # add the noise to the image
    imnoise = im + whiten
    # show image
    show_image(imnoise, "Noise added (sigma=%d)" % i)

# b)

# generate white noise
whiten = np.random.normal(0, 15000, (1020, 1532))
kernels = (3, 5, 9, 15, 29)
for n in kernels:
    # create smooth white noise by convolving white noise with a rectangular window
    tmp = convolve2d(whiten, np.ones((n, n)) / (n * n), mode="same", boundary="symm")
    show_image(im + tmp, "Noise kernel (%d,%d)" % (n, n))

# first manual approach (with 1-d vector)
# for i in range(1532*1020):
#      tmp = 0
#     for j in range(-n/2,n/2):
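Convolving i.i.d. white noise with a normalized n-by-n box kernel divides its variance by n*n (its standard deviation by n), which is why the larger kernels above look progressively smoother. A standalone numerical check of that claim, run on a smaller array to keep it quick:

import numpy as np
from scipy.signal import convolve2d

noise = np.random.normal(0, 15000, (512, 512))
for n in (3, 5, 9, 15, 29):
    smoothed = convolve2d(noise, np.ones((n, n)) / (n * n), mode="same", boundary="symm")
    print(n, noise.std(), smoothed.std())   # smoothed std comes out near 15000 / n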
Code Example #12
File: 1.1.py Project: wuhu/mohbf
import pylab as mpl

from common import import_pics, show_image

# (see common.py for the implementation of the functions used below)
# import an image from the database
im = import_pics("imk01765.tiff")
# show it
show_image(im, "Original")

mpl.show()
Code Example #13
    data_set = get_data_set(0)
    # get data
    left = data_set['left']
    right = data_set['right']
    result = data_set['result']
    import time

    window_size = 5
    d_max = 15
    tt = time.time()
    stereo = StereoVisionBM1(left, right, window_size, d_max)
    '''
    stereo.get_sad_all()
    my_result = stereo.get_result()
    my_result = my_result * 255 / d_max
    data_set['my_result_6'] = my_result
    print time.time() - tt
    save_image(my_result, 'window method 6')
    show_image(data_set)'''
    data_set = get_data_set(0, is_color=True)
    left = data_set['left']
    right = data_set['right']
    result = data_set['result']
    stereo = StereoVisionBM1(left, right, window_size, d_max, is_color=True)
    stereo.get_sad_all()
    my_result = stereo.get_result()
    my_result = my_result * 255 / d_max
    data_set['my_result_6'] = my_result
    show_image(data_set, is_color=True)
    save_image(my_result, 'window method 6')
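For comparison, OpenCV ships a ready-made block matcher. A hedged sketch of an equivalent call: StereoBM expects 8-bit single-channel inputs (left_gray and right_gray are placeholders for grayscale versions of left and right), numDisparities must be a multiple of 16 so the d_max of 15 is rounded up, and the returned disparities are fixed-point values scaled by 16:

import cv2

stereo_bm = cv2.StereoBM_create(numDisparities=16, blockSize=5)
disparity = stereo_bm.compute(left_gray, right_gray)   # int16 map, true disparity * 16
scaled = disparity * (255.0 / 16 / 16)                 # rescale to roughly 0-255 for display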
Code Example #14
dic = "data/clip1/"
pic = "dsm_clip1.tif"

dsm = misc.imread(dic + pic)  #tif
print dsm.shape

a = 3929 / 2268.0
b = 978 / 564.0
print a, b

#dsm = cv2.imread(dic + pic)
#dsm = dsm[:,:,0].astype(np.float32)

print type(dsm[0][0])
print dsm.shape
co.show_image(dsm)
'''
maxn = np.max(dsm)
dsm[dsm == np.min(dsm)] = maxn + 1
co.show_image(dsm)
'''


def slide_window_grey(image, px):
    # resize the image so its dimensions are multiples of px
    width = int(math.ceil(float(image.shape[0]) / px) * px)
    length = int(math.ceil(float(image.shape[1]) / px) * px)
    img_resize = cv2.resize(image, (length, width))
    print img_resize.shape
    # divide into windows
    wds = []
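The helper above resizes the image so both dimensions become multiples of px and then starts collecting windows into wds before the listing is cut off. A standalone sketch of the same tiling idea, purely illustrative and not the continuation of the original function:

def tile_windows(image, px):
    # assumes image dimensions are already multiples of px (as after the resize above)
    tiles = []
    for r in range(0, image.shape[0], px):
        for c in range(0, image.shape[1], px):
            tiles.append(image[r:r + px, c:c + px])
    return tiles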
Code Example #15
import numpy as np
from scipy import misc
from PIL import Image
from matplotlib import pyplot as plt
from matplotlib.image import imsave
import common as co
import math
import cv2

dic = "data/dsm/"

dsm_100 = cv2.imread(dic + "dsm_grey_sw100.png")
dsm_200 = cv2.imread(dic + "dsm_grey_sw200.png")

dsm_100200 = (dsm_100 + dsm_200) / 2
co.show_image(dsm_100200)
imsave(dic + "dsm_grey_sw1_2.png", dsm_100200)
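Note that cv2.imread returns uint8 arrays, so dsm_100 + dsm_200 can wrap around before the division by 2. A hedged overflow-safe variant of the same averaging step, using OpenCV's saturating blend:

import cv2

dsm_100200_safe = cv2.addWeighted(dsm_100, 0.5, dsm_200, 0.5, 0.0)   # equal-weight blend, no wrap-around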