Example #1
def test_image_processing_bad_image():
    ip_obj = ImageProcessing(yolo_path="../YOLO")
    try:
        ip_obj.load_file("../static-images/bad-image.jpeg")
        assert False
    except AttributeError:
        assert True
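A more idiomatic way to express the same expectation, assuming these suites run under pytest (pytest.raises replaces the try/except/assert pattern used here and in Example #4):

import pytest

def test_image_processing_bad_image_idiomatic():
    # the bad image should be rejected while loading, not swallowed
    ip_obj = ImageProcessing(yolo_path="../YOLO")
    with pytest.raises(AttributeError):
        ip_obj.load_file("../static-images/bad-image.jpeg")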
Example #2
    def __init__(self, wholeImg_dir, faceImg_dir, anns):
        self.img_prc = ImageProcessing()
        self.wholeImg_dir = wholeImg_dir
        self.faceImg_dir = faceImg_dir
        self.anns = anns
        #
        self.face_paths = glob.glob(os.path.join(faceImg_dir, '*.jpg'))
        self.whole_paths = glob.glob(os.path.join(wholeImg_dir, '*.jpg'))
        self.names = get_names(self.face_paths)
        #
        with open(self.anns, 'r') as f:
            lines = f.readlines()
        # skip the header line
        self.infos = {}
        #
        print('# line {}'.format(len(lines)))

        for line in lines[1:]:
            fname, x1, x2, y1, y2, w, h = line.split(',')
            x1, x2, y1, y2, w, h = map(lambda x: int(x.strip()),
                                       [x1, x2, y1, y2, w, h])
            coord = [x1, y1, x2, y2, w, h]

            whole_img = self.img_prc.path2img(os.path.join(
                self.wholeImg_dir, fname),
                                              resize=None)
            face_img = self.img_prc.path2img(os.path.join(
                self.faceImg_dir, fname),
                                             resize=None)
            self.infos[fname] = {
                'coord': coord,
                'whole_img': whole_img,
                'face_img': face_img
            }
Example #3
def test_image_processing_prerequisite_saves_reload():
    ip_obj = ImageProcessing(yolo_path="../YOLO")
    ip_obj.load_file("../static-images/4_or_more_people_clinic.jpeg")
    ip_obj.preprocess_image()
    ip_obj.process_bounding_boxes()
    ip_obj.output_adjusted_image("what2.jpg")
    assert 4 <= ip_obj.people_count <= 8
Example #4
def test_image_processing_no_image():
    ip_obj = ImageProcessing(yolo_path="../YOLO")
    try:
        ip_obj.load_file("../static-images/does-not-exist-image.jpeg")
        assert False
    except IOError:
        assert True
Example #5
    def __init__(self):
        self.utility = Utility()
        self.file_setup = FileSetup()
        self.cv_operations = CvOperations()
        self.image_processing = ImageProcessing()

        self.file_setup.setup_file_structure()
        self.date_id = self.utility.get_date_id()
Example #6
    def __init__(self, video_file="data/test_5/drone_eye.avi"):
        self.cap = cv2.VideoCapture(video_file)
        self.cap.set(cv.CV_CAP_PROP_FPS, 30)
        self.image_processing = ImageProcessing(area_treshold=300)
        self.writer = cv2.VideoWriter(filename="kalman_tracking5.avi", fps=30,
                                      frameSize=(320, 240),
                                      fourcc=cv.CV_FOURCC('M', 'J', 'P', 'G'))
        self.cam_altitude = []
        self.observations = []
Example #7
def test_image_processing_save_modified():
    ip_obj = ImageProcessing(yolo_path="../YOLO")
    ip_obj.load_file("../static-images/no_people.jpg")
    ip_obj.preprocess_image()
    ip_obj.process_bounding_boxes()
    ip_obj.output_adjusted_image("what.jpg")
    img = cv2.imread("what.jpg")
    assert img is not None
Example #8
    def open_from_file(self, file_name):
        with open(file_name, 'rb') as handle:
            b = pickle.load(handle)

        for i in range(len(b)):
            data = b[i]
            print(data)

            if isinstance(data[0], str):            # means image
                # parse the "(x, y)" position string into numeric coordinates
                pos = data[2][1:-1].split(",")
                x = float(pos[0].strip())
                y = float(pos[1].strip())

                img = self.base(source=data[0], width=400, size_hint_y=None,
                                size_hint_x=None, height=400, pos=(x, y))
                o = ImageProcessing(img, data[1])

                o.bydefaultrotation()
                self.where_to_add.container.add_widget(img, -1)

            else:                                   # means wire
                item = data[3]
                k = self.wirebase()
                tobeadded = self.dic[item]()

                tobeadded.topmost.width = data[0]
                tobeadded.topmost.height = data[1]

                # parse the "(x, y)" position string into numeric coordinates
                pos = data[4][1:-1].split(",")
                x = float(pos[0].strip())
                y = float(pos[1].strip())

                for j in tobeadded.topmost.children:
                    for u in j.children:
                        if isinstance(u, self.DraggableWire):
                            # restore the wire colour saved in the pickle
                            u.children[0].canvas.before.children[0].rgba = data[2]

                tobeadded.topmost.pos = (x, y)

                k.external_container.add_widget(tobeadded)
                self.where_to_add.container.add_widget(k, 0)
Example #9
    def __init__(self):
        self.img_processing = ImageProcessing()
        self.src_dir = 'test_imgs'
        self.imgs_coords = self.img_processing.generate_copped_imgs(
            self.src_dir, 32, 32, 64, 64)
        self.answer_sheet = {
            '1.png': [73, 74],
            '2.png': [875, 874],
            '3.png': [146, 147]
        }
Example #10
    def capture(self):
        '''
        Function to capture the images and give them the names
        according to their captured time and date.
        '''
        self.camerascreen.camera.export_to_png("temp.png")
        print("Captured")
        obj = ImageProcessing()
        obj.removeBackground()
Example #11
    def filegenerator(self):
        # receive the image over the socket in chunks and write it to disk
        with open('new.png', 'wb') as f:
            chunk = self.s.recv(99216)
            while len(chunk) != 0:
                f.write(chunk)
                chunk = self.s.recv(99216)
            time.sleep(0.2)
            obj = ImageProcessing()
            obj.removeBackGroundWithApi()
Example #12
    def __init__(self, config_file_path):
        """
        __init__ - self
        """
        LOGGER.info("Initializing UATU.")
        self.config_file = config_file_path
        self.cfg_organizer = ConfigOrganizer(config_file=self.config_file)
        self.cfg_organizer.read_config_data()
        self.acq_obj = Acquisition()
        self.current_max_count()
        LOGGER.info("Completed initialization.")
        self.img_processing = ImageProcessing(
            yolo_path=self.cfg_organizer.config_handler['system']['yolo_dir'])
Example #13
    def rotate_image_right(self):
        if uiApp.current_selected_widget is not None:
            if isinstance(uiApp.current_selected_widget,
                          DraggableImageButtonWithDoubleTouch):
                if self.allow_image_rotation:
                    obj = ImageProcessing(uiApp.current_selected_widget, 90)
                    obj.rotate_right()
                else:
                    uiApp.current_selected_widget = None
        else:
            toast("you haven't selected an object")
Example #14
    def __init__(self, fg_dir, bg_dir, resize):
        self.WALLY = 0
        self.NOT_WALLY = 1
        self.fg_paths = glob.glob(os.path.join(fg_dir, '*'))
        self.bg_paths = glob.glob(os.path.join(bg_dir, '*'))
        self.resize = resize
        print('# Foreground : {} \t # Background : {}'.format(
            len(self.fg_paths), len(self.bg_paths)))

        # foreground images
        image_process = ImageProcessing()
        self.fg_imgs = image_process.paths2imgs(self.fg_paths, self.resize)
        self.n_fg = len(self.fg_imgs)
        # background images
        self.bg_imgs = image_process.paths2imgs(self.bg_paths, self.resize)
        self.n_bg = len(self.bg_imgs)
Example #15
    def __init__(self, client):
        self.on_command_list = []
        self.on_reaction_list = []
        self.on_message_list = []
        self.applications = [
            KoishiMentionContext(),
            KoishiJyanken(),
            KoishiLaugh(),
            KoishiReactionEcho(),

            # BaseCaption(cmd_keys=["test"]),
            KeywordReply(keywords_path="keywords.pickle",
                         min_time_gap=60 * 10,
                         recover_time=60 * 60 * 2),
            KoishiSimpleCaption(),
            Choose(),
            StatEmoji(how_long=7 * 4),
            Mahjong(),
            FileManager(link_dict_path="link_dict.pickle"),
            History(),
            EmojiRaw(),
            Puzzle(735780993784610816, prefix="koishi"),
            ImageProcessing(),
            LinkExtractor(),
            # Balloon(us=us),
            Help(self),
            # us,
        ]

        self.regist_events()
Example #16
    def __init__(self, parent=None):
        QtGui.QMainWindow.__init__(self, parent)

        self.frame = QtGui.QFrame()
        self.vl = QtGui.QGridLayout()

        self.vtkWidget_right_femur = QVTKRenderWindowInteractor(self.frame)
        self.vtkWidget_right_hip = QVTKRenderWindowInteractor(self.frame)
        self.vtkWidget_left_femur = QVTKRenderWindowInteractor(self.frame)
        self.vtkWidget_left_hip = QVTKRenderWindowInteractor(self.frame)

        title_right_femur = QtGui.QLabel('Femur derecho')
        title_right_hip = QtGui.QLabel('Cadera derecha')
        title_left_femur = QtGui.QLabel('Femur izquierdo')
        title_left_hip = QtGui.QLabel('Cadera izquierda')
        self.vl.addWidget(title_right_femur, 0, 0)
        self.vl.addWidget(self.vtkWidget_right_femur, 1, 0)
        self.vl.addWidget(title_right_hip, 0, 1)
        self.vl.addWidget(self.vtkWidget_right_hip, 1, 1)

        self.vl.addWidget(title_left_femur, 2, 0)
        self.vl.addWidget(self.vtkWidget_left_femur, 3, 0)
        self.vl.addWidget(title_left_hip, 2, 1)
        self.vl.addWidget(self.vtkWidget_left_hip, 3, 1)

        self.ren_right_femur = vtk.vtkRenderer()
        self.ren_right_hip = vtk.vtkRenderer()
        self.ren_left_femur = vtk.vtkRenderer()
        self.ren_left_hip = vtk.vtkRenderer()

        self.vtkWidget_right_femur.GetRenderWindow().AddRenderer(self.ren_right_femur)
        self.vtkWidget_right_hip.GetRenderWindow().AddRenderer(self.ren_right_hip)
        self.vtkWidget_left_femur.GetRenderWindow().AddRenderer(self.ren_left_femur)
        self.vtkWidget_left_hip.GetRenderWindow().AddRenderer(self.ren_left_hip)

        self.iren_right_femur = self.vtkWidget_right_femur.GetRenderWindow().GetInteractor()
        self.iren_right_hip = self.vtkWidget_right_hip.GetRenderWindow().GetInteractor()
        self.iren_left_femur = self.vtkWidget_left_femur.GetRenderWindow().GetInteractor()
        self.iren_left_hip = self.vtkWidget_left_hip.GetRenderWindow().GetInteractor()

        openFile = QtGui.QAction(QtGui.QIcon('open.png'), 'Open', self)
        openFile.setShortcut('Ctrl+O')
        openFile.setStatusTip('Open new File')
        openFile.triggered.connect(self.show_dialog)

        menubar = self.menuBar()
        fileMenu = menubar.addMenu('&File')
        fileMenu.addAction(openFile)

        self.image_processing = ImageProcessing()

        self.frame.setLayout(self.vl)
        self.setCentralWidget(self.frame)
        self.setGeometry(50, 50, 1200, 800)
        self.show()
Example #17
def start():
    filename = filedialog.askopenfilename()
    img = cv2.imread(filename)
    img = ImageProcessing.gray(img)
    img = ImageProcessing.get_text_crop(img)
    img = numpy.asarray(img)

    img = ImageProcessing.blur(img)
    img = ImageProcessing.adaptive_thresholding(img)
    PIL.Image.fromarray(img).save("images\\3_.jpg")
    img_thr = ImageProcessing.global_thresholding(img)
    PIL.Image.fromarray(img_thr).save("images\\2_.jpg")
    img_thr = PIL.Image.fromarray(img_thr)
    ImageProcessing.print_text(img_thr)
Example #18
def extract_wallybody(dirpath, anns_path):
    img_prc = ImageProcessing()
    anns = open(anns_path, 'r')

    paths = glob.glob('{}/*.jpg'.format(dirpath))
    names = os.listdir(dirpath)

    # Load the annotation info for every image that contains a body
    lines = anns.readlines()
    body_dict = {}
    for line in lines[1:]:
        fpath, x1, x2, y1, y2 = line.split(',')
        x1, x2, y1, y2 = map(lambda ele: int(ele.strip()), [x1, x2, y1, y2])
        name = utils.get_name(fpath)
        # first box seen for this image
        if name not in body_dict:
            body_dict[name] = [(x1, x2, y1, y2)]
        else:
            body_dict[name].append((x1, x2, y1, y2))

    fg_imgs_list = []
    bg_imgs_list = []
    fgs = []
    # get wally face list
    for p, path in enumerate(paths):
        name = utils.get_name(path)
        img = np.asarray(Image.open(path).convert('RGB'))
        # extract wally
        if name in body_dict:
            for i, coord in enumerate(body_dict[name]):
                x1, x2, y1, y2 = coord
                fg = img[y1:y2, x1:x2, :]

                fgs.append(fg)
                fg_imgs, fg_coords = img_prc.guarantee_stride_cropping(
                    img, (400, 400), [x1, y1, x2, y2], (25, 25))
                if len(fg_imgs) == 0:
                    print(path, x2 - x1, y2 - y1)
                else:
                    fg_imgs = img_prc.resize_npImages(fg_imgs, (80, 80))
                    fg_imgs_list.append(fg_imgs)

                img = copy.deepcopy(img)
                # fill rectangle for extract back grounds images
                cv2.rectangle(img, (x1, y1), (x2, y2), (0, 0, 0), -1)

        bg_imgs, bg_coords = img_prc.stride_cropping(img, 200, 200, 400, 400)
        bg_imgs = img_prc.resize_npImages(bg_imgs, (80, 80))
        bg_imgs_list.append(bg_imgs)

    fgs = np.vstack(fg_imgs_list)
    bgs = np.vstack(bg_imgs_list)

    return fgs, bgs
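A minimal usage sketch for extract_wallybody; the directory and CSV paths are hypothetical, but the expected annotation layout (a header line, then fpath,x1,x2,y1,y2 rows) follows the parsing above:

# fgs, bgs are stacks of 80x80 foreground / background crops
fgs, bgs = extract_wallybody('wally_scenes', 'wally_scenes/body_anns.csv')
print(fgs.shape, bgs.shape)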
Example #19
"""
2018.10.2 일날 rasp 로 찍은 월리 사진을 데이터 셋에 넣음

"""
import glob, os
from image_processing import ImageProcessing
import numpy as np
from PIL import Image
import utils

root_root_dir = '/mnt/Find_Wally/wally_dataset'
second_dir = os.path.join(root_root_dir, 'second_dataset')
third_dir = os.path.join(root_root_dir, 'third_dataset')
root_save_dir = 'wally_raspCam_np'

img_prc = ImageProcessing()
sec_paths = glob.glob(os.path.join(second_dir, '*.jpg'))
trd_paths = glob.glob(os.path.join(third_dir, '*.jpg'))

assert len(sec_paths) != 0 and len(trd_paths) != 0

tmp_dict = {'second': sec_paths, 'third': trd_paths}
for key in tmp_dict:
    paths = tmp_dict[key]
    save_dir = os.path.join(root_save_dir, key)
    utils.makedir(save_dir)
    for ind, path in enumerate(paths):
        utils.show_progress(ind, len(paths))
        name = utils.get_name(path)
        img = np.asarray(Image.open(path).convert('RGB'))
        # Cropping
Example #20
    def data_clean_and_data_integration(self, article_title_summary_content):
        """
        数据清洗
        找到所有文章的图片下载地址,下载图片并按照数据库文章图片路径存储
        构建图片地址替换字典
        把html_content里面的图片地址替换成线上服务器的图片路径地址
        :param article_title_summary_content:
        :return: article_title_summary_content
        """
        try:
            number = 0
            for data in article_title_summary_content:
                img_url_dict_old = {}
                img_url_dict_new = {}
                html_content = etree.HTML(data['content'])
                # list of image URLs
                image_list = html_content.xpath(
                    "//div[@id='cnblogs_post_body']//img/@src")
                if image_list:
                    # build the storage directory name, one per day
                    year = time.strftime('%Y', time.localtime(time.time()))
                    month = time.strftime('%m', time.localtime(time.time()))
                    dirname_day = time.strftime('%d', time.localtime(time.time()))
                    dir_path = self.create_dir.folder()  # create the image storage directory
                    os.chdir(dir_path)
                    n = 1
                    for i in image_list:
                        suffix_name = os.path.splitext(i)[1]  # image file extension
                        # build the full image name from a random
                        # 32-character string plus the extension
                        ran_str = ''.join(
                            random.sample(string.ascii_letters + string.digits,
                                          32))
                        img_name = ran_str + suffix_name
                        # image URL on the production server
                        server_img_url = '/Public/images/article/picture/{}/{}/{}/{}'.format(
                            year, month, dirname_day, img_name)
                        # download the image (source URL, local file name)
                        try:
                            urllib.request.urlretrieve(i, img_name)
                        except Exception as e:
                            self.log.info("下载异常:{}".format(e))

                        # check that the image size meets the requirements
                        # (absolute directory path, image name)
                        ImageProcessing(dir_path, img_name)
                        time.sleep(3)
                        # build the URL replacement mapping:
                        # old = {1: source_url, ...}; new = {1: server_img_url, ...}
                        img_url_dict_old[n] = i
                        img_url_dict_new[n] = server_img_url
                        n += 1

                    # replace the old URLs with the server URLs
                    for n_key in img_url_dict_old:
                        article_title_summary_content[number]['content'] = \
                            article_title_summary_content[number]['content'].replace(
                                img_url_dict_old[n_key], img_url_dict_new[n_key])
                else:
                    self.log.info("article '{}' has no images".format(data['title']))
                number += 1
            self.log.info("finished downloading all images locally")
            # change back to the script's root directory
            os.chdir(os.path.dirname(os.path.abspath(__file__)))
            # upload the files to the remote server, then recursively delete
            # the locally created directories
            res = upload()
            if res:
                self.log.info("uploaded images to the server successfully")
            else:
                self.log.info("failed to upload images to the server")
            self.log.info("finished replacing article image URLs and cleaning the URL data")
            return article_title_summary_content

        except Exception as e:
            self.log.info(e)
            return article_title_summary_content
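A standalone sketch of the URL-swap step described in the docstring; the URLs and server paths are hypothetical, and only the dict bookkeeping and str.replace call mirror the method:

html = '<img src="https://img.example.com/a.png"/>'
img_url_dict_old = {1: 'https://img.example.com/a.png'}
img_url_dict_new = {1: '/Public/images/article/picture/2020/05/01/x1.png'}
for n in img_url_dict_old:
    # keys were assigned in lockstep, so old[n] maps to new[n]
    html = html.replace(img_url_dict_old[n], img_url_dict_new[n])
print(html)  # the src attribute now points at the server path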
Example #21
class WallyDataset_ver4(Wally_dataset):
    """
    Class Usage :

    1. self.infos 는
        {이미지이름.jpg :
        {'whole_img' : 이름 원본 이미지와(Numpy)} ,
        {'face_img'  , 원본 이미지 (Numpy)}
        {'coords': face coord }
        형태로 저장되어 있습니다.

    2. 월리 얼굴 형태를 포함해서 cropping 합니다
        def cropping_with_face:
        src_img : 원본 이미지입니다  , self.infos['whole_img] 에서 가져와 사용하면 됩니다.
        crop_size : 얼마만큼 crop 할지 결정합니다
        coord : 얼굴 좌표를 말합니다  ,self.infos['coords'] 에서 가져와 사용하면 됩니다.
        stride_size
    """
    def __init__(self, wholeImg_dir, faceImg_dir, anns):
        self.img_prc = ImageProcessing()
        self.wholeImg_dir = wholeImg_dir
        self.faceImg_dir = faceImg_dir
        self.anns = anns
        #
        self.face_paths = glob.glob(os.path.join(faceImg_dir, '*.jpg'))
        self.whole_paths = glob.glob(os.path.join(wholeImg_dir, '*.jpg'))
        self.names = get_names(self.face_paths)
        #
        with open(self.anns, 'r') as f:
            lines = f.readlines()
        # skip the header line
        self.infos = {}
        #
        print('# line {}'.format(len(lines)))

        for line in lines[1:]:
            fname, x1, x2, y1, y2, w, h = line.split(',')
            x1, x2, y1, y2, w, h = map(lambda x: int(x.strip()),
                                       [x1, x2, y1, y2, w, h])
            coord = [x1, y1, x2, y2, w, h]

            whole_img = self.img_prc.path2img(os.path.join(
                self.wholeImg_dir, fname),
                                              resize=None)
            face_img = self.img_prc.path2img(os.path.join(
                self.faceImg_dir, fname),
                                             resize=None)
            self.infos[fname] = {
                'coord': coord,
                'whole_img': whole_img,
                'face_img': face_img
            }

    def cropping_with_face(self, src_img, crop_size, coord, stride_size):
        # src_img = the source image
        # crop_size = (crop_h, crop_w)
        # coord = [x1, y1, x2, y2]
        # stride_size = (stride_h, stride_w)
        cropped_imgs, coords = self.img_prc.guarantee_stride_cropping(
            src_img, crop_size, coord, stride_size)
        return cropped_imgs

    def generate_tfrecord(self, tfrecord_path, n_fg, fg_imgs, n_bg, bg_imgs):
        self.img_prc.make_tfrecord(tfrecord_path, None, (n_fg, fg_imgs),
                                   (n_bg, bg_imgs))

    def get_wallyface(self):
        fg_train_savepath = os.path.join('Wally_ver3', 'numpy_imgs',
                                         'fg_train.npy')
        fg_test_savepath = os.path.join('Wally_ver3', 'numpy_imgs',
                                        'fg_test.npy')
        fg_val_savepath = os.path.join('Wally_ver3', 'numpy_imgs',
                                       'fg_val.npy')

        if os.path.exists(fg_train_savepath) and os.path.exists(
                fg_test_savepath) and os.path.exists(fg_val_savepath):
            self.fg_train_imgs = np.load(fg_train_savepath)
            self.fg_test_imgs = np.load(fg_test_savepath)
            self.fg_val_imgs = np.load(fg_val_savepath)
        else:
            print('Generating WallyFace Data....')
            fg_list = []
            for key in self.infos:
                target_coords = self.infos[key]['coord']
                x1, y1, x2, y2 = target_coords[:4]
                whole_img = self.infos[key]['whole_img']
                # crop while guaranteeing that Wally's face is included
                cropped_imgs = self.cropping_with_face(whole_img, (64, 64),
                                                       [x1, y1, x2, y2],
                                                       (1, 1))
                # Save crop
                np.save('./Wally_ver3/cropped_img_with_face/{}'.format(
                    os.path.splitext(key)[0]),
                        arr=cropped_imgs)
                fg_imgs = np.load(
                    './Wally_ver3/cropped_img_with_face/{}.npy'.format(
                        os.path.splitext(key)[0]))
                fg_list.append(fg_imgs)
            fg_imgs = np.vstack(fg_list)
            print('foreground shape : {}'.format(np.shape(fg_imgs)))

            # divide Train , Val , Test
            self.fg_test_imgs = fg_imgs[:5000]
            self.fg_val_imgs = fg_imgs[5000:5000 * 2]
            self.fg_train_imgs = fg_imgs[5000 * 2:]

            # save imgs to numpy
            np.save(fg_train_savepath, self.fg_train_imgs)
            np.save(fg_test_savepath, self.fg_test_imgs)
            np.save(fg_val_savepath, self.fg_val_imgs)

    def get_train(self):
        imgs = np.vstack([self.fg_train_imgs, self.bg_train_imgs])
        labs = np.asarray([0] * len(self.fg_train_imgs) +
                          [1] * len(self.bg_train_imgs))
        trainImgs_savepath = os.path.join('Wally_ver3', 'numpy_imgs',
                                          'train_imgs.npy')
        trainLabs_savepath = os.path.join('Wally_ver3', 'numpy_imgs',
                                          'train_labs.npy')
        np.save(trainImgs_savepath, imgs)
        np.save(trainLabs_savepath, labs)
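A minimal usage sketch for WallyDataset_ver4 under assumed inputs (the directories and annotation CSV are hypothetical; the CSV needs a header line plus fname,x1,x2,y1,y2,w,h rows, as parsed in __init__):

wally_dp = WallyDataset_ver4(wholeImg_dir='wally_imgs/whole',
                             faceImg_dir='wally_imgs/faces',
                             anns='wally_imgs/face_anns.csv')
name = list(wally_dp.infos)[0]
x1, y1, x2, y2 = wally_dp.infos[name]['coord'][:4]
# crops that are guaranteed to contain the annotated face
crops = wally_dp.cropping_with_face(wally_dp.infos[name]['whole_img'],
                                    (64, 64), [x1, y1, x2, y2], (1, 1))
print(len(crops))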
Example #22
from image_capture import ImageCapture
from image_processing import ImageProcessing
import cv2
import numpy as np

camera = ImageCapture()
#camera = cv2.VideoCapture(0)
processor = ImageProcessing()

while True:
    img_raw = camera.capture()
    img_cut = img_raw[:,
                      int(np.shape(img_raw)[1] * 1 /
                          5):int(np.shape(img_raw)[1] * 4 / 5), :]
    img_gray = processor.gray(img_cut)
    edge = processor.canny(img_gray)
    contour = processor.max_contour(edge)
    cv2.drawContours(img_cut, contour, -1, (0, 255, 0), 3)
    bounding_box = processor.bounding_box(contour)
    print(bounding_box)
    #if bounding_box != -1:
    #    print("success!")
    #else:
    #    print("failure!")
    cv2.imshow('raw_image', img_raw)
    cv2.imshow('cut_image', img_cut)
    cv2.imshow('gray_image', img_gray)
    cv2.imshow('canny_image', edge)
    cv2.waitKey(20)
Example #23
from image_processing import ImageProcessing

import cv2

beforeL = cv2.imread(
    "/home/tino/smt_image_data/SmtImageData/342411_20161007-175850_L.tif")
afterL = cv2.imread(
    "/home/tino/smt_image_data/SmtImageData/342411_20161007-183433_L.tif")

beforeR = cv2.imread(
    "/home/tino/smt_image_data/SmtImageData/388903_20161028-044310_R.tif")
afterR = cv2.imread(
    "/home/tino/smt_image_data/SmtImageData/388903_20161028-044623_R.tif")

processing = ImageProcessing()
result = processing.determineDisplacement(beforeL, beforeR, afterL, afterR)
print(result)
Example #24
def processing(job_id, visits):
    image_processing_obj = ImageProcessing(job_id, visits)
    image_processing_obj.process()
Example #25
    def __init__(self, **kwargs):
        self.mainloop = gobject.MainLoop()
        self.pipeline = gst.Pipeline("pipeline")
        self.verbose = kwargs.get('verbose', True)
        self.debug = kwargs.get('debug', False)
        cam_width = kwargs.get('cam_width', 640)
        cam_height = kwargs.get('cam_height', 480)
        host = kwargs.get('host', '127.0.0.1')
        port = kwargs.get('port', 5000)
        h264 = kwargs.get('h264', False)
        self.marker_spotted = False
        self.image_processing = ImageProcessing(area_threshold=10)
        self.state_estimate = StateEstimationAltitude()
        self.autopilot = AutoPilot(self.state_estimate)
        self.position_controller = PositionController(
            self.autopilot, self.state_estimate)
        if h264:
            self.videosrc = gst.parse_launch(
                'uvch264_src device=/dev/video0 name=src auto-start=true src.vfsrc')
        else:
            self.videosrc = gst.element_factory_make('v4l2src', 'v4l2src')
        fps = 30
        self.vfilter = gst.element_factory_make("capsfilter", "vfilter")
        self.vfilter.set_property('caps', gst.caps_from_string(
            'image/jpeg, width=%s, height=%s, framerate=20/1' % (str(cam_width), str(cam_height))))
        self.queue = gst.element_factory_make("queue", "queue")

        self.udpsink = gst.element_factory_make('udpsink', 'udpsink')
        self.rtpjpegpay = gst.element_factory_make('rtpjpegpay', 'rtpjpegpay')
        self.udpsink.set_property('host', host)
        self.udpsink.set_property('port', port)

        self.pipeline.add_many(
            self.videosrc,
            self.queue,
            self.vfilter,
            self.rtpjpegpay,
            self.udpsink)
        gst.element_link_many(
            self.videosrc,
            self.queue,
            self.vfilter,
            self.rtpjpegpay,
            self.udpsink)

        pad = next(self.queue.sink_pads())
        # Send frames to onVideoBuffer, where OpenCV can do processing.
        pad.add_buffer_probe(self.onVideoBuffer)
        self.pipeline.set_state(gst.STATE_PLAYING)
        self.i = 0
        gobject.threads_init()
        context = self.mainloop.get_context()
        #previous_update = time.time()
        fpstime = time.time()
        while True:
            try:
                context.iteration(False)
                self.autopilot._read_sensors()
                if self.autopilot.auto_switch > 1700:
                    self.position_controller.headingHold()
                    self.position_controller.holdAltitude()
                    self.autopilot.send_control_commands()
                else:
                    self.position_controller.reset_targets()
                    print(self.autopilot.print_commands())
            except KeyboardInterrupt:
                fps = self.i / (time.time() - fpstime)
                print('fps %f ' % fps)
                self.autopilot.dump_log()
                self.autopilot.disconnect_from_drone()
Example #26
def test_image_processing_prerequisite_does_not_crash():
    ip_obj = ImageProcessing(yolo_path="../YOLO")
    ip_obj.load_file("../static-images/4_or_more_people_clinic.jpeg")
    ip_obj.preprocess_image()
    assert 4 <= ip_obj.people_count <= 8
Example #27
class MainWindow(QtGui.QMainWindow):
    def __init__(self, parent=None):
        QtGui.QMainWindow.__init__(self, parent)

        self.frame = QtGui.QFrame()
        self.vl = QtGui.QGridLayout()

        self.vtkWidget_right_femur = QVTKRenderWindowInteractor(self.frame)
        self.vtkWidget_right_hip = QVTKRenderWindowInteractor(self.frame)
        self.vtkWidget_left_femur = QVTKRenderWindowInteractor(self.frame)
        self.vtkWidget_left_hip = QVTKRenderWindowInteractor(self.frame)

        title_right_femur = QtGui.QLabel('Femur derecho')
        title_right_hip = QtGui.QLabel('Cadera derecha')
        title_left_femur = QtGui.QLabel('Femur izquierdo')
        title_left_hip = QtGui.QLabel('Cadera izquierda')
        self.vl.addWidget(title_right_femur, 0, 0)
        self.vl.addWidget(self.vtkWidget_right_femur, 1, 0)
        self.vl.addWidget(title_right_hip, 0, 1)
        self.vl.addWidget(self.vtkWidget_right_hip, 1, 1)

        self.vl.addWidget(title_left_femur, 2, 0)
        self.vl.addWidget(self.vtkWidget_left_femur, 3, 0)
        self.vl.addWidget(title_left_hip, 2, 1)
        self.vl.addWidget(self.vtkWidget_left_hip, 3, 1)

        self.ren_right_femur = vtk.vtkRenderer()
        self.ren_right_hip = vtk.vtkRenderer()
        self.ren_left_femur = vtk.vtkRenderer()
        self.ren_left_hip = vtk.vtkRenderer()

        self.vtkWidget_right_femur.GetRenderWindow().AddRenderer(self.ren_right_femur)
        self.vtkWidget_right_hip.GetRenderWindow().AddRenderer(self.ren_right_hip)
        self.vtkWidget_left_femur.GetRenderWindow().AddRenderer(self.ren_left_femur)
        self.vtkWidget_left_hip.GetRenderWindow().AddRenderer(self.ren_left_hip)

        self.iren_right_femur = self.vtkWidget_right_femur.GetRenderWindow().GetInteractor()
        self.iren_right_hip = self.vtkWidget_right_hip.GetRenderWindow().GetInteractor()
        self.iren_left_femur = self.vtkWidget_left_femur.GetRenderWindow().GetInteractor()
        self.iren_left_hip = self.vtkWidget_left_hip.GetRenderWindow().GetInteractor()

        openFile = QtGui.QAction(QtGui.QIcon('open.png'), 'Open', self)
        openFile.setShortcut('Ctrl+O')
        openFile.setStatusTip('Open new File')
        openFile.triggered.connect(self.show_dialog)

        menubar = self.menuBar()
        fileMenu = menubar.addMenu('&File')
        fileMenu.addAction(openFile)

        self.image_processing = ImageProcessing()

        self.frame.setLayout(self.vl)
        self.setCentralWidget(self.frame)
        self.setGeometry(50, 50, 1200, 800)
        self.show()

    def show_dialog(self):
        fname = QtGui.QFileDialog.getExistingDirectory(None, 'Select a folder:', '', QtGui.QFileDialog.ShowDirsOnly)
        if fname:
            self.image_processing.execute(str(fname))
            self.update_gui()

    def update_gui(self):
        self.clean_gui()
        self.process_femurs()
        self.process_hips()
        self.show()

    def clean_gui(self):
        self.ren_right_femur = vtk.vtkRenderer()
        self.ren_right_hip = vtk.vtkRenderer()
        self.ren_left_femur = vtk.vtkRenderer()
        self.ren_left_hip = vtk.vtkRenderer()

        self.vtkWidget_right_femur.GetRenderWindow().AddRenderer(self.ren_right_femur)
        self.vtkWidget_right_hip.GetRenderWindow().AddRenderer(self.ren_right_hip)
        self.vtkWidget_left_femur.GetRenderWindow().AddRenderer(self.ren_left_femur)
        self.vtkWidget_left_hip.GetRenderWindow().AddRenderer(self.ren_left_hip)

        self.iren_right_femur = self.vtkWidget_right_femur.GetRenderWindow().GetInteractor()
        self.iren_right_hip = self.vtkWidget_right_hip.GetRenderWindow().GetInteractor()
        self.iren_left_femur = self.vtkWidget_left_femur.GetRenderWindow().GetInteractor()
        self.iren_left_hip = self.vtkWidget_left_hip.GetRenderWindow().GetInteractor()

    def process_hips(self):
        hips = self.image_processing.segmented_hips.copy()
        for key in hips.keys():
            actor = self.process_image(hips[key])
            if key == self.image_processing.RIGHT_LEG:
                self.ren_right_hip.AddActor(actor)
                self.ren_right_hip.ResetCamera()
                self.iren_right_hip.Initialize()
            else:
                self.ren_left_hip.AddActor(actor)
                self.ren_left_hip.ResetCamera()
                self.iren_left_hip.Initialize()

    def process_femurs(self):
        femurs = self.image_processing.segmented_legs.copy()
        for key in femurs.keys():
            actor = self.process_image(femurs[key])
            if key == self.image_processing.RIGHT_LEG:
                self.ren_right_femur.AddActor(actor)
                self.ren_right_femur.ResetCamera()
                self.iren_right_femur.Initialize()
            else:
                self.ren_left_femur.AddActor(actor)
                self.ren_left_femur.ResetCamera()
                self.iren_left_femur.Initialize()

    def process_image(self, image):
        dims = image.shape
        width = dims[1]
        height = dims[2]
        depth = dims[0]
        vtk_data = numpy_support.numpy_to_vtk(num_array=image.ravel(), deep=True, array_type=vtk.VTK_FLOAT)

        imgdat = vtk.vtkImageData()
        imgdat.GetPointData().SetScalars(vtk_data)
        imgdat.SetDimensions(height, width, depth)
        imgdat.SetOrigin(0, 0, 0)
        spacing = self.image_processing.spacing
        imgdat.SetSpacing(spacing[0], spacing[1], spacing[2])

        dmc = vtk.vtkDiscreteMarchingCubes()
        dmc.SetInputData(imgdat)
        dmc.GenerateValues(1, 1, 1)
        dmc.Update()

        smoothing_iterations = 15
        pass_band = 0.001
        feature_angle = 120.0

        smoother = vtk.vtkWindowedSincPolyDataFilter()
        smoother.SetInputConnection(dmc.GetOutputPort())
        smoother.SetNumberOfIterations(smoothing_iterations)
        smoother.BoundarySmoothingOff()
        smoother.FeatureEdgeSmoothingOff()
        smoother.SetFeatureAngle(feature_angle)
        smoother.SetPassBand(pass_band)
        smoother.NonManifoldSmoothingOn()
        smoother.NormalizeCoordinatesOn()
        smoother.Update()

        mapper = vtk.vtkPolyDataMapper()
        mapper.SetInputConnection(smoother.GetOutputPort())

        actor = vtk.vtkActor()
        actor.SetMapper(mapper)
        return actor
Example #28
from sprites import Player
from sprites import Enemy
from sprites import Level
from sprites import Level_01
from sprites import Platform
from sprites import Bullet
from sprites import Coin

from game import Game
from image_processing import ImageProcessing

# Global constants
#scaling factor
SCALE = 2

im_pr = ImageProcessing()

# Screen dimensions
#get the images dimensions
img_png, img, SCREEN_HEIGHT, SCREEN_WIDTH = im_pr.getImage()

#scaled dimensions of the image
SCREEN_HEIGHT = SCALE * SCREEN_HEIGHT
SCREEN_WIDTH = SCALE * SCREEN_WIDTH

#dimensions of the display window
DISPLAY_WIDTH = 1000
DISPLAY_HEIGHT = 600

#starting player position & world shift thresholds
init_x = 300
Example #29
from image_processing import ImageProcessing
import matplotlib.pyplot as plt


path = "/Volumes/Files/imagenes/ALMANZA_RUIZ_JUAN_CARLOS/TAC_DE_PELVIS - 84441/_Bone_10_4/"
# path = "/Volumes/Files/imagenes/AVILA_MALAGON_ZULMA_IVONNE/TAC_DE_PELVIS_SIMPLE - 89589/_Bone_10_2/"
# path = "/Volumes/Files/imagenes/ALVAREZ_PATINO_SOFIA/PELVIS - 73864/_Bone_10_5/"
# path = "/Volumes/Files/imagenes/CHACON_BARBA_SERGIO_ANDRES/CADERA_SIMPLE - 103000/_Bone_10_4/"
# path = "/Volumes/Files/imagenes/LOAIZA_ORTIZ_JONATHAN_ESTEVEN/CADERA_IZQUIERDA - 94647/_Bone_10_5/"
# path = "/Volumes/Files/imagenes/VARGAS_DIAZ_CLAUDIA_PATRICIA/PELVIS_3D - 84024/_Bone_10_5/"

c = ImageProcessing()
c.execute(path)
legs = c.legs
segmented_legs = c.segmented_legs
segmented_hips = c.segmented_hips

for leg_key in legs.keys():

    for k in range(100, legs[leg_key].shape[0]):
        fig = plt.figure(k)
        a = fig.add_subplot(1, 2, 1)
        img = plt.imshow(segmented_legs[leg_key][k, :, :], cmap='Greys_r', interpolation="none")
        a = fig.add_subplot(1, 2, 2)
        img = plt.imshow(segmented_hips[leg_key][k, :, :], cmap='Greys_r', interpolation="none")
        if k % 20 == 0:
            plt.show()
    plt.show()
Example #30
class OfflineVideo():

    def __init__(self, video_file="data/test_5/drone_eye.avi"):
        self.cap = cv2.VideoCapture(video_file)
        self.cap.set(cv.CV_CAP_PROP_FPS, 30)
        self.image_processing = ImageProcessing(area_treshold=300)
        self.writer = cv2.VideoWriter(filename="kalman_tracking5.avi", fps=30,
                                      frameSize=(320, 240),
                                      fourcc=cv.CV_FOURCC('M', 'J', 'P', 'G'))
        self.cam_altitude = []
        self.observations = []
        #self.autopilot = AutoPilot(simulate=True, thrust_step=30, pixel_threshold=10, cam_width=320, cam_height=240)
        # self.ras_MIN = np.array([150, 80, 80], np.uint8)
        # self.ras_MAX = np.array([175, 255, 255], np.uint8)

    def run(self):
        i = 0
        try:
            while True:
                flag, frame = self.cap.read()
                if i % 3 == 0:
                    marker = self.image_processing.recognize_marker(frame)
                    if marker:
                        self.cam_altitude.append(marker.z)
                        self.observations.append(marker)
                        cv2.circle(frame, (marker.x, marker.y), 5, 255, -1)
                        # print(marker.rect)
                        x, y, w, h = cv2.boundingRect(marker.best_cnt)
                        cv2.rectangle(
                            frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
                        cv2.putText(frame, 'altitude %s ' % marker.z, (
                            20, 30), cv2.FONT_HERSHEY_PLAIN, 2, (0, 255, 0))
                        cv2.putText(frame, 'X: %s Y: %s ' %
                                    (str(marker.x), str(marker.y)),
                                    (20, 50), cv2.FONT_HERSHEY_PLAIN, 2,
                                    (0, 255, 0))
                    else:
                        self.cam_altitude.append(None)
                        self.observations.append(None)
                i += 1
                #time.sleep(0.3)
                self.writer.write(frame)
                cv2.imshow('drone eye', frame)
                cv2.waitKey(5)
        except:  # any error (video end or Ctrl+C) stops the loop; dump the data
            # draw estimates
            pickle.dump(self.cam_altitude, open('cam_alt.dump', 'wb'))
            pickle.dump(
                self.observations,
                open('marker_observations5.dump', 'wb'))
            # pl.figure(size=(320,240))
            x = [o.x for o in self.observations if o]
            y = [o.y for o in self.observations if o]
            # obs_scatter = pl.scatter(x, y, marker='x', color='b',
            #             label='observations')

            position_line = pl.plot(x, y,
                                    linestyle='-', marker='o', color='r',
                                    label='position est.')
            # lines_true = pl.plot(self.cam_altitude, color='b')
            # observations = pl.plot(x, color='b')
            # lines_filt = pl.plot(filtered_state_means, color='r')
            # pl.legend((lines_true[0]), ('Camera altitude'))
            pl.show()
Example #31
    print("Select other by --gpu option: CPU")
    print("-----------------------------------------------------")
    print("Index file with names: cat_to_name.json (default)")
    print("Select other by --indexfile option")
    print("-----------------------------------------------------")

    active_dir = os.getcwd()

    parameter_in = get_input_predict_args()
    filepath = parameter_in.filepath
    load_directory = parameter_in.loaddir
    modelname = parameter_in.modelname
    topk = parameter_in.topk
    cat_names = parameter_in.c_n
    dev = parameter_in.dev

    print(parameter_in)
    print(active_dir)

    p_object = prediction_model()
    p_object.load_model(dev, load_directory, modelname)

    np_test_img = ImageProcessing(filepath)
    np_image = np_test_img.process_image()
    top_p, top_class = p_object.predict(np_image, dev, topk)

    print(top_p)
    print(top_class)

    p_object.show_me_probs(top_p, top_class, cat_names)
Example #32
def test_image_processing_prerequisite_no_people():
    ip_obj = ImageProcessing(yolo_path="../YOLO")
    ip_obj.load_file("../static-images/no_people.jpg")
    ip_obj.preprocess_image()
    assert ip_obj.people_count == 0
Example #33
# Test
import cv2

from image_processing import ImageProcessing, bilateral_filter
from image_processing import convert_color, midpoint, blur, median_blur
from image_processing import info, show, contrast, threshold, draw_contour

image = ImageProcessing(r'images_heap\sym.png')
image.read()

info(image.img)
show(image.img)

# contrast
show(contrast(image.img, 3, 100))

# convert color
img = convert_color(image.img)

# threshold image
thresh = threshold(img, 55, cv2.THRESH_BINARY_INV)
show(thresh)

# midpoint on the original image
show(midpoint(img, 5))

# midpoint on the threshold image
show(midpoint(thresh, 3))

# blur on the threshold image
show(blur(thresh, 5))
Example #34
#!/usr/bin/env python
from flask import Flask, render_template, Response
from camera import Camera
import cv2

from image_processing import ImageProcessing

ip = ImageProcessing()

app = Flask(__name__)


@app.route('/')
def index():
    return render_template('webcam.html')


def gen(camera):
    while True:
        frame = camera.get_frame()
        image_with_boxes = ip.object_detection(frame)
        retval, buffer = cv2.imencode('.jpg', image_with_boxes)
        frame = buffer.tobytes()
        #response = make_response(buffer.tobytes())
        #response.headers['Content-Type'] = 'image/jpeg'
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')


@app.route('/video_feed')
def video_feed():
Example #35
    if not os.path.exists(newpath):
        os.makedirs(newpath)
    imagefile = datafolder + filename + '.png'
    textfile = open(datafolder + filename + '.txt', 'a')

    file_operations.save_to_folder(textfile, imagefile, bounding_box, final)


# todo get classes through GUI
classes = []

#interface = CLI()
file_operations = FileOperations()
motor = MotorControl()
camera = ImageCapture(RPI_CAMERA)
image_processor = ImageProcessing(camera.capture())

delay = 1 / 1000.0
#images = input("How many images do you want per category (5 categories)?")
images = 10000
STEPS_FOR_FULL_CIRCLE = 12360
steps = int(STEPS_FOR_FULL_CIRCLE / images)
classes = ["Banana"]  #, "Rolle"]

only_snippets = False
only_train_images = True

## Section for the configuration
# Make images for every class
for label in classes:
    if only_train_images:
Example #36
class Uatu:
    """
    Uatu - he who watches
    """
    def __init__(self, config_file_path):
        """
        __init__ - self
        """
        LOGGER.info("Initializing UATU.")
        self.config_file = config_file_path
        self.cfg_organizer = ConfigOrganizer(config_file=self.config_file)
        self.cfg_organizer.read_config_data()
        self.acq_obj = Acquisition()
        self.current_max_count()
        LOGGER.info("Completed initialization.")
        self.img_processing = ImageProcessing(
            yolo_path=self.cfg_organizer.config_handler['system']['yolo_dir'])

    def __repr__(self):
        return "string"

    def current_max_count(self):
        """
        current_max_count - returns the current maximum count for the cameras
        """
        namelist = ['name', 'timestamp', 'count']

        df = pd.read_csv(
            self.cfg_organizer.config_handler['system']['csv_location'],
            index_col=False,
            names=namelist)

        df2 = df.fillna(0)
        camera_names = df['name'].unique()
        df2.sort_values(by=['name', 'count'], inplace=True)
        series = df2.groupby('name')['count'].max()
        self.stored_values = series.to_dict()
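
    # A sketch (an assumption, not taken from the source) of the CSV rows
    # current_max_count expects, matching namelist = ['name', 'timestamp', 'count']:
    #   front_door,1588888888.0,3
    #   front_door,1588888900.0,5
    #   parking_lot,1588888890.5,7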

    def producer_images(self, pqueue, cqueue, lock):
        while True:
            if not pqueue.empty():
                camera_name = pqueue.get()
                image_name = '/tmp/{}-image.jpg'.format(camera_name)
                try:
                    self.acq_obj.retrieve(
                        self.cfg_organizer.config_handler[camera_name]['url'],
                        image_name)
                    with lock:
                        LOGGER.debug(f'retrieved image: {camera_name}')
                    cqueue.put((camera_name, image_name))
                except Exception as e:
                    with lock:
                        LOGGER.debug(f'exception: {e}')
        # pqueue.task_done()

    def consumer_process_image(self, cqueue, lock):
        counter = 1
        while True:
            if cqueue.empty():
                continue
            camera_name, image_name = cqueue.get()
            with lock:
                LOGGER.debug(f'processing camera: {camera_name}')
            try:
                with lock:
                    self.img_processing.load_file(
                        f'/tmp/{camera_name}-image.jpg')
            except IOError:
                with lock:
                    LOGGER.debug(f'yup - io error - skipping {camera_name}')
                return
            with lock:
                self.img_processing.preprocess_image()
            with lock:
                self.img_processing.process_bounding_boxes()
            processed_image = "/tmp/{}-{}-{}.jpg".format(
                camera_name, time.time(), self.img_processing.people_count)
            with lock:
                self.img_processing.output_adjusted_image(
                    '/tmp/what-{}.jpg'.format(counter))
            with lock:
                print("{},{},{},{}".format(camera_name, time.time(),
                                           self.img_processing.people_count,
                                           processed_image))
            if int(self.img_processing.people_count) > int(
                    self.stored_values[camera_name]):
                self.img_processing.output_adjusted_image(processed_image)

    def doit(self):
        self.current_max_count()

        pqueue = Queue(maxsize=0)
        cqueue = Queue(maxsize=0)

        lock = Lock()

        for i in range(NUM_WORKER_THREADS):
            pworker = threading.Thread(target=self.producer_images,
                                       args=(pqueue, cqueue, lock))
            pworker.daemon = True
            pworker.start()
            LOGGER.debug('initiated pworkers')
            cworker = threading.Thread(target=self.consumer_process_image,
                                       args=(cqueue, lock))
            cworker.daemon = True
            cworker.start()
            LOGGER.debug('initiated workers')

        cameras = self.cfg_organizer.find_cameras()
        for camera_name in cameras:
            LOGGER.debug(f'Loading {camera_name} into pqueue')
            pqueue.put(camera_name)

        cworker.join()

    def debug_dump(self):
        LOGGER.debug('Building camera information')
        for camera in self.cfg_organizer.find_cameras():
            LOGGER.debug(f'{camera}')
            LOGGER.debug('\t{}'.format(
                self.cfg_organizer.config_handler[camera]['url']))
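
A minimal usage sketch for Uatu; the config path is hypothetical, and the file is assumed to hold a [system] section with yolo_dir and csv_location plus one section per camera with a url, matching the lookups above:

watcher = Uatu('uatu_config.ini')
watcher.debug_dump()  # log every configured camera and its URL
watcher.doit()        # start the producer/consumer threads and queue the cameras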