Example #1
    def __init__(self, video_location, start_fnum=0, stop_fnum=0):
        self.capture = cv2.VideoCapture(video_location)
        self.capture.set(cv2.CAP_PROP_POS_FRAMES, start_fnum)
        self.window_name = "Damage TM Parameter Analyzer"

        self.start_fnum = start_fnum
        self.stop_fnum = stop_fnum
        if stop_fnum == 0:
            self.stop_fnum = int(self.capture.get(cv2.CAP_PROP_FRAME_COUNT))

        cv2.namedWindow(self.window_name)
        cv2.createTrackbar("Step Size", self.window_name, 1, 100,
                           self.on_step_trackbar)
        cv2.createTrackbar("Delay", self.window_name, 10, 500,
                           self.on_delay_trackbar)
        cv2.createTrackbar("TM ~ Off, On", self.window_name, 0, 1,
                           self.on_tm_trackbar)

        self.step_size = 1
        self.step_delay = 10
        self.tm_flag = False

        # Read all ten of the damage integer images and extract a binary mask
        # based on the alpha channel. Also, resize to a 360p height.
        self.orig_num_img, self.orig_num_mask = [None] * 10, [None] * 10
        self.num_img, self.num_mask = [None] * 10, [None] * 10
        for i in range(10):
            self.orig_num_img[i], self.orig_num_mask[i] = \
                util.get_image_and_mask(
                    "resources/{:}.png".format(i), gray_flag=True)
            self.num_img[i] = util.resize_img(self.orig_num_img[i], 360 / 480)
            self.num_mask[i] = util.resize_img(self.orig_num_mask[i],
                                               360 / 480)
        self.num_h, self.num_w = self.num_img[0].shape[:2]
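The three trackbar callbacks registered above are not shown in this example. A minimal sketch of what they presumably look like, assuming each one simply stores the current slider value (the callback names come from the cv2.createTrackbar calls; the bodies are assumptions):

    def on_step_trackbar(self, val):
        # Step through the video by at least one frame at a time.
        self.step_size = max(val, 1)

    def on_delay_trackbar(self, val):
        self.step_delay = val

    def on_tm_trackbar(self, val):
        # The trackbar toggles template matching between 0 (off) and 1 (on).
        self.tm_flag = bool(val)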
Example #2
    def __init__(self,
                 capture,
                 frame_range=None,
                 gray_flag=True,
                 save_flag=False,
                 show_flag=False,
                 wait_flag=False):

        self.capture = capture
        self.gray_flag = gray_flag
        self.save_flag = save_flag
        self.show_flag = show_flag

        # Predetermined parameters that have been tested to work best.
        self.calib_w_range = (24, 30)  # The possible template width values.
        self.conf_thresh = 0.8  # The cv2 Template Matching conf thresh.
        self.min_match_length_s = 30  # Minimum time of a "match" in seconds.
        self.num_init_frames = 30  # Frames used to init. the template size.
        self.num_port_frames = 20  # Frames used to find the port per match.
        self.prec_step_size = 2  # Fnum step size during precision sweep.
        self.max_prec_tl_gap_size = 4  # Max size of precise t.l. gaps to fill.
        self.max_tl_gap_size = 4  # Max size of timeline gaps to fill.
        self.roi_y_tolerance = 3  # The size to expand the ROI y-dimensions.
        self.step_size = 60  # Frame number step size during sweep.
        self.template_zero_radius = 2  # Size of match_mat subregion to zero.

        # Parameters that are redefined later on during initialization.
        self.template_roi = None  # A bounding box to search for templates.

        # Set the start/stop frame to the full video if frame_range undefined.
        if frame_range:
            self.start_fnum, self.stop_fnum = frame_range
        else:
            self.start_fnum = 0
            self.stop_fnum = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))

        # Set the wait_length for cv2.waitKey: 0 waits indefinitely, 1 = 1 ms.
        if wait_flag:
            self.wait_length = 0
        else:
            self.wait_length = 1

        # Read the percentage sign image file and extract a binary mask based
        # on the alpha channel. Also, resize to the 360p base height.
        self.orig_pct_img, self.orig_pct_mask = util.get_image_and_mask(
            "resources/pct.png", gray_flag)
        self.pct_img = util.resize_img(self.orig_pct_img, 360 / 480)
        self.pct_mask = util.resize_img(self.orig_pct_mask, 360 / 480)
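util.get_image_and_mask is referenced throughout these examples but never shown. Based on the comment above, a minimal sketch might read the PNG with its alpha channel and threshold the alpha into a binary mask (the implementation details are assumptions):

import cv2

def get_image_and_mask(img_location, gray_flag=False):
    # Read the image with its alpha channel intact (BGRA).
    img = cv2.imread(img_location, cv2.IMREAD_UNCHANGED)

    # Threshold the alpha channel into a binary (0/255) mask.
    _, mask = cv2.threshold(img[:, :, 3], 0, 255, cv2.THRESH_BINARY)

    # Drop the alpha channel, converting to grayscale if requested.
    if gray_flag:
        img = cv2.cvtColor(img, cv2.COLOR_BGRA2GRAY)
    else:
        img = cv2.cvtColor(img, cv2.COLOR_BGRA2BGR)
    return img, mask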
Example #3
def download(url, file, size=0):
    tries = 0
    while tries < 10:
        try:
            response = requests.get(url, stream=True, timeout=3)
            if response.status_code == 200:
                if size > 0:
                    img = np.asarray(bytearray(response.content),
                                     dtype=np.uint8)
                    img = cv2.imdecode(img, cv2.IMREAD_COLOR)
                    img = resize_img(img, size, 255)
                    cv2.imwrite(file.replace('.png', '.jpg'), img)
                else:
                    with open(file, 'wb') as f:
                        f.write(response.content)
            lock.release()
            return
        except Exception:
            tries += 1
            if tries >= 10:
                print("Failed to Download Image {}".format(file), flush=True)
                lock.release()
                return
            # print("Retry to Download Image {}".format(file), flush=True)
            continue
    lock.release()
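download() releases a module-level lock on every exit path, which implies the caller acquires it before each call, presumably to throttle concurrent downloads. A hypothetical driver (the lock and the item list are assumptions, not part of the original):

import threading

lock = threading.Lock()

def download_all(items):
    # items: iterable of (url, file) pairs.
    threads = []
    for url, file in items:
        # Acquire before starting each worker; download() releases the
        # lock once the request succeeds or finally gives up.
        lock.acquire()
        t = threading.Thread(target=download, args=(url, file))
        t.start()
        threads.append(t)
    for t in threads:
        t.join()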
Example #4
    def __getitem__(self, idx):
        '''
        Params:
            idx (integer): the index of the image we want to look at
        Returns:
            x (array): the transformed numpy array representing the image we want
            y (list): list of labels associated with the image
        '''

        # get prediction labels
        y = list(self.df.iloc[idx][list(self.label_cols)])
        if self.mode == "CheXpert":
            y = torch.tensor(y)

        # get images
        path = CHEXPERT_DIR / self.df.iloc[idx]["Path"]
        x = cv2.imread(str(path), 0)

        # transform images
        x = util.resize_img(x, self.resize_shape)
        x = Image.fromarray(x).convert('RGB')
        if self.data_transform is not None:
            x = self.data_transform(x)

        return x, y
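A dataset with this __getitem__ is typically consumed through a torch DataLoader. A minimal usage sketch, assuming the surrounding class is named CheXpertDataset and its constructor takes the attributes used above (both assumptions):

from torch.utils.data import DataLoader

# Hypothetical construction; the actual constructor is not shown above.
dataset = CheXpertDataset(df, label_cols, mode="CheXpert",
                          resize_shape=(320, 320), data_transform=None)
loader = DataLoader(dataset, batch_size=16, shuffle=True)
for x, y in loader:
    pass  # x: batch of transformed images, y: batch of labels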
Example #5
    def __getitem__(self, index):
        img_path = self.img_list[index % len(self.img_list)]
        img = cv2.imread(img_path)

        # transform to a torch tensor
        _, img_tensor = util.resize_img(img, self.net_h, self.net_w)
        img_tensor = img_tensor[0]
        # load the label
        label_path = self.label_list[index % len(self.label_list)]
        label_np = np.loadtxt(label_path).reshape(-1, 5)
        label = torch.from_numpy(label_np)
        height, width = img.shape[0], img.shape[1]

        if len(label) == 0:
            return None, None, None

        # transform labels to (x, y, w, h) normalized to the letterboxed image
        scaling = min(self.net_h / height, self.net_w / width)
        label[:, 1] *= width
        label[:, 1] *= scaling
        label[:, 1] += (self.net_w - width * scaling) / 2
        label[:, 1] /= self.net_w

        label[:, 2] *= height
        label[:, 2] *= scaling
        label[:, 2] += (self.net_h - height * scaling) / 2
        label[:, 2] /= self.net_h

        label[:, 3] *= width * scaling
        label[:, 3] /= self.net_w

        label[:, 4] *= height * scaling
        label[:, 4] /= self.net_h

        if self.augment:
            if np.random.random() < 0.5:
                # Flip the tensor that is returned, not the raw image.
                img_tensor, label = horisontal_flip(img_tensor, label)

        return img_path, img_tensor, label
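The block of label arithmetic above maps labels normalized to the original image onto the letterboxed network input. A small worked check with assumed values:

# Worked example: a 640x480 image letterboxed into a 416x416 input.
height, width, net_h, net_w = 480, 640, 416, 416
scaling = min(net_h / height, net_w / width)   # 0.65
pad_x = (net_w - width * scaling) / 2          # 0.0 (no horizontal padding)
pad_y = (net_h - height * scaling) / 2         # 52.0 pixels top and bottom
x = (0.5 * width * scaling + pad_x) / net_w    # a centered x stays at 0.5
y = (0.5 * height * scaling + pad_y) / net_h   # a centered y stays at 0.5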
Example #6
def load_feature(img_path):
    img = util.cv_imread(img_path)
    norm_img = img / 255.
    resized_img = util.resize_img(norm_img, size)
    crop = cv2.resize(resized_img, (size, size))
    return crop
Example #7
# -*- coding: utf-8 -*-
"""
Created on Thu Jun  6 11:26:49 2019

@author: hasee
"""

import yolov3
import util
import torch
import cv2

num_classes = 1
confidence = 0.3
nms_threshold = 0.4

classes = util.load_classes('data/animeface.names')
model = yolov3.yolov3_darknet('cfg/animeface.cfg')
# model.load_weights('weights/yolov3.weights')
model.load_state_dict(torch.load('model_state_dict.pt'))
img = cv2.imread('samples/test2.png')

net_h, net_w = int(model.net_info['height']), int(model.net_info['width'])

new_img, img_tensor = util.resize_img(img, net_h, net_w)
_, prediction = model(img_tensor, torch.cuda.is_available())
prediction = util.write_results(prediction, confidence, num_classes)

write_img = util.writebox(img, model, prediction, classes)
cv2.imwrite('test.png', write_img)
Example #8
def get_direction(dists):
    # There is no laser scanner providing true distances yet, so the
    # obstacle avoidance is very basic.
    return np.argmin(dists)


ts = time.time()
time_str = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d_%H-%M-%S')
img_dir = f'img/{time_str}'
util.mkdir_if_missing(img_dir)
count = 0
while rval:
    board = np.zeros((2 * height, width, 3), dtype=np.uint8)
    rval, raw_image = vc.read()
    raw_image = util.resize_img(raw_image)
    image = util.prep_image_for_model(raw_image)
    depth = inference_model.inference_depth(image, sess)
    depth = np.squeeze(depth)
    norm_depth = util.normalize_depth(depth)
    depth_rgb = util.depth_to_rgb(depth)
    depth_rgb *= 255
    depth_rgb = depth_rgb.astype(np.uint8)
    dists = get_dists(norm_depth)
    direction = get_direction(dists)
    raw_image = cv2.cvtColor(raw_image, cv2.COLOR_RGB2BGR)
    depth_bgr = cv2.cvtColor(depth_rgb, cv2.COLOR_RGB2BGR)
    board[:height, :, :] = raw_image
    board[height:, :, :] = depth_bgr
    board = draw_regions(board)
    board = draw_direction(board, direction)
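    # The loop is cut off here before the composed board is used. A plausible
    # continuation, assuming the board is saved into the timestamped img_dir
    # created above and shown in a window (both assumptions, suggested by the
    # otherwise-unused count and img_dir variables):
    cv2.imwrite(f'{img_dir}/{count:06d}.png', board)
    count += 1
    cv2.imshow('depth', board)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break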
Example #9
def load_feature(img_path):
    img = util.cv_imread(img_path)
    norm_img = img / 255.
    resized_img = util.resize_img(norm_img, config.INPUT_SIZE)
    crop = cv2.resize(resized_img, (config.INPUT_SIZE, config.INPUT_SIZE))
    return crop
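Both this example and Example #6 pass the result of util.resize_img through cv2.resize to force a square input, which suggests this project's resize_img preserves the aspect ratio. A minimal sketch consistent with that usage (an assumption; the other examples on this page come from different projects whose resize_img signatures differ):

import cv2

def resize_img(img, size):
    # Scale so the shorter side equals `size`, preserving aspect ratio.
    h, w = img.shape[:2]
    scale = size / min(h, w)
    return cv2.resize(img, (int(round(w * scale)), int(round(h * scale))))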