import cv2
import matplotlib.pyplot as plt
import torch


def _setup_image(model_input, batch_id=None):
    # Handle torch Variable instances
    if isinstance(model_input, torch.autograd.Variable):
        img = model_input.data
    else:
        img = model_input
    if batch_id is not None:
        img = img[batch_id]
    # Copy to CPU if needed
    if isinstance(img, torch.cuda.FloatTensor):
        img = img.cpu()
    # NumPy-ify and change from CHW to HWC
    img = img.numpy().transpose((1, 2, 0))
    # Undo image normalization
    img = img * (-255) + 255
    if img.shape[2] == 1:
        # matplotlib plots grayscale images correctly only if you
        # get rid of the channel dimension
        img = img[:, :, 0]
        cmap = plt.cm.gray
    else:
        # OpenCV images are BGR whereas matplotlib assumes RGB
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        cmap = None  # fall back to matplotlib's default colormap
    return img, cmap
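# A minimal usage sketch for _setup_image(), assuming a normalized CHW
# batch tensor such as one produced by a torch DataLoader; the tensor
# contents and batch index below are placeholders, not from the original.
batch = torch.rand(2, 1, 28, 28)  # hypothetical batch of 1-channel images
img, cmap = _setup_image(batch, batch_id=0)
plt.imshow(img, cmap=cmap)
plt.axis('off')
plt.show()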
from cv2 import resize, cvtColor, COLOR_BGR2GRAY, INTER_NEAREST
from numpy import int16


def grab_frame(dev, width, height, col_from, col_to):
    # Grab a frame, convert to 16-bit grayscale, resize with
    # nearest-neighbour interpolation and crop columns [col_from, col_to)
    valid, raw = dev.read()
    img = resize(cvtColor(raw, COLOR_BGR2GRAY).astype(int16),
                 (width, height),
                 interpolation=INTER_NEAREST)[:, col_from:col_to]
    return img
def grab_first(dev, res):
    # Grab one frame to establish the output geometry: scale so the
    # height equals res, then centre-crop the width down to res columns
    valid, raw = dev.read()
    height, width, depth = raw.shape
    new_height = res
    new_width = int(float(new_height * width) / float(height))
    col_from = (new_width - res) // 2
    col_to = col_from + res
    img = resize(cvtColor(raw, COLOR_BGR2GRAY).astype(int16),
                 (new_width, new_height),
                 interpolation=INTER_NEAREST)[:, col_from:col_to]
    return img, new_width, new_height, col_from, col_to
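# The two helpers above are meant to be chained: grab_first() measures the
# crop geometry from the first frame and grab_frame() reuses it for every
# later frame. A minimal capture-loop sketch, assuming a standard
# cv2.VideoCapture source; the device index, resolution and frame count
# below are placeholders.
from cv2 import VideoCapture

dev = VideoCapture(0)  # hypothetical camera index
res = 128              # hypothetical square output resolution
first, new_width, new_height, col_from, col_to = grab_first(dev, res)
for _ in range(100):
    frame = grab_frame(dev, new_width, new_height, col_from, col_to)
    # frame is a res x res int16 grayscale image
dev.release()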
import cv2
import numpy


def run(fps, vid_src, num_frames, width, height, dtype):
    vid_src.set(cv2.CAP_PROP_FPS, fps)
    stats = []
    # Reference frame starts at mid-gray (128)
    ref_frame = dtype(128) * numpy.ones((height, width), dtype=dtype)
    curr_frame = numpy.zeros((height, width), dtype=dtype)
    abs_diff = numpy.zeros((height, width), dtype=dtype)
    diff = numpy.zeros((height, width), dtype=dtype)
    for i in range(num_frames):
        curr_frame[:] = cv2.cvtColor(vid_src.read()[1],
                                     cv2.COLOR_BGR2GRAY).astype(dtype)
        cv2.imshow("", cv2.cvtColor(curr_frame.astype(numpy.uint8),
                                    cv2.COLOR_GRAY2RGB))
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        # Difference against the previous frame, which becomes
        # the new reference
        diff = curr_frame - ref_frame
        ref_frame[:] = curr_frame
        abs_diff = numpy.abs(diff).astype(dtype)
        stats.append(get_stats(abs_diff))
    return stats
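# run() depends on a get_stats() helper that is not shown in this section;
# the stand-in below is a guess at a plausible per-frame summary of the
# absolute difference, not the original implementation.
def get_stats(abs_diff):
    # Hypothetical: summarise the frame-to-frame absolute difference
    return {'mean': float(numpy.mean(abs_diff)),
            'max': int(numpy.max(abs_diff)),
            'nonzero': int(numpy.count_nonzero(abs_diff))}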
import cv2
import numpy as np


def grab_frame(self):  # method of a camera-capture class
    if self._is_virtual_cam:
        valid_frame, self._curr_frame[:] = self._video_source.read(
            self._ref_frame)
        return True

    # Allocate the raw-frame buffer once, then reuse it
    if self._raw_frame is None or self._scale_changed:
        valid_frame, self._raw_frame = self._video_source.read()
    else:
        valid_frame, self._raw_frame[:] = self._video_source.read()
    if not valid_frame:
        return False

    # Convert to 16-bit grayscale, reallocating only when needed
    if self._gray_frame is None or self._scale_changed:
        self._gray_frame = cv2.cvtColor(
            self._raw_frame, cv2.COLOR_BGR2GRAY).astype(np.int16)
    else:
        self._gray_frame[:] = cv2.cvtColor(self._raw_frame,
                                           cv2.COLOR_BGR2GRAY)

    # Recompute the output geometry when the source or scale changes
    if self._get_sizes or self._scale_changed:
        self._get_sizes = False
        self._scale_changed = False
        self._img_height, self._img_width = self._gray_frame.shape
        self._img_ratio = float(self._img_width) / float(self._img_height)
        self._img_scaled_width = int(float(self._out_res) * self._img_ratio)

        if self._scale_img:
            diff = self._img_scaled_width - self._out_res
        else:
            diff = self._img_width - self._out_res
        self._img_width_crop_l = diff // 2
        self._img_width_crop_r = self._img_width_crop_l + self._out_res

        diff = self._img_height - self._out_res
        self._img_height_crop_u = diff // 2
        self._img_height_crop_b = self._img_height_crop_u + self._out_res

        self._tmp_frame = np.zeros((self._out_res, self._img_scaled_width))

    if self._scale_img:
        # Scale to the output height, then centre-crop the width
        self._tmp_frame[:] = cv2.resize(
            self._gray_frame, (self._img_scaled_width, self._out_res),
            interpolation=cv2.INTER_NEAREST)
        self._curr_frame[:] = self._tmp_frame[
            :, self._img_width_crop_l:self._img_width_crop_r]
    else:
        # No scaling: centre-crop both dimensions
        self._curr_frame[:] = self._gray_frame[
            self._img_height_crop_u:self._img_height_crop_b,
            self._img_width_crop_l:self._img_width_crop_r]

    return True
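# A hypothetical driver for the method above, assuming `cam` is an
# instance of the owning class whose constructor initialised
# _video_source, _curr_frame, _out_res and the various flags; this is
# illustrative only, not part of the original class.
while cam.grab_frame():
    cv2.imshow('frame', cam._curr_frame.astype(np.uint8))
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break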