def draw_input_image(self, idx1=None, idx2=None, new_image=False):
    """Render the input image, overlaying box visualizations if a node is selected.

    - idx1: idx of object1 (subject)
    - idx2: idx of object2 (object)
    """
    canvas = np.copy(self.image)
    if idx1 is not None:
        subj_box = self.boxes[idx1].cpu().numpy()
        if idx2 is None:
            # An object node is selected: highlight its box only.
            canvas = eval_utils.draw_image_box(canvas, subj_box)
        else:
            # A predicate node is selected: draw the edge and both boxes.
            obj_box = self.boxes[idx2].cpu().numpy()
            canvas = eval_utils.draw_image_edge(canvas, subj_box, obj_box)
            canvas = eval_utils.draw_image_box(canvas, subj_box)
            canvas = eval_utils.draw_image_box(canvas, obj_box)
    qt_image = QtGui.QImage(canvas, canvas.shape[1], canvas.shape[0],
                            QtGui.QImage.Format_RGB888)
    self.pixmap = QtGui.QPixmap(qt_image)
    self.imb.setPixmap(self.pixmap.scaled(200, 200))
    if new_image:
        # Hide the previous prediction and count the freshly loaded image.
        self.ima.setVisible(0)
        self.imLoadCounter += 1
def run(self):
    """Thread entry point: classify every frame of the loaded video.

    Reads frames from ``self.file``, classifies each one, overlays the
    rolling-averaged predictions on a resized copy of the frame, and emits
    the annotated frame via ``changePixmap`` plus status/result text via
    ``infLabel`` / ``trgLabel``.
    """
    print('running thread')
    self.infLabel.emit('Video loaded. Analyzing...')
    vidcap = cv2.VideoCapture(self.file)
    success, image = vidcap.read()
    while success:
        output = image.copy()
        output = imutils.resize(output, width=400)
        truePFR, trueFT = self.model.classify(image)
        print(truePFR, trueFT)
        # Smooth per-frame predictions over a rolling window.
        truePFR, trueFT = self.rollAverage(truePFR, trueFT)
        pfrtext = "PFR : {pfr}".format(pfr=truePFR)
        fttext = "Fuel Type : {ft}".format(ft=trueFT)
        cv2.putText(output, pfrtext, (3, 20), cv2.FONT_HERSHEY_SIMPLEX,
                    0.5, (0, 255, 0), 2)
        cv2.putText(output, fttext, (3, 40), cv2.FONT_HERSHEY_SIMPLEX,
                    0.5, (0, 255, 0), 2)
        # OpenCV frames are BGR; rgbSwapped() converts to the RGB order
        # that Format_RGB888 expects.
        output = QtGui.QImage(output.data, output.shape[1], output.shape[0],
                              QtGui.QImage.Format_RGB888).rgbSwapped()
        self.changePixmap.emit(output)
        self.trgLabel.emit(pfrtext + '\n' + fttext)
        # Fixed typo: 'pedictions' -> 'predictions'.
        self.infLabel.emit('Running predictions on each frame ...')
        success, image = vidcap.read()
    # Release the capture handle instead of leaking it.
    vidcap.release()
    print('Done')
    self.infLabel.emit('Finished! Click "Load File" to analyze again.')
def paintEvent(self, event):
    if hasattr(self, "_update_dpi") and self._update_dpi():
        return  # matplotlib#19123 (<3.4).
    # We always repaint the full canvas (doing otherwise would require an
    # additional copy of the buffer into a contiguous block, so it's not
    # clear it would be faster).
    buf = _util.cairo_to_premultiplied_argb32(
        self.get_renderer()._get_buffer())
    height, width, _ = buf.shape
    # The image buffer is not necessarily contiguous, but the padding
    # in the ARGB32 case (each scanline is 32-bit aligned) happens to
    # match what QImage requires; in the RGBA128F case the documented Qt
    # requirement does not seem necessary?
    if QtGui.__name__.startswith("PyQt6"):
        from PyQt6 import sip
        ptr = sip.voidptr(buf)
    else:
        ptr = buf
    qimage = QtGui.QImage(
        ptr, width, height,
        QtGui.QImage.Format(6))  # ARGB32_Premultiplied
    getattr(qimage, "setDevicePixelRatio", lambda _: None)(
        self.device_pixel_ratio)
    # Work around a QImage refcount leak on old PySide:
    # https://bugreports.qt.io/browse/PYSIDE-140
    if (QtCore.__name__.startswith("PySide")
            and QtCore.__version_info__ < (5, 12)):
        ctypes.c_long.from_address(id(buf)).value -= 1
    painter = QtGui.QPainter(self)
    painter.eraseRect(self.rect())
    painter.drawImage(0, 0, qimage)
    self._draw_rect_callback(painter)
    painter.end()
def paintEvent(self, event: Any) -> None:
    """Pull the contents of the panda texture to the widget."""
    # Grab the raw framebuffer and wrap it in a Qt image. `QImage`
    # apparently does not manage the lifetime of the input data buffer,
    # so `data` must stay alive in this local scope until painting ends.
    data = self.get_screenshot('RGBA', raw=True)
    width, height = self._app.buff.getSize()
    img = QtGui.QImage(data, width, height,
                       QtGui.QImage.Format_RGBA8888).mirrored()
    # Blit the converted image onto the widget surface.
    self.paint_surface.begin(self)
    self.paint_surface.drawImage(0, 0, img)
    self.paint_surface.end()
def analyse(self):
    """Classify the loaded image file and display the annotated result.

    Reads ``self.file``, runs the classifier, overlays the PFR and
    fuel-type predictions on a resized copy of the image, and shows the
    result in ``self.label``.
    """
    print('Analyzing data..')
    try:
        img = cv2.imread(self.file)
        if img is None:
            # cv2.imread signals failure by returning None, not raising;
            # surface it through the existing error path.
            raise IOError('cv2.imread failed for: {}'.format(self.file))
        self.output = img.copy()
        self.output = imutils.resize(self.output, width=400)
    except Exception as e:
        print('Error in reading Image: ', e)
        # Bug fix: previously execution fell through and crashed on an
        # unbound/None `img` in classify(); bail out instead.
        return
    truePFR, trueFT = self.model.classify(img)
    # Overlay the prediction text on the preview image.
    pfrtext = "PFR : {pfr}".format(pfr=truePFR)
    fttext = "Fuel Type : {ft}".format(ft=trueFT)
    cv2.putText(self.output, pfrtext, (3, 20), cv2.FONT_HERSHEY_SIMPLEX,
                0.5, (0, 255, 0), 2)
    cv2.putText(self.output, fttext, (3, 40), cv2.FONT_HERSHEY_SIMPLEX,
                0.5, (0, 255, 0), 2)
    # BGR -> RGB for Qt display.
    self.output = QtGui.QImage(self.output.data, self.output.shape[1],
                               self.output.shape[0],
                               QtGui.QImage.Format_RGB888).rgbSwapped()
    self.label.setPixmap(QtGui.QPixmap.fromImage(self.output))
def run(self):
    """Thread entry point: classify a single loaded image file.

    Reads ``self.file``, runs the classifier, overlays the PFR and
    fuel-type predictions on a resized copy, and emits the annotated
    image via ``changePixmapImage`` plus text via ``trgLabel``/``infLabel``.
    """
    print('running thread Image')
    print('Analyzing data..')
    self.infLabel.emit('Image loaded. Analyzing...')
    try:
        img = cv2.imread(self.file)
        if img is None:
            # cv2.imread signals failure by returning None, not raising;
            # surface it through the existing error path.
            raise IOError('cv2.imread failed for: {}'.format(self.file))
        output = img.copy()
        output = imutils.resize(output, width=400)
    except Exception as e:
        print('Error in reading Image: ', e)
        # Bug fix: previously execution fell through and crashed on an
        # unbound/None `img` in classify(); report and bail out instead.
        self.infLabel.emit('Error reading image. Click "Load File" to try again.')
        return
    truePFR, trueFT = self.model.classify(img)
    # Overlay the prediction text on the preview image.
    pfrtext = "PFR : {pfr}".format(pfr=truePFR)
    fttext = "Fuel Type : {ft}".format(ft=trueFT)
    cv2.putText(output, pfrtext, (3, 20), cv2.FONT_HERSHEY_SIMPLEX,
                0.5, (0, 255, 0), 2)
    cv2.putText(output, fttext, (3, 40), cv2.FONT_HERSHEY_SIMPLEX,
                0.5, (0, 255, 0), 2)
    # BGR -> RGB for Qt display.
    output = QtGui.QImage(output.data, output.shape[1], output.shape[0],
                          QtGui.QImage.Format_RGB888).rgbSwapped()
    self.trgLabel.emit(pfrtext + '\n' + fttext)
    self.changePixmapImage.emit(output)
    self.infLabel.emit('Finished! Click "Load File" to analyze again.')
def gen_image(self):
    """Generates an image, as indicated by the modified graph."""
    # Prefer the user-modified triples when they exist.
    triples_ = self.triples if self.new_triples is None else self.new_triples
    query_feats = None

    model_out = self.model(
        self.new_objs, triples_, None,
        boxes_gt=self.boxes,
        masks_gt=None,
        src_image=self.imgs_in,
        mode=self.mode,
        query_feats=query_feats,
        keep_box_idx=self.keep_box_idx,
        keep_feat_idx=self.keep_feat_idx,
        combine_gt_pred_box_idx=self.combine_gt_pred_box_idx,
        keep_image_idx=self.keep_image_idx,
        random_feats=args.random_feats,
        get_layout_boxes=True)
    imgs_pred, boxes_pred, masks_pred, noised_srcs, _, layout_boxes = model_out

    # Convert the predicted tensor batch into an HWC uint8-style array.
    image = imagenet_deprocess_batch(imgs_pred)
    image = image[0].detach().numpy().transpose(1, 2, 0).copy()
    if args.update_input:
        self.image = image.copy()

    # Show the prediction in the output widget.
    qt_image = QtGui.QImage(image, image.shape[1], image.shape[0],
                            QtGui.QImage.Format_RGB888)
    pred_pixmap = QtGui.QPixmap(qt_image)
    self.ima.setPixmap(pred_pixmap.scaled(200, 200))
    self.ima.setVisible(1)
    self.imCounter += 1

    if args.update_input:
        # Reset everything so that the predicted image is now the input
        # image for the next step.
        self.imgs = imgs_pred.detach().clone()
        self.imgs_in = torch.cat(
            [self.imgs, torch.zeros_like(self.imgs[:, 0:1, :, :])], 1)
        self.draw_input_image()
        self.boxes = layout_boxes.detach().clone()
        self.keep_box_idx = torch.ones_like(self.objs.unsqueeze(1),
                                            dtype=torch.float)
        self.keep_feat_idx = torch.ones_like(self.objs.unsqueeze(1),
                                             dtype=torch.float)
        self.keep_image_idx = torch.ones_like(self.objs.unsqueeze(1),
                                              dtype=torch.float)
        self.combine_gt_pred_box_idx = torch.zeros_like(self.objs)
    else:
        # Input image is still the original one - don't reset anything.
        # If an object is added for the first time, the GT/input box is
        # still a dummy (set in add_triple); in this case, update the
        # GT/input box using the box predicted from SGN, so that it can be
        # used in future changes that rely on the GT/input box, e.g.
        # replacement.
        self.boxes = (self.added_objs_idx * layout_boxes.detach().clone()
                      + (1 - self.added_objs_idx) * self.boxes)
        self.added_objs_idx = torch.zeros_like(self.objs.unsqueeze(1),
                                               dtype=torch.float)