def __init__(self):
    super().__init__()
    self.image = None
    print_log("Start", "debug")
    self.initUI()
    print_log("End", "debug")
def batch(self, image):
    print_log("Start")
    # ImageNet normalization expected by the torchvision segmentation models.
    batch = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])(image).unsqueeze(0)
    self._batch = batch
    print_log("End")
def get_classes(self):
    # Total number of pixels in the prediction map.
    total = self.predictions.shape[0] * self.predictions.shape[1]
    classes, counts = np.unique(self.predictions, return_counts=True)
    classes = [Label(_class).name for _class in classes]
    percentages = [count / total * 100 for count in counts]
    print_log("There are {} classes.".format(len(classes)))
    for _class, percentage in zip(classes, percentages):
        print_log("'{}' ({:.2f}%)".format(_class, percentage))
    return classes, percentages
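# Quick illustration of the np.unique(return_counts=True) call used above,
# on a toy 2x2 prediction map (the values stand in for Label enum indices):
# >>> np.unique(np.array([[0, 0], [0, 15]]), return_counts=True)
# (array([ 0, 15]), array([3, 1]))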
def to_cuda(self):
    if self.cuda:
        print_log("using cuda")
        # Tensor.to() is not in-place, so the moved batch must be reassigned;
        # Module.to() moves the model's parameters in place.
        self._batch = self.batch.to('cuda')
        self.model.to('cuda')
    else:
        print_log("using cpu")
        self._batch = self.batch.to('cpu')
        self.model.to('cpu')
    return self.cuda
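# Illustration of the Tensor.to() / Module.to() distinction handled above
# (assuming a CUDA device is available):
# >>> t = torch.zeros(1)
# >>> t.to('cuda').device    # .to() returns a *new* tensor on the GPU
# device(type='cuda', index=0)
# >>> t.device               # the original tensor is left on the CPU
# device(type='cpu')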
def show_image(self, img):
    print_log("Start")
    qimage = None
    if isinstance(img, str):
        qimage = QImage(img)
        # Keep an RGB copy of the loaded image for later processing.
        self.image = cv2.cvtColor(cv2.imread(img), cv2.COLOR_BGR2RGB)
    elif isinstance(img, np.ndarray):
        qimage = q2n.array2qimage(img, normalize=False)
        self.image = img
    pixmap = QPixmap.fromImage(qimage)
    im_w, im_h = pixmap.size().width(), pixmap.size().height()
    # Scale the longer side down to the label size, preserving aspect ratio.
    if im_w > im_h:
        pixmap = pixmap.scaledToWidth(self.label_min)
    else:
        pixmap = pixmap.scaledToHeight(self.label_min)
    self.imageLabel.setPixmap(pixmap)
    self.imageLabel.setAlignment(Qt.AlignCenter)
    print_log("End")
def detect_image(self):
    print_log("Start")
    self.set_status("Detecting...")
    if self.image is None:
        print_log("Image not loaded!", "warn")
        self.set_status("You must load an image.")
        return
    self.set_status("Detecting finished.")
    print_log("End")
def model(self, model_name="deeplabv3_resnet101"):
    print_log("Start")
    print_log("model: {}".format(model_name))
    # Load a pretrained torchvision segmentation model via torch.hub.
    _model = torch.hub.load('pytorch/vision:v0.6.0', model_name, pretrained=True)
    _model.eval()
    self._model = _model
    print_log("End")
def inference(self):
    print_log("Start")
    start_t = time.time()
    with torch.no_grad():
        # The model returns a dict; 'out' holds per-class logits of shape [C, H, W].
        self._output = self.model(self.batch)['out'][0]
    t = time.time() - start_t
    print_log("Inference time: {:.0f}m {:.3f}s".format(t // 60, t % 60))
    # Per-pixel class-index map.
    self._predictions = self.output.argmax(0).byte().cpu().numpy()
    print_log("End")
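# Illustrative sketch (not part of the original class): the same inference
# pipeline as a standalone helper, following the torchvision DeepLabV3 hub
# example. The name `_example_segment` and the `pil_image` argument are
# assumptions for this example; it relies on the module's torch/torchvision
# imports and expects `model` to be in eval() mode on the same device as the input.
def _example_segment(pil_image, model):
    batch = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])(pil_image).unsqueeze(0)           # [1, 3, H, W] normalized batch
    with torch.no_grad():
        logits = model(batch)['out'][0]  # [num_classes, H, W] per-pixel scores
    return logits.argmax(0).byte().cpu().numpy()  # [H, W] class-index map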
def image(self, image):
    print_log("Start")
    print_log("Type: {}".format(type(image)))
    if isinstance(image, str):
        image = cv2.imread(image)
        self._image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    elif isinstance(image, np.ndarray):
        self._image = image
    elif isinstance(image, Image.Image):
        self._image = np.asarray(image)
    print_log("Shape: {}".format(self._image.shape))
def get_prediction(self, label: str):
    print_log("class: {}".format(label))
    prediction = np.where(self.predictions == Label[label.upper()].value, 1, 0)
    return prediction
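# Illustration only (assumed helper, not in the original code): applying the
# 0/1 mask returned by get_prediction() to an RGB image, e.g. to blank out
# every pixel that does not belong to the requested class.
def _example_apply_mask(rgb_image, mask):
    # Broadcast the [H, W] mask across the three colour channels.
    return rgb_image * mask[..., np.newaxis].astype(rgb_image.dtype)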
def initUI(self):
    print_log("Start", "debug")
    self.setFixedSize(WIDTH, HEIGHT)
    self.setWindowTitle('Scroll Area Demonstration')

    # Widgets
    self.imageLabel = QLabel("ImageLabel")
    self.imageLabel.setBackgroundRole(QPalette.Dark)
    label_w, label_h = self.imageLabel.size().width(), self.imageLabel.size().height()
    self.label_min = min(label_w, label_h)
    self.imageLabel.resize(self.label_min, self.label_min)
    print_log("Resize imageLabel")
    self.show_image(os.path.join(IMG_DIR, "sample2.jpg"))
    self.set_status("HI")

    self.openBtn = QPushButton("Open")
    self.openBtn.clicked.connect(self.open_image)
    self.detectBtn = QPushButton("Detect")
    self.detectBtn.clicked.connect(self.detect_image)
    self.compareBtn = QPushButton("Compare")
    self.viewWideBtn = QPushButton("View Wide")
    self.saveBtn = QPushButton("Save")
    self.scrollArea = QScrollArea()

    # Layouts
    self.btnGroup = QGridLayout()
    self.btnGroup.addWidget(self.openBtn, 0, 0)
    self.btnGroup.addWidget(self.detectBtn, 0, 1)
    self.btnGroup.addWidget(self.compareBtn, 0, 2)
    self.btnGroup.addWidget(self.viewWideBtn, 0, 3)
    self.btnGroup.addWidget(self.saveBtn, 0, 4)
    self.viewLayout = QVBoxLayout()
    self.layout = QVBoxLayout()
    self.layout.addWidget(self.imageLabel)
    self.layout.addLayout(self.btnGroup)
    self.layout.addWidget(self.scrollArea)

    # Shortcuts
    self.openFile = QShortcut(QKeySequence("Ctrl+O"), self)
    self.openFile.activated.connect(self.open_image)

    centralWidget = QWidget()
    centralWidget.setLayout(self.layout)
    self.setCentralWidget(centralWidget)
    print_log("End", "debug")
def open_image(self):
    print_log("Start")
    self.set_status("Select image...")
    fname = QFileDialog.getOpenFileName(
        self, 'Open image', str(Path.home()),
        "Image files (*.jpg *.jpeg *.gif *.png)")
    if fname[0]:
        print_log("File path is '{}'".format(fname[0]))
        try:
            # Check the real MIME type rather than trusting the file extension.
            kind = filetype.guess(fname[0])
            print_log("Mime type: '{}'".format(kind.mime))
            if kind.mime.split("/")[0] == "image":
                self.show_image(fname[0])
                self.set_status("Opened.")
            else:
                print_log("Not supported file")
                self.set_status("Not supported file.")
        except FileNotFoundError:
            print_log("File not found!")
            self.set_status("File not found!")
    else:
        print_log("File path is not defined.")
        self.set_status("File not selected.")
    print_log("Finish")
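# Launch sketch (assumptions: this class is a QMainWindow subclass and the
# name "App" below is hypothetical; QApplication and sys would come from the
# module's imports):
#
# if __name__ == "__main__":
#     app = QApplication(sys.argv)
#     window = App()
#     window.show()
#     sys.exit(app.exec_())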