Code example #1
    def get_image2_luminance(self):
        # Lazily convert frame 2 to its luminance channel and cache the result.
        if self.image_2:
            if not self.image_2_luminance:
                start_time = time.time()
                self.image_2_luminance = ImageConverter.luminance_qimage(self.image_2)
                klog("Luminance conversion took: %.2f seconds" % (time.time() - start_time))
            return self.image_2_luminance
        else:
            return None
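
`ImageConverter.luminance_qimage` itself is not shown here. As a rough idea of what such a conversion can look like, here is a minimal sketch, assuming PyQt4 and the BT.601 luma weights; it is not the project's actual implementation.

# Hypothetical sketch: convert a QImage to an 8-bit grayscale image using the
# BT.601 luma weights (0.299 R + 0.587 G + 0.114 B).
from PyQt4.QtGui import QImage, qRed, qGreen, qBlue, qRgb

def luminance_qimage_sketch(image):
    gray = QImage(image.width(), image.height(), QImage.Format_RGB32)
    for y in xrange(image.height()):
        for x in xrange(image.width()):
            rgb = image.pixel(x, y)
            luma = int(0.299 * qRed(rgb) + 0.587 * qGreen(rgb) + 0.114 * qBlue(rgb))
            gray.setPixel(x, y, qRgb(luma, luma, luma))
    return gray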
Code example #2
    def _find_frame_vectors(self):
        if self.get_image1_luminance() and self.get_image2_luminance():

            klog("Evaluating image with size: %dx%d" % (self.get_image1_luminance().width(), self.get_image1_luminance().height()))
            comp = ImageComparator(self.image_1_luminance)

            start_time = time.time()

            self.vectors = comp.get_motion_vectors(self.image_2_luminance, self.searcher())

            elapsed_time = time.time() - start_time
            self.ui.searchElapsedTimeLabel.setText("%.2f seconds" % elapsed_time)

            self._draw_compressed_frame2()
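
`ImageComparator.get_motion_vectors` is used here as a black box: it presumably walks the first frame block by block and runs the searcher on each block. A speculative sketch of that loop, assuming it returns the same dictionaries consumed by the drawing code further down (keys x, y, to_x, to_y, MAD) and the searcher.search() signature visible in code example #3, could be:

# Hypothetical sketch of the block-matching loop driven by a searcher object.
# Only the field names and the search() signature come from these snippets;
# the rest is an assumption.
def get_motion_vectors_sketch(image1_pixels, image2_pixels, searcher, block_size, width, height):
    vectors = []
    for y in range(0, height - block_size + 1, block_size):
        for x in range(0, width - block_size + 1, block_size):
            to_x, to_y, MAD, checks = searcher.search(image1_pixels, x, y, image2_pixels)
            vectors.append({"x": x, "y": y, "to_x": to_x, "to_y": to_y, "MAD": MAD})
    return vectors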
Code example #3
    def search(self, image1_pixels, x_start, y_start, image2_pixels):

        self.reset_search()

        block_size = self.block_size
        pass_step = self.pass_step

        # Reference block from frame 1: the search looks for where it moved in frame 2.
        subimage_1_pixels = ImageConverter.sub_pixels(image1_pixels, x_start, y_start, x_start+block_size, y_start+block_size)

        p = pass_step
        ds = p/2                  # initial search step (half the pass step), reduced by 1 each pass
        s = 1                     # pass counter
        xs = x_start              # current search centre
        ys = y_start
        xs_min = -1
        ys_min = -1
        best_local_MAD = 10000    # sentinel larger than any reachable MAD
        best_global_MAD = 10000
        best_x = -1
        best_y = -1

        klog("Check block from %d, %d" % (x_start, y_start))

        while ds >= 1:
            # Probe the centre and the eight positions at distance ds around it.
            for x in [xs, xs+ds, xs-ds]:
                for y in [ys, ys+ds, ys-ds]:

                    # Skip candidates whose block would fall outside frame 2.
                    if not ImageComparator.is_valid_coordinate(x, y, block_size, image2_pixels):
                        continue

                    MAD = self.calculate_MAD(subimage_1_pixels, image2_pixels, x, y, x+block_size, y+block_size)

                    if MAD < best_local_MAD:
                        best_local_MAD = MAD
                        xs_min = x
                        ys_min = y

                    # Check if the local MAD is also the best global MAD
                    if MAD < best_global_MAD:
                        best_global_MAD = MAD
                        best_x = x
                        best_y = y

            # Re-centre the next pass on the best candidate and shrink the step.
            s += 1
            ds -= 1
            xs = xs_min
            ys = ys_min

        print "-"
        return best_x, best_y, best_global_MAD, self._MAD_checks_count
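
Two helpers used above are not reproduced in this post: `is_valid_coordinate`, which simply rejects candidate positions whose block would not fit inside the frame, and `calculate_MAD`, the cost function of the search. The mean absolute difference between the reference block and a candidate block at (x, y) is just the averaged per-pixel absolute difference; a minimal sketch, assuming the pixels are stored as flat row-major lists (which the real ImageConverter/ImageComparator classes may not do), could be:

# Hypothetical sketch: mean absolute difference between the already-extracted
# reference block and the candidate block at (x, y) in the second frame.
def calculate_MAD_sketch(block_pixels, frame2_pixels, x, y, block_size, frame2_width):
    total = 0
    for dy in xrange(block_size):
        for dx in xrange(block_size):
            p1 = block_pixels[dy * block_size + dx]
            p2 = frame2_pixels[(y + dy) * frame2_width + (x + dx)]
            total += abs(p1 - p2)
    return float(total) / (block_size * block_size)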
Code example #4
    def _draw_motion_vectors_old(self):
        scene = self.ui.frame2GraphicsView.scene()
        scene.clear()
        self._draw_frame(self.image_2, self.ui.frame2GraphicsView)

        pen = QPen(Qt.red, 1, Qt.SolidLine)

        for v in self.vectors:
            x = int(v["x"])
            y = int(v["y"])
            to_x = int(v["to_x"])
            to_y = int(v["to_y"])
            MAD = v["MAD"]

            klog( "(%d, %d) => (%d, %d)" % (x,y, to_x, to_y) )

            if scene:
                if MAD < self.ui.MADThresholdSpingBox.value() and (x != to_x or y != to_y):
                    scene.addLine(x,y,to_x, to_y, pen)
Code example #5
    def _analyzeVideo(self):
        if not self.video:
            print "Please first choose a video"
        else:
            dialog = QMessageBox(self)
            dialog.setText("Please wait while the video is analyzed...")
            dialog.show()

            start_time = time.time()
            comparator = VideoComparator(self.video, self.searcher())
            holden_frames, self._discared_frames = comparator.compared_frames_statuses(self.ui.maxVectDistThreSpinBox.value(), self.ui.MADThresholdSpingBox.value())
            klog("Analyzing the video took: %.2f seconds" % (time.time()-start_time))
            print "Held frames: " + str(holden_frames)
            print "Discarded frames: " + str(self._discared_frames)
            dialog.close()

            model = QAnalyzedFramesTimelineListModel(self.video, self._discared_frames)
            self.ui.analyzedFramesTimelineListView.setModel(model)
            self.ui.analyzedFramesTimelineListView.setItemDelegate(QFramesTimelineDelegate())
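
`VideoComparator.compared_frames_statuses` is the decision step: for each analyzed frame it looks at the motion vectors and marks the frame as discardable when the motion is small enough. The actual criteria are not shown in these snippets; a purely illustrative sketch of how the two spin-box thresholds passed in above (maximum vector distance and MAD) could drive the choice is:

# Hypothetical sketch of the keep/discard decision, not the project's code.
import math

def compared_frames_statuses_sketch(frame_vectors, max_distance_threshold, mad_threshold):
    held, discarded = [], []
    for index, vectors in enumerate(frame_vectors):
        distances = [math.hypot(v["to_x"] - v["x"], v["to_y"] - v["y"])
                     for v in vectors if v["MAD"] < mad_threshold]
        longest = max(distances) if distances else 0
        if longest <= max_distance_threshold:
            discarded.append(index)   # motion is small: frame can be rebuilt later
        else:
            held.append(index)        # motion is large: keep the original frame
    return held, discarded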
Code example #6
    def _draw_motion_vectors(self):

        sze = 3  # arrowhead size in pixels
        scene = self.ui.frame2GraphicsView.scene()
        scene.clear()
        self._draw_frame(self.image_2, self.ui.frame2GraphicsView)

        pen = QPen(Qt.red, 1, Qt.SolidLine)

        for v in self.vectors:

            x = int(v["x"])
            y = int(v["y"])
            to_x = int(v["to_x"])
            to_y = int(v["to_y"])
            MAD = v["MAD"]

            klog("(%d, %d) => (%d, %d)" % (x, y, to_x, to_y))

            if scene:
                if MAD < self.ui.MADThresholdSpingBox.value() and (x != to_x or y != to_y):
                    scene.addLine(x, y, to_x, to_y, pen)
                    M_PI = math.pi
                    curr_x = x - to_x
                    curr_y = y - to_y
                    if curr_x != 0 or curr_y != 0:  # otherwise the line has zero length
                        # Draw a small triangular arrowhead at the (to_x, to_y) end
                        # of the vector: pa and pb are the two barbs, rotated
                        # +/- pi/7 around the line direction.
                        alpha = math.atan2(curr_y, curr_x)
                        pa_x = sze * math.cos(alpha + M_PI / 7) + to_x
                        pa_y = sze * math.sin(alpha + M_PI / 7) + to_y
                        pb_x = sze * math.cos(alpha - M_PI / 7) + to_x
                        pb_y = sze * math.sin(alpha - M_PI / 7) + to_y

                        #scene.addLine(to_x, to_y, pa_x, pa_y)
                        #scene.addLine(to_x, to_y, pb_x, pb_y)

                        polygon = QPolygonF([QPointF(to_x - sze * math.cos(alpha), to_y - sze * math.sin(alpha)), QPointF(pa_x, pa_y), QPointF(pb_x, pb_y)])
                        scene.addPolygon(polygon, pen)

        longest_vector, max_distance = ImageComparator.longest_motion_vector(self.vectors)
        print "Max motion vector distance: %f" % max_distance
Code example #7
    def _interpolateVideo(self):
        print "Interpolating video..."
        dialog = QMessageBox(self)
        dialog.setText("Please wait while the video is rebuilt...")
        dialog.show()

        start_time = time.time()
        interpolator = VideoInterpolator(self.video)

        #self._discared_frames = [1, 2, 3] #TODO: remove me
        frames_path = interpolator.interpolate(self._discared_frames, self.searcher(), self.ui.blockSizeSpinBox.value(), self.ui.MADThresholdSpingBox.value())
        klog("Interpolating the video took: %.2f seconds" % (time.time()-start_time))

        interpolated_video = Video(frames_path=frames_path)
        #interpolated_video = Video(frames_path="/tmp/pallone.mov.interpolated")
        interpolated_video.load()
        dialog.close()

        self.ui.interpolatedFramesTimelineListView.setModel(QFramesTimelineListModel(interpolated_video))
        self.ui.interpolatedFramesTimelineListView.setItemDelegate(QFramesTimelineDelegate())

        klog("Calculating PSNR for frames...")
        summed_PSNR = 0

        for i in xrange(interpolated_video.frames_count()):
            original_frame = self.video.frames[i]
            new_frame = interpolated_video.frames[i]
            PSNR = ImageComparator.calculate_PSNR(original_frame.image(), new_frame.image(), interpolated_video.width(), interpolated_video.height())
            klog("Frame %d\t\tPSNR: %.2f" % (i, PSNR))
            # Identical frames give an effectively infinite PSNR; clamp it so a
            # few perfect frames do not dominate the average.
            if PSNR > 1000:
                PSNR = 50
            summed_PSNR += PSNR

        PSNR = summed_PSNR / interpolated_video.frames_count()
        klog("The interpolated video has a PSNR of %f" % PSNR)
        self.ui.interpolatedVideoPSNRLabel.setText("%f" % PSNR)

        klog("Saving the interpolated video...")
        FFMpegWrapper.generate_video(frames_path, "interpolated_video.mp4")
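
`ImageComparator.calculate_PSNR` measures how close each rebuilt frame is to the original: PSNR = 10 * log10(MAX^2 / MSE), with MAX = 255 for 8-bit images, and it diverges to infinity when the two frames are identical, which is why the loop above clamps very large values. A minimal sketch over two grayscale frames of equal size, assuming PyQt4 QImages rather than the project's actual classes, could be:

# Hypothetical sketch: PSNR between two 8-bit grayscale frames of equal size.
import math
from PyQt4.QtGui import qRed

def calculate_PSNR_sketch(image_a, image_b, width, height):
    squared_error = 0
    for y in xrange(height):
        for x in xrange(width):
            diff = qRed(image_a.pixel(x, y)) - qRed(image_b.pixel(x, y))
            squared_error += diff * diff
    mse = float(squared_error) / (width * height)
    if mse == 0:
        return float("inf")   # frames are identical
    return 10 * math.log10((255.0 ** 2) / mse)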