Example #1
    def run(self):
        super().run()
        self.queue_stop_sign.clear()  # clear the stop flag for queue consumption
        features = None

        need_post = False
        frame_count = 0
        total_frame_count = 0  # running total; frame_count is reset after each feature batch
        while True:
            if not self.source_queue.empty():  # a queue.Queue is always truthy, so test emptiness explicitly
                queue_item = self.source_queue.get()
                print("CameraSingleEval --> get frame")
                if queue_item.frame_index == QueueItem.PAUSE_SIGN_INDEX:
                    need_post = True

                elif queue_item.frame_index == QueueItem.STOP_SIGN_INDEX:
                    need_post = True
                    self.queue_stop_sign.set()
                else:
                    frame = transform(queue_item.frame)
                    self.transformed_video[frame_count] = frame
                    frame_count += 1
                    total_frame_count += 1
                    if frame_count == self.APP.Evaluate_Frame_Count:
                        need_post = True

                if need_post and frame_count > 1:
                    print("CameraSingleEval -->  开始提取特征")
                    need_post = False
                    feature = get_features(self.transformed_video[0:frame_count],
                                           frame_batch_size=self.APP.Get_Feature_Batch_Size,
                                           device=self.APP.Device)
                    if features is None:
                        features = feature
                    else:
                        features = torch.cat((features, feature), 0)
                    frame_count = 0

            if self.queue_stop_sign.is_set():
                print("CameraSingleEval -- > 特征提取完毕,开始评估:")
                features = torch.unsqueeze(features, 0)  # torch.Size([1, len, xxxx])
                with torch.no_grad():
                    input_length = features.shape[1] * torch.ones(1, 1)
                    outputs = self.model(features, input_length)
                    ans = outputs[0][0].to('cpu').numpy()
                    print("CameraSingleEval --> get ans = ", ans)

                    record_item = RecordItem(0, total_frame_count, ans)  # frame_count was reset above, so use the running total
                    record_item.set_method(Method.SINGLE_METHOD)
                    self.APP.result_records.append(record_item)
                    self.APP.app_refresh_eval_info()

                messagebox.showinfo("整段评估", "视频质量为:" + str(ans))
                self.APP.EVAL_END_SIGN.set()
                self.APP.EVAL_STATE = State.FINISHED_STATE
                print("CameraSingleEval stop !")
                break
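
Note: the two camera-driven examples (#1 and #3) consume a QueueItem protocol that the listing never shows. The sketch below is reconstructed from how the code uses it and is an assumption, not the project's actual class: each item carries a frame payload plus its index, and two sentinel indices ask the consumer to flush its buffer (pause) or to flush and then terminate (stop). The sentinel values chosen here are hypothetical.

from dataclasses import dataclass

@dataclass
class QueueItem:
    PAUSE_SIGN_INDEX = -1  # assumed sentinel: flush buffered frames, keep running
    STOP_SIGN_INDEX = -2   # assumed sentinel: flush buffered frames, then terminate
    frame_index: int       # frame position in the stream, or a sentinel above
    frame: object = None   # raw frame payload (e.g. a PIL.Image) for normal items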
Example #2
    def run(self):
        super().run()
        transformed_video = torch.zeros([self.APP.Evaluate_Frame_Count,
                                         self.channel, self.height, self.width])
        cur_frame_index = 0
        frame_count = 0

        next_put_index = randint(0, self.APP.Evaluate_Frame_Count)  # index where the first window starts
        tail_out_index = next_put_index + self.APP.Evaluate_Frame_Count

        while cur_frame_index < self.video_data.shape[0]:
            if cur_frame_index == tail_out_index:
                last_put_index = tail_out_index  # index where the previous window ended
                rand_count = randint(0, self.APP.Evaluate_Frame_Count)
                next_put_index = last_put_index + rand_count
                tail_out_index = next_put_index + self.APP.Evaluate_Frame_Count
                print("camera_thread --> get rand_count = ", rand_count)

            if next_put_index <= cur_frame_index < tail_out_index:
                frame = self.video_data[cur_frame_index]
                frame = Image.fromarray(frame)
                frame = transform(frame)
                transformed_video[frame_count] = frame
                frame_count += 1

            # enough frames buffered, or the last frame of the video
            can_get_features = frame_count == self.APP.Evaluate_Frame_Count \
                               or cur_frame_index == self.video_data.shape[0] - 1
            can_get_features = can_get_features and frame_count > 1

            if can_get_features:
                features = get_features(transformed_video[0:frame_count],
                                        frame_batch_size=self.APP.Get_Feature_Batch_Size,
                                        device=self.APP.Device)

                features = torch.unsqueeze(features, 0)  # torch.Size([1, len, xxxx])
                with torch.no_grad():
                    input_length = features.shape[1] * torch.ones(1, 1)
                    outputs = self.model(features, input_length)
                    ans = outputs[0][0].to('cpu').numpy()
                    print("VideoMultiEval --> get ans = ", ans)

                    record_item = RecordItem(cur_frame_index - frame_count + 1, frame_count, ans)  # window starts at the first buffered frame
                    record_item.set_method(Method.MULTI_METHOD)
                    self.APP.result_records.append(record_item)
                    self.APP.app_refresh_eval_info()
                frame_count = 0
            cur_frame_index += 1

        self.APP.result_records.append(RecordItem.END_SIGN)
        self.APP.EVAL_END_SIGN.set()
        self.APP.EVAL_STATE = State.FINISHED_STATE
        messagebox.showinfo(title="分段评估", message="分段评估已完成!")
Example #3
    def run(self):
        super().run()
        print("CameraMultiEval -->      run()")
        frame_count = 0
        last_frame_index = 0  # index of the most recent real frame; sentinel items carry no frame index
        need_handle = False
        self.queue_stop_sign.clear()

        while True:
            if not self.source_queue.empty():  # a queue.Queue is always truthy, so test emptiness explicitly
                queue_item = self.source_queue.get()
                print("camera_evaluate_thread --> get frame")
                if queue_item.frame_index == QueueItem.PAUSE_SIGN_INDEX:  # compare the index, not the frame payload (cf. Example #1)
                    need_handle = True
                elif queue_item.frame_index == QueueItem.STOP_SIGN_INDEX:
                    need_handle = True
                    self.queue_stop_sign.set()
                else:
                    frame = transform(queue_item.frame)  # convert the frame format
                    last_frame_index = queue_item.frame_index  # remember the latest real frame index
                    self.transformed_video[frame_count] = frame
                    frame_count += 1
                    if frame_count == self.APP.Evaluate_Frame_Count:
                        need_handle = True

                if need_handle and frame_count > 1:
                    print("camera evaluate thread -->  开始评估")
                    need_handle = False
                    features = get_features(self.transformed_video[0:frame_count],
                                            frame_batch_size=self.APP.Get_Feature_Batch_Size,
                                            device=self.APP.Device)

                    features = torch.unsqueeze(features, 0)  # torch.Size([1, len, xxxx])
                    with torch.no_grad():
                        input_length = features.shape[1] * torch.ones(1, 1)
                        outputs = self.model(features, input_length)
                        ans = outputs[0][0].to('cpu').numpy()
                        print("camera evaluate thread --> get ans = ", ans)

                        record_item = RecordItem(last_frame_index - frame_count + 1, frame_count, ans)  # sentinel items carry no frame index, so use the last real one
                        record_item.set_method(Method.MULTI_METHOD)
                        self.APP.result_records.append(record_item)
                        self.APP.app_refresh_eval_info()
                    frame_count = 0

            # the stop flag is checked on every pass through the loop
            if self.queue_stop_sign.is_set():
                self.APP.result_records.append(RecordItem.END_SIGN)
                self.APP.EVAL_END_SIGN.set()
                self.APP.EVAL_STATE = State.FINISHED_STATE
                messagebox.showinfo("分段评估", "拍摄模式分段评估已完成!")
                print("camera evaluate thread stop !")
                break
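
Examples #1 and #3 also share a producer/consumer handshake: a capture thread puts QueueItem objects on a queue, and the evaluation thread drains it until the stop sentinel arrives. A minimal sketch of that handshake, reusing the hypothetical QueueItem from the note after Example #1:

import queue
import threading

source_queue = queue.Queue()
queue_stop_sign = threading.Event()

def producer(frames):
    for i, frame in enumerate(frames):
        source_queue.put(QueueItem(frame_index=i, frame=frame))
    source_queue.put(QueueItem(frame_index=QueueItem.STOP_SIGN_INDEX))  # end of stream

def consumer():
    while not queue_stop_sign.is_set():
        item = source_queue.get()  # blocks until the producer puts an item
        if item.frame_index == QueueItem.STOP_SIGN_INDEX:
            queue_stop_sign.set()  # mirrors the stop handling in the examples
        else:
            pass  # transform(item.frame) and buffer it here

threading.Thread(target=producer, args=([object()] * 5,)).start()
consumer()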
Example #4
    def run(self):
        super().run()
        cur_frame_index = self.start_index
        self.transformed_video = torch.zeros([self.length, self.channel, self.height, self.width])

        frame_count = self.video_data.shape[0]
        if self.end_index > frame_count or self.end_index == 0:
            self.end_index = frame_count

        while cur_frame_index < self.end_index:
            frame = self.video_data[cur_frame_index]
            frame = Image.fromarray(frame)
            frame = transform(frame)
            self.transformed_video[cur_frame_index - self.start_index] = frame  # buffer holds self.length frames, so index relative to start_index
            cur_frame_index += 1

        features = get_features(self.transformed_video,
                                frame_batch_size=self.APP.Get_Feature_Batch_Size,
                                device=self.APP.Device)
        features = torch.unsqueeze(features, 0)

        with torch.no_grad():
            input_length = features.shape[1] * torch.ones(1, 1)
            outputs = self.model(features, input_length)
            ans = outputs[0][0].to('cpu').numpy()

            record_item = RecordItem(self.start_index, cur_frame_index - self.start_index, ans)  # segment start index and frame count
            if self.is_specified:
                record_item.set_method(Method.SPECIFIED_METHOD)
            else:
                record_item.set_method(Method.SINGLE_METHOD)

            self.APP.result_records.append(record_item)
            self.APP.app_refresh_eval_info()
            print("VideoSingleEval --> 获取评测结果:", ans)

        self.APP.EVAL_END_SIGN.set()
        self.APP.EVAL_STATE = State.FINISHED_STATE
        messagebox.showinfo(title="整段评估结果:", message="视频质量为: " + str(ans))
        print("VideoSingleEval --> end")
Example #5
    transformed_video = torch.zeros([video_length, video_channel, video_height, video_width])
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])

    for frame_idx in range(video_length):
        frame = video_data[frame_idx]
        frame = Image.fromarray(frame)
        frame = transform(frame)
        transformed_video[frame_idx] = frame

    print('Video length: {}'.format(transformed_video.shape[0]))

    # feature extraction
    features = get_features(transformed_video, frame_batch_size=args.frame_batch_size, device=device)
    features = torch.unsqueeze(features, 0)  # batch size 1

    # quality prediction using VSFA
    model = VSFA()
    model.load_state_dict(torch.load(args.model_path))
    model.to(device)
    model.eval()
    with torch.no_grad():
        input_length = features.shape[1] * torch.ones(1, 1)
        outputs = model(features, input_length)
        y_pred = outputs[0][0].to('cpu').numpy()
        print("Predicted quality: {}".format(y_pred))

    end = time.time()
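
Example #5 iterates over a video_data ndarray of raw frames but never shows where it comes from. One plausible way to build it (an OpenCV-based assumption, not part of the original) is to decode the file into a stacked array of RGB frames, since Image.fromarray expects RGB order:

import cv2
import numpy as np

def load_video(path):
    cap = cv2.VideoCapture(path)
    frames = []
    while True:
        ok, frame_bgr = cap.read()
        if not ok:
            break
        frames.append(cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2RGB))  # BGR -> RGB for PIL
    cap.release()
    return np.stack(frames)  # [length, height, width, 3], dtype uint8

video_data = load_video("test.mp4")  # hypothetical input path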