Example 1
def savedetectedimagetofile(image, frameid, result, cameraname, display_str,
                            framenameprefix):
    image_np_with_detections = image.copy()
    visualization_util.visualize_boxes_and_labels_on_image_array(
        image_np_with_detections,
        result['boxes'],
        result['classes'],
        result['scores'],
        category_index,
        use_normalized_coordinates=False,
        max_boxes_to_draw=200,
        min_score_thresh=Threshold,
        agnostic_mode=False)
    #display_str=f'Inference time: {str(elapsed_time*1000)}ms, context_name: {context_name}, timestamp_micros: {frame_timestamp_micros}'
    visualization_util.draw_text_on_image(image_np_with_detections,
                                          0,
                                          0,
                                          display_str,
                                          color='black')
    #visualization_util.save_image_array_as_png(image_np_with_detections, outputfile)

    name = './output/frames/' + framenameprefix + str(
        frameid) + '_' + cameraname + '.jpg'
    #print('Creating...' + name)
    cv2.imwrite(name, cv2.cvtColor(image_np_with_detections,
                                   cv2.COLOR_RGB2BGR))  #write to image folder
Example 2
def savedetectedimagetofile(image, frameid, result, cameraname, display_str,
                            framenameprefix, savefolderpath):
    image_np_with_detections = image.copy()
    visualization_util.visualize_boxes_and_labels_on_image_array(
        image_np_with_detections,
        result['boxes'],
        result['classes'],
        result['scores'],
        category_index,
        use_normalized_coordinates=False,
        max_boxes_to_draw=200,
        min_score_thresh=Threshold,
        agnostic_mode=False)
    #display_str=f'Inference time: {str(elapsed_time*1000)}ms, context_name: {context_name}, timestamp_micros: {frame_timestamp_micros}'
    visualization_util.draw_text_on_image(image_np_with_detections,
                                          0,
                                          0,
                                          display_str,
                                          color='black')
    #visualization_util.save_image_array_as_png(image_np_with_detections, outputfile)
    #savefolderpath='/home/010796032/MyRepo/myoutputs'#/home/010796032/MyRepo/WaymoObjectDetection/output/'
    savepath = os.path.join(savefolderpath, framenameprefix)
    if not os.path.exists(savepath):
        os.mkdir(savepath)
    #name = savepath + framenameprefix+str(frameid) + '_'+cameraname+'.jpg'
    name = os.path.join(savepath, str(frameid) + '_' + cameraname + '.jpg')
    #print ('Creating:' + name)
    writeStatus = cv2.imwrite(
        name, cv2.cvtColor(image_np_with_detections,
                           cv2.COLOR_RGB2BGR))  #write to image folder
    if not writeStatus:
        print('Image save failed: ' + name)
Example 3
def evaluateWaymoValidationFramesSubmission(PATH,
                                            validation_folders,
                                            outputsubmissionfilepath,
                                            VisEnable,
                                            outputfile="./output_video1.mp4"):
    data_files = [
        path for x in validation_folders
        for path in glob(os.path.join(PATH, x, "*.tfrecord"))
    ]
    print(data_files)  # all TFRecord file list
    print(len(data_files))
    # create a list of dataset for each TFRecord file
    dataset = [
        tf.data.TFRecordDataset(FILENAME, compression_type='')
        for FILENAME in data_files
    ]
    # total frames is roughly (number of TFRecord files) * 40 if downsampled by 5

    objects = metrics_pb2.Objects()  # submission objects

    wod_latency_submission.initialize_model()

    required_field = wod_latency_submission.DATA_FIELDS
    print(required_field)

    if VisEnable:
        frame_width = 1920
        frame_height = 1280
        out = cv2.VideoWriter(outputfile,
                              cv2.VideoWriter_fourcc('M', 'P', '4', 'V'), 5,
                              (frame_width, frame_height))
    fps = FPS().start()

    frameid = 0
    for i, data_file in enumerate(dataset):
        print("Datafile: ", i)  # Each TFrecord file
        # Create frame based on Waymo API, 199 frames per TFrecord (20s, 10Hz)
        for idx, data in enumerate(data_file):
            #             if idx % 5 != 0: #Downsample every 5 images, reduce to 2Hz, total around 40 frames
            #                 continue
            frame = open_dataset.Frame()
            frame.ParseFromString(bytearray(data.numpy()))

            #frame=frames[frameid]
            convertedframesdict = convert_frame_to_dict_cameras(
                frame)  #data_array[frameid]
            #Allconvertedframesdict.append(convertedframesdict)
            frame_timestamp_micros = convertedframesdict['TIMESTAMP']  #['key']
            context_name = frame.context.name
            print(
                f'Current frame id: {frameid}, context_name: {context_name}, frame_timestamp_micros: {frame_timestamp_micros}'
            )

            start_time = time.perf_counter()  #.time()
            #result = wod_latency_submission.run_model(Front_image)
            result = wod_latency_submission.run_model(
                **convertedframesdict)  #All images
            end_time = time.perf_counter()  #.time()
            elapsed_time = end_time - start_time
            print('Inference time: ' + str(elapsed_time) + 's')
            # print(result)

            createsubmisionobject(objects, result['boxes'], result['classes'],
                                  result['scores'], context_name,
                                  frame_timestamp_micros)

            if VisEnable:
                Front_image = convertedframesdict[required_field[0]]
                #Front_image = convertedframesdict['FRONT_IMAGE']
                image_np_with_detections = Front_image.copy()
                visualization_util.visualize_boxes_and_labels_on_image_array(
                    image_np_with_detections,
                    result['boxes'],
                    result['classes'],
                    result['scores'],
                    category_index,
                    use_normalized_coordinates=False,
                    max_boxes_to_draw=200,
                    min_score_thresh=Threshold,
                    agnostic_mode=False)
                display_str = f'Inference time: {str(elapsed_time*1000)}ms, context_name: {context_name}, timestamp_micros: {frame_timestamp_micros}'
                visualization_util.draw_text_on_image(image_np_with_detections,
                                                      0,
                                                      0,
                                                      display_str,
                                                      color='black')
                #visualization_util.save_image_array_as_png(image_np_with_detections, outputfile)

                name = './Test_data/frame' + str(frameid) + '.jpg'
                #print('Creating...' + name)
                # cv2.imwrite(name, image_np_with_detections) #write to image folder
                #out.write(image_np_with_detections)
                out.write(
                    cv2.cvtColor(image_np_with_detections, cv2.COLOR_RGB2BGR))
                #cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
            frameid = frameid + 1
            fps.update()

    # stop the timer and display FPS information
    fps.stop()
    print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
    if VisEnable:
        out.release()

    with open('objectsresult0525_detectron282k_valall_front.pickle',
              'wb') as f:
        pickle.dump(objects, f)
    # with open('allframedics.pickle', 'wb') as f:
    #     pickle.dump(Allconvertedframesdict, f)

    submission = submission_pb2.Submission()
    submission.task = submission_pb2.Submission.DETECTION_2D
    submission.account_name = '*****@*****.**'
    submission.authors.append('Kaikai Liu')
    submission.affiliation = 'None'
    submission.unique_method_name = 'fake'
    submission.description = 'none'
    submission.method_link = "empty method"
    submission.sensor_type = submission_pb2.Submission.CAMERA_ALL
    submission.number_past_frames_exclude_current = 0
    submission.number_future_frames_exclude_current = 0
    submission.inference_results.CopyFrom(objects)
    with open(outputsubmissionfilepath, 'wb') as f:  # output submission file
        f.write(submission.SerializeToString())

    now = datetime.datetime.now()
    print("Finished validation, current date and time : ")
    print(now.strftime("%Y-%m-%d %H:%M:%S"))
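One quick sanity check on the file this function just wrote is to parse it back and count the predictions; a minimal sketch (count_objects is a hypothetical helper, not part of the source):

from waymo_open_dataset.protos import submission_pb2

def count_objects(path):
    # Parse the binary Submission proto and report how many predictions it carries.
    submission = submission_pb2.Submission()
    with open(path, 'rb') as f:
        submission.ParseFromString(f.read())
    return len(submission.inference_results.objects)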
Example 4
def evaluateWaymoValidationFramesFakeSubmission(
        PATH,
        validation_folders,
        outputsubmissionfilepath,
        outputfile="./output_video1.mp4"):
    data_files = [
        path for x in validation_folders
        for path in glob(os.path.join(PATH, x, "*.tfrecord"))
    ]
    print(data_files)  # all TFRecord file list
    print(len(data_files))
    # create a list of dataset for each TFRecord file
    dataset = [
        tf.data.TFRecordDataset(FILENAME, compression_type='')
        for FILENAME in data_files
    ]
    # total frames is roughly (number of TFRecord files) * 40 if downsampled by 5

    objects = metrics_pb2.Objects()  # submission objects

    frame_width = 1920
    frame_height = 1280
    out = cv2.VideoWriter(outputfile,
                          cv2.VideoWriter_fourcc('M', 'P', '4', 'V'), 5,
                          (frame_width, frame_height))
    fps = FPS().start()

    frameid = 0
    for i, data_file in enumerate(dataset):
        print("Datafile: ", i)  # Each TFrecord file
        # Create frame based on Waymo API, 199 frames per TFrecord (20s, 10Hz)
        for idx, data in enumerate(data_file):
            #             if idx % 5 != 0: #Downsample every 5 images, reduce to 2Hz, total around 40 frames
            #                 continue
            frame = open_dataset.Frame()
            frame.ParseFromString(bytearray(data.numpy()))

            #frame=frames[frameid]
            convertedframesdict = convert_frame_to_dict_cameras(
                frame)  #data_array[frameid]
            #Allconvertedframesdict.append(convertedframesdict)
            frame_timestamp_micros = convertedframesdict['TIMESTAMP']  #['key']
            context_name = frame.context.name
            print(
                f'Current frame id: {frameid}, context_name: {context_name}, frame_timestamp_micros: {frame_timestamp_micros}'
            )

            o_list = []
            boundingbox = []
            boxscore = []
            boxtype = []
            for camera_labels in frame.camera_labels:
                if camera_labels.name != 1:  #Only use front camera
                    continue
                for gt_label in camera_labels.labels:
                    o = metrics_pb2.Object()
                    # The following 3 fields are used to uniquely identify a frame a prediction
                    # is predicted at.
                    o.context_name = frame.context.name
                    # The frame timestamp for the prediction. See Frame::timestamp_micros in
                    # dataset.proto.
                    o.frame_timestamp_micros = frame.timestamp_micros
                    # This is only needed for 2D detection or tracking tasks.
                    # Set it to the camera name the prediction is for.
                    o.camera_name = camera_labels.name

                    # Populating box and score.
                    box = label_pb2.Label.Box()
                    box.center_x = gt_label.box.center_x
                    box.center_y = gt_label.box.center_y
                    box.length = gt_label.box.length
                    box.width = gt_label.box.width
                    boundingbox.append(
                        [box.center_x, box.center_y, box.length,
                         box.width])  # center_x, center_y, length, width
                    o.object.box.CopyFrom(box)
                    # This must be within [0.0, 1.0]. It is better to filter those boxes with
                    # small scores to speed up metrics computation.
                    o.score = 0.9
                    boxscore.append(o.score)
                    # Use correct type.
                    o.object.type = gt_label.type
                    boxtype.append(o.object.type)
                    o_list.append(o)
                    print(
                        f'Camera labelname: {camera_labels.name}, object type: {gt_label.type}, box: {box}'
                    )

            for o in o_list:
                objects.objects.append(o)
            # Save the original image
            #output_path = "./test.png"
            #visualization_util.save_image_array_as_png(Front_image, output_path)
            boundingbox = np.array(boundingbox)
            boxscore = np.array(boxscore)
            boxtype = np.array(boxtype).astype(np.uint8)

            Front_image = convertedframesdict['FRONT_IMAGE']
            image_np_with_detections = Front_image.copy()
            visualization_util.visualize_boxes_and_labels_on_image_array(
                image_np_with_detections,
                boundingbox,
                boxtype,
                boxscore,
                category_index,
                use_normalized_coordinates=False,
                max_boxes_to_draw=200,
                min_score_thresh=Threshold,
                agnostic_mode=False)
            display_str = f'context_name: {context_name}, timestamp_micros: {frame_timestamp_micros}'
            visualization_util.draw_text_on_image(image_np_with_detections,
                                                  0,
                                                  0,
                                                  display_str,
                                                  color='black')
            #visualization_util.save_image_array_as_png(image_np_with_detections, outputfile)

            name = './Test_data/frame' + str(frameid) + '.jpg'
            #print('Creating...' + name)
            # cv2.imwrite(name, image_np_with_detections) #write to image folder
            fps.update()
            #out.write(image_np_with_detections)
            out.write(cv2.cvtColor(image_np_with_detections,
                                   cv2.COLOR_RGB2BGR))
            #cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
            frameid = frameid + 1

    # stop the timer and display FPS information
    fps.stop()
    print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
    out.release()

    with open('objectsresult_gtvalall.pickle', 'wb') as f:
        pickle.dump(objects, f)
    # with open('allframedics.pickle', 'wb') as f:
    #     pickle.dump(Allconvertedframesdict, f)

    submission = submission_pb2.Submission()
    submission.task = submission_pb2.Submission.DETECTION_2D
    submission.account_name = '*****@*****.**'
    submission.authors.append('Kaikai Liu')
    submission.affiliation = 'None'
    submission.unique_method_name = 'fake'
    submission.description = 'none'
    submission.method_link = "empty method"
    submission.sensor_type = submission_pb2.Submission.CAMERA_ALL
    submission.number_past_frames_exclude_current = 0
    submission.number_future_frames_exclude_current = 0
    submission.inference_results.CopyFrom(objects)
    with open(outputsubmissionfilepath, 'wb') as f:  # output submission file
        f.write(submission.SerializeToString())

    now = datetime.datetime.now()
    print("Finished validation, current date and time : ")
    print(now.strftime("%Y-%m-%d %H:%M:%S"))
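The inline comment above notes that boxes with small scores are better filtered out before metrics computation; a minimal sketch of such a filter (filter_objects and score_thresh are hypothetical, not from the source):

from waymo_open_dataset.protos import metrics_pb2

def filter_objects(objects, score_thresh=0.1):
    # Copy only predictions at or above the threshold into a fresh Objects proto.
    filtered = metrics_pb2.Objects()
    filtered.objects.extend(o for o in objects.objects if o.score >= score_thresh)
    return filtered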
Example 5
def evaluateallframesgtfakesubmission(frames,
                                      outputsubmissionfilepath,
                                      outputfile="./output_video1.mp4"):
    array_len = len(frames)  #4931 frames for validation_0000
    # 20, 200 frames in one file, downsample by 10
    print("Frames lenth:", array_len)
    print("Final_array type:", type(frames))  # class 'list'

    objects = metrics_pb2.Objects()  # submission objects

    frame_width = 1920
    frame_height = 1280
    out = cv2.VideoWriter(outputfile,
                          cv2.VideoWriter_fourcc('M', 'P', '4', 'V'), 5,
                          (frame_width, frame_height))
    fps = FPS().start()

    #wod_latency_submission.initialize_model()

    #required_field = wod_latency_submission.DATA_FIELDS
    #print(required_field)
    #Allconvertedframesdict=[]

    for frameid in range(array_len):
        #frameid = 5
        print("frameid:", frameid)
        # {'key':key, 'context_name':context_name, 'framedict':framedict}
        frame = frames[frameid]
        convertedframesdict = convert_frame_to_dict_cameras(
            frame)  #data_array[frameid]
        #Allconvertedframesdict.append(convertedframesdict)
        frame_timestamp_micros = convertedframesdict['TIMESTAMP']  #['key']
        context_name = frame.context.name

        o_list = []
        boundingbox = []
        boxscore = []
        boxtype = []
        for camera_labels in frame.camera_labels:
            if camera_labels.name != 1:  #Only use front camera
                continue
            for gt_label in camera_labels.labels:
                o = metrics_pb2.Object()
                # The following 3 fields are used to uniquely identify a frame a prediction
                # is predicted at.
                o.context_name = frame.context.name
                # The frame timestamp for the prediction. See Frame::timestamp_micros in
                # dataset.proto.
                o.frame_timestamp_micros = frame.timestamp_micros
                # This is only needed for 2D detection or tracking tasks.
                # Set it to the camera name the prediction is for.
                o.camera_name = camera_labels.name

                # Populating box and score.
                box = label_pb2.Label.Box()
                box.center_x = gt_label.box.center_x
                box.center_y = gt_label.box.center_y
                box.length = gt_label.box.length
                box.width = gt_label.box.width
                boundingbox.append(
                    [box.center_x, box.center_y, box.length,
                     box.width])  # center_x, center_y, length, width
                o.object.box.CopyFrom(box)
                # This must be within [0.0, 1.0]. It is better to filter those boxes with
                # small scores to speed up metrics computation.
                o.score = 0.9
                boxscore.append(o.score)
                # Use correct type.
                o.object.type = gt_label.type
                boxtype.append(o.object.type)
                o_list.append(o)
                print(
                    f'Camera labelname: {camera_labels.name}, object type: {gt_label.type}, box: {box}'
                )

        for o in o_list:  # append the ground-truth objects; otherwise o_list is built but never used
            objects.objects.append(o)

        # Save the original image
        #output_path = "./test.png"
        #visualization_util.save_image_array_as_png(Front_image, output_path)
        boundingbox = np.array(boundingbox)
        boxscore = np.array(boxscore)
        boxtype = np.array(boxtype).astype(np.uint8)

        Front_image = convertedframesdict['FRONT_IMAGE']
        image_np_with_detections = Front_image.copy()
        visualization_util.visualize_boxes_and_labels_on_image_array(
            image_np_with_detections,
            boundingbox,
            boxtype,
            boxscore,
            category_index,
            use_normalized_coordinates=False,
            max_boxes_to_draw=200,
            min_score_thresh=Threshold,
            agnostic_mode=False)
        display_str = f'context_name: {context_name}, timestamp_micros: {frame_timestamp_micros}'
        visualization_util.draw_text_on_image(image_np_with_detections,
                                              0,
                                              0,
                                              display_str,
                                              color='black')
        #visualization_util.save_image_array_as_png(image_np_with_detections, outputfile)

        name = './Test_data/frame' + str(frameid) + '.jpg'
        #print('Creating...' + name)
        # cv2.imwrite(name, image_np_with_detections) #write to image folder
        fps.update()
        #out.write(image_np_with_detections)
        out.write(cv2.cvtColor(image_np_with_detections, cv2.COLOR_RGB2BGR))
        #cv2.cvtColor(img, cv2.COLOR_RGB2BGR)

    # stop the timer and display FPS information
    fps.stop()
    print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
    out.release()

    with open('objectsresult_gtvalall.pickle', 'wb') as f:
        pickle.dump(objects, f)
    # with open('allframedics.pickle', 'wb') as f:
    #     pickle.dump(Allconvertedframesdict, f)

    submission = submission_pb2.Submission()
    submission.task = submission_pb2.Submission.DETECTION_2D
    submission.account_name = '*****@*****.**'
    submission.authors.append('Kaikai Liu')
    submission.affiliation = 'None'
    submission.unique_method_name = 'fake'
    submission.description = 'none'
    submission.method_link = "empty method"
    submission.sensor_type = submission_pb2.Submission.CAMERA_ALL
    submission.number_past_frames_exclude_current = 0
    submission.number_future_frames_exclude_current = 0
    submission.inference_results.CopyFrom(objects)
    with open(outputsubmissionfilepath, 'wb') as f:  # output submission file
        f.write(submission.SerializeToString())

    now = datetime.datetime.now()
    print("Finished validation, current date and time : ")
    print(now.strftime("%Y-%m-%d %H:%M:%S"))
def evaluateallframescreatesubmission(frames,
                                      outputsubmissionfilepath,
                                      outputfile="./output_video1.mp4"):
    array_len = len(frames)  #4931 frames for validation_0000
    # 20, 200 frames in one file, downsample by 10
    print("Frames length:", array_len)
    print("Final_array type:", type(frames))  # class 'list'

    objects = metrics_pb2.Objects()  # submission objects

    frame_width = 1920
    frame_height = 1280
    out = cv2.VideoWriter(outputfile, cv2.VideoWriter_fourcc(
        'M', 'P', '4', 'V'), 5, (frame_width, frame_height))
    fps = FPS().start()

    wod_latency_submission.initialize_model()

    required_field = wod_latency_submission.DATA_FIELDS
    print(required_field)

    for frameid in range(array_len):
        #frameid = 5
        print("frameid:", frameid)
        # {'key':key, 'context_name':context_name, 'framedict':framedict}
        currentframe = frames[frameid]
        convertedframesdict = convert_frame_to_dict_cameras(currentframe)  #data_array[frameid]
        frame_timestamp_micros = convertedframesdict['TIMESTAMP']  #['key']
        context_name = currentframe.context.name  #convertedframesdict['context_name']
        #framedict = convertedframesdict['framedict']
        # 10017090168044687777_6380_000_6400_000
        #print('context_name:', context_name)
        # print('frame_timestamp_micros:', frame_timestamp_micros)  # 1550083467346370

        #result = wod_latency_submission.run_model(framedict[required_field[0]], framedict[required_field[1]])
        #result = wod_latency_submission.run_model(**framedict)
        #Front_image = framedict[required_field[0]]
        start_time = time.time()
        #result = wod_latency_submission.run_model(Front_image)
        result = wod_latency_submission.run_model(**convertedframesdict)  #All images
        end_time = time.time()
        elapsed_time = end_time - start_time
        print('Inference time: ' + str(elapsed_time) + 's')
        # print(result)

        createsubmisionobject(objects, result['boxes'], result['classes'],
                              result['scores'], context_name, frame_timestamp_micros)

        # Save the original image
        #output_path = "./test.png"
        #visualization_util.save_image_array_as_png(Front_image, output_path)

        Front_image = convertedframesdict[required_field[0]]
        image_np_with_detections = Front_image.copy()
        visualization_util.visualize_boxes_and_labels_on_image_array(
            image_np_with_detections,
            result['boxes'],
            result['classes'],
            result['scores'],
            category_index,
            use_normalized_coordinates=False,
            max_boxes_to_draw=200,
            min_score_thresh=Threshold,
            agnostic_mode=False)
        display_str = f'Inference time: {str(elapsed_time*1000)}ms, context_name: {context_name}, timestamp_micros: {frame_timestamp_micros}'
        visualization_util.draw_text_on_image(image_np_with_detections,
                                              0,
                                              0,
                                              display_str,
                                              color='black')
        #visualization_util.save_image_array_as_png(image_np_with_detections, outputfile)

        name = './Test_data/frame' + str(frameid) + '.jpg'
        #print('Creating...' + name)
        # cv2.imwrite(name, image_np_with_detections) #write to image folder
        fps.update()
        #out.write(image_np_with_detections)
        out.write(cv2.cvtColor(image_np_with_detections, cv2.COLOR_RGB2BGR))
        #cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        

    # stop the timer and display FPS information
    fps.stop()
    print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
    out.release()

    with open('objectsresult.pickle', 'wb') as f:
        pickle.dump(objects, f)

    submission = submission_pb2.Submission()
    submission.task = submission_pb2.Submission.DETECTION_2D
    submission.account_name = '*****@*****.**'
    submission.authors.append('Kaikai Liu')
    submission.affiliation = 'None'
    submission.unique_method_name = 'torchvisionfaster'
    submission.description = 'none'
    submission.method_link = "empty method"
    submission.sensor_type = submission_pb2.Submission.CAMERA_ALL
    submission.number_past_frames_exclude_current = 0
    submission.number_future_frames_exclude_current = 0
    submission.inference_results.CopyFrom(objects)
    with open(outputsubmissionfilepath, 'wb') as f:  # output submission file
        f.write(submission.SerializeToString())

    now = datetime.datetime.now()
    print("Finished validation, current date and time : ")
    print(now.strftime("%Y-%m-%d %H:%M:%S"))
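The submission-assembly block above is repeated nearly verbatim in each of these functions, differing only in unique_method_name and the pickle filename; a sketch of factoring it into a single helper (write_submission is hypothetical; the field values are the same placeholders used above):

from waymo_open_dataset.protos import submission_pb2

def write_submission(objects, outputsubmissionfilepath, unique_method_name):
    # Wrap the accumulated metrics_pb2.Objects in a Submission proto and
    # serialize it to disk, mirroring the inline blocks above.
    submission = submission_pb2.Submission()
    submission.task = submission_pb2.Submission.DETECTION_2D
    submission.account_name = '*****@*****.**'
    submission.authors.append('Kaikai Liu')
    submission.affiliation = 'None'
    submission.unique_method_name = unique_method_name
    submission.description = 'none'
    submission.method_link = "empty method"
    submission.sensor_type = submission_pb2.Submission.CAMERA_ALL
    submission.number_past_frames_exclude_current = 0
    submission.number_future_frames_exclude_current = 0
    submission.inference_results.CopyFrom(objects)
    with open(outputsubmissionfilepath, 'wb') as f:
        f.write(submission.SerializeToString())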
Example 6 (fragment)
        inputimage = np.load(os.path.join(timestamp_dir, f'{imagename}.npy'))

        resultdict = allcameraresult[imagename]  #one camera
        #print(f'imagename:{imagename}, resultdict:{resultdict}')
        boxes = resultdict['boxes']
        classes = resultdict['classes']
        scores = resultdict['scores']

        visualization_util.visualize_boxes_and_labels_on_image_array(
            inputimage,
            boxes,
            classes,
            scores,
            category_index,
            use_normalized_coordinates=False,
            max_boxes_to_draw=200,
            min_score_thresh=0.1,
            agnostic_mode=False)
        display_str = f'context_name: {context_name}, timestamp_micros: {timestamp_micros}'
        visualization_util.draw_text_on_image(inputimage,
                                              0,
                                              0,
                                              display_str,
                                              color='black')

        name = './frame' + nameprefix + str(imagename) + '.jpg'
        print('Creating...' + name)
        cv2.imwrite(name,
                    cv2.cvtColor(inputimage,
                                 cv2.COLOR_RGB2BGR))  #write to image folder
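For context, a scaffold of the kind of enclosing loop this fragment appears to come from (every name here, visualize_camera_results included, is a hypothetical reconstruction, not from the source):

import os
import numpy as np

def visualize_camera_results(timestamp_dir, allcameraresult, nameprefix,
                             context_name, timestamp_micros):
    # Assumed layout: one <imagename>.npy image dump per camera under
    # timestamp_dir, with allcameraresult mapping that name to its
    # {'boxes', 'classes', 'scores'} detection dict.
    for imagename in allcameraresult:
        # ... the fragment body above belongs here ...
        pass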