Пример #1
0
def get_person_id_to_track_multicam(dataset_path,
                                    working_dirs,
                                    cam_count,
                                    person_identifier="ped_id"):
    """Map each person id to the set of cams that see it in every frame.

    :param dataset_path: root folder containing one cam_<id> subfolder per cam.
    :param working_dirs: working dirs handed through to load_csv.
    :param cam_count: number of cameras; cam ids 0..cam_count-1 are loaded.
    :param person_identifier: name of the person id column in the csv files.
    :return: nested dict person_id -> frame_no_cam -> set of cam ids.
    """
    print("Dataset path: {}".format(dataset_path))
    person_id_to_frame_no_cam_to_cam_id = {}

    for cam_id in range(cam_count):
        cam_path = os.path.join(dataset_path, "cam_{}/".format(cam_id),
                                "coords_cam_{}.csv".format(cam_id))
        cam_coords = load_csv(working_dirs, cam_path)

        # Collapse duplicate detections of one person within a GTA frame.
        cam_coords = cam_coords.groupby(["frame_no_gta", person_identifier],
                                        as_index=False).mean()

        for _, row in tqdm(cam_coords.iterrows(), total=len(cam_coords)):
            person_id = int(row[person_identifier])
            frame_no_cam = int(row["frame_no_cam"])

            # setdefault builds the nested dict/set structure lazily.
            frame_to_cams = person_id_to_frame_no_cam_to_cam_id.setdefault(
                person_id, {})
            frame_to_cams.setdefault(frame_no_cam, set()).add(cam_id)

    return person_id_to_frame_no_cam_to_cam_id
Пример #2
0
def get_combined_dataframe(dataset_path,working_dirs,cam_ids,frame_count_per_cam=None,person_identifier="ped_id"):
    """Combine the bounding-box coordinates of several cameras into one frame.

    :param dataset_path: either a root folder (str) with cam_<id> subfolders,
        or a mapping cam_id -> already-loaded dataframe.
    :param working_dirs: working dirs handed through to load_csv (unused when
        dataset_path already holds dataframes).
    :param cam_ids: iterable of cam ids to combine.
    :param frame_count_per_cam: if set, keep only the first N distinct
        frame_no_cam values per camera.
    :param person_identifier: name of the person id column.
    :return: one dataframe with an added cam_id column per row.
    """
    print("Dataset path: {}".format(dataset_path))
    per_cam_frames = []

    for cam_id in cam_ids:
        if isinstance(dataset_path, str):
            cam_path = os.path.join(dataset_path, "cam_{}/".format(cam_id), "coords_fib_cam_{}.csv".format(cam_id))
            cam_coords = load_csv(working_dirs, cam_path)
        else:
            # dataset_path already maps cam ids to loaded dataframes.
            cam_coords = dataset_path[cam_id]

        cam_coords = cam_coords[["frame_no_cam",
                                 person_identifier,
                                 "x_top_left_BB",
                                 "y_top_left_BB",
                                 "x_bottom_right_BB",
                                 "y_bottom_right_BB"]]

        cam_coords = cam_coords.astype({"frame_no_cam": int,
                                        person_identifier: int,
                                        "x_top_left_BB": int,
                                        "y_top_left_BB": int,
                                        "x_bottom_right_BB": int,
                                        "y_bottom_right_BB": int})

        cam_coords["cam_id"] = cam_id

        # Keep only the first frame_count_per_cam distinct frame numbers.
        if frame_count_per_cam is not None:
            first_frame_nos = set(
                sorted(set(cam_coords["frame_no_cam"]))[:frame_count_per_cam])
            cam_coords = cam_coords[
                cam_coords["frame_no_cam"].isin(first_frame_nos)]

        per_cam_frames.append(cam_coords)

    # Bug fix: DataFrame.append was deprecated and removed in pandas 2.0;
    # collect the per-cam frames and concatenate once instead.
    if per_cam_frames:
        return pd.concat(per_cam_frames)
    return pd.DataFrame()
Пример #3
0
def count_tracks(dataset_path,working_dirs,cam_count,person_identifier="ped_id"):
    """Print, per camera, how many distinct person ids (tracks) it contains."""
    print("Dataset path: {}".format(dataset_path))
    for cam_id in range(cam_count):
        csv_path = os.path.join(dataset_path,
                                "cam_{}/".format(cam_id),
                                "coords_cam_{}.csv".format(cam_id))
        coords = load_csv(working_dirs, csv_path)

        # After grouping there is one row per person id, so the row count
        # equals the number of tracks.
        grouped = coords.groupby(person_identifier, as_index=False).mean()

        print("Cam {} number of person ids (tracks): {}".format(cam_id, len(grouped)))
    def initialize_base(self, ground_truth, working_dir_path):
        """Set up the MOT accumulator and ground-truth frame numbers.

        :param ground_truth: a dataframe, or a csv path to load one from.
        :param working_dir_path: working dir handed to pandas_loader.load_csv.
        """
        # Accumulator is updated during each frame; auto_id numbers frames.
        self.acc = mm.MOTAccumulator(auto_id=True)

        # Accept either an already-loaded dataframe or a csv path.
        if isinstance(ground_truth, str):
            self.ground_truth = pandas_loader.load_csv(working_dir_path,
                                                       ground_truth)
        else:
            self.ground_truth = ground_truth

        unique_frame_nos = set(self.ground_truth["frame_no_cam"].tolist())
        self.gt_frame_numbers_cam = [int(no) for no in unique_frame_nos]
    def get_combined_test_train_cam(self,cam_id):
        """Return the train (and optionally test) coords of one camera.

        It is possible to combine the train and test set without modifying
        frame numbers because they were separated beforehand.

        :param cam_id: id of the camera whose csv files are loaded.
        :return: one dataframe with reduced columns, test rows (if any) first.
        """
        parts = []

        if self.load_test:
            cam_coords_test = load_csv(
                self.work_dirs,
                os.path.join(self.dataset_test_folder,
                             "cam_{}".format(cam_id),
                             "coords_cam_{}.csv".format(cam_id)))
            parts.append(self.get_reduced_cam_coords(cam_coords_test))

        cam_coords_train = load_csv(
            self.work_dirs,
            os.path.join(self.dataset_train_folder,
                         "cam_{}".format(cam_id),
                         "coords_cam_{}.csv".format(cam_id)))
        parts.append(self.get_reduced_cam_coords(cam_coords_train))

        # Bug fix: DataFrame.append was removed in pandas 2.0; concatenate
        # the collected parts once instead.
        return pd.concat(parts, ignore_index=True)
Пример #6
0
        def get_cam_coords_train_or_test(dataset_folder, frame_nos_gta):
            """Collect coords of all cams restricted to the given GTA frames.

            :param dataset_folder: folder with one cam_<id> subfolder per cam.
            :param frame_nos_gta: GTA frame numbers to keep.
            :return: one dataframe with the matching rows of every camera.
            """
            per_cam_frames = []
            for cam_id in self.cam_ids:
                cam_coords = load_csv(
                    self.work_dirs,
                    os.path.join(dataset_folder, "cam_{}".format(cam_id),
                                 "coords_cam_{}.csv".format(cam_id)))

                # Keep only rows belonging to the requested GTA frames.
                per_cam_frames.append(
                    cam_coords[cam_coords["frame_no_gta"].isin(frame_nos_gta)])

            # Bug fix: DataFrame.append was removed in pandas 2.0; use concat.
            if per_cam_frames:
                return pd.concat(per_cam_frames, ignore_index=True)
            return pd.DataFrame()
        def get_combined_one_type(dataset_folder):
            """Merge the grouped coords of every camera into one dataframe.

            :param dataset_folder: folder with one cam_<id> subfolder per cam.
            :return: one dataframe of grouped rows from all cameras.
            """
            per_cam_frames = []
            for cam_id in cam_ids:
                coords_cam_path = os.path.join(
                    dataset_folder, "cam_{}".format(cam_id),
                    "coords_cam_{}.csv".format(cam_id))
                cam_dataframe = load_csv(working_dir=working_dir,
                                         csv_path=coords_cam_path)

                per_cam_frames.append(
                    group_drop_except_person_id_and_appearance_id(
                        cam_dataframe))

            # Bug fix: DataFrame.append was removed in pandas 2.0; use concat.
            if per_cam_frames:
                return pd.concat(per_cam_frames, ignore_index=True)
            return pd.DataFrame()
Пример #8
0
def load_ground_truth_dataframes(dataset_folder, working_dir, cam_ids):
    """Load and type-adjust the ground-truth dataframe of each camera.

    :param dataset_folder: root folder with one cam_<id> subfolder per camera.
    :param working_dir: working dir handed through to pandas_loader.load_csv.
    :param cam_ids: camera ids to load; the list is sorted in place.
    :return: list of dataframes ordered by ascending cam id.
    """
    cam_ids.sort()
    # load ground truth data for each cam
    ground_truth_dataframes = []
    for cam_id in cam_ids:
        # Bug fix: the path component was "cam_{}\\" with a hard-coded
        # backslash, which only works on Windows; let osp.join insert the
        # platform separator instead.
        cam_coords_path = osp.join(dataset_folder, "cam_{}".format(cam_id),
                                   "coords_fib_cam_{}.csv".format(cam_id))

        cam_coords = pandas_loader.load_csv(working_dir, cam_coords_path)

        cam_coords = adjustCoordsTypes(cam_coords,
                                       person_identifier="person_id")

        ground_truth_dataframes.append(cam_coords)

    return ground_truth_dataframes
Пример #9
0
    def get_last_frames_and_dfs_all_cams(cam_ids):
        """For every camera, fetch the frame to draw and the full dataframe.

        :param cam_ids: camera ids to load.
        :return: tuple (cam_id -> last-frame rows, cam_id -> full dataframe).
        """
        cam_id_to_last_frame = {}
        cam_id_to_cam_dataframe = {}

        for cam_id in cam_ids:
            csv_path = os.path.join(dataset_path, "cam_{}".format(cam_id),
                                    "coords_cam_{}.csv".format(cam_id))
            dataframe = load_csv(working_dir=working_dir, csv_path=csv_path)

            # Rows of the frame that should be drawn for this camera.
            cam_id_to_last_frame[cam_id] = get_last_frame(
                cam_dataframe=dataframe, frame_no_cam=frame_no_to_draw)

            cam_id_to_cam_dataframe[cam_id] = group_and_drop_unnecessary(
                dataframe)

        return cam_id_to_last_frame, cam_id_to_cam_dataframe
    def read_ground_truth(self, person_identifier="ped_id"):
        """Load this camera's ground truth and build per-person tracks.

        :param person_identifier: name of the person id column.
        :return: mapping of person id to its track, as produced by
            get_groundtruth_person_id_to_track.
        """
        csv_path = os.path.join(
            self.dataset_base_folder, "cam_{}".format(self.cam_id),
            "coords_cam_{}.csv".format(self.cam_id))

        ground_truth = load_csv(self.work_dirs, csv_path)

        # Collapse duplicate detections of a person within one GTA frame,
        # then normalize the column types and strip unused columns.
        ground_truth = ground_truth.groupby(
            ["frame_no_gta", person_identifier], as_index=False).mean()
        ground_truth = adjustCoordsTypes(ground_truth,
                                         person_identifier=person_identifier)
        ground_truth = drop_unnecessary_columns(ground_truth)

        return get_groundtruth_person_id_to_track(ground_truth)
    def get_number_of_tracks_per_frame(self,dataset_folder):
        """Count, per camera, how many tracks (persons) appear in each frame.

        :param dataset_folder: folder with one cam_<id> subfolder per camera.
        :return: dict mapping cam_id to a dataframe of per-frame row counts.
        """
        # Fix: removed the dead local `frame_no_cam_to_track_count =
        # defaultdict(return_zero)` that was created but never used.
        cam_id_to_frame_no_cam_to_track_count = {}
        for cam_id in self.cam_ids:
            cam_coords = load_csv(self.work_dirs, os.path.join(dataset_folder
                                                               , "cam_{}".format(cam_id)
                                                               , "coords_cam_{}.csv".format(cam_id)))

            cam_coords = self.get_reduced_cam_coords(cam_coords)

            # Row count per frame equals the number of tracked persons.
            person_id_per_frame_count = cam_coords.groupby(['frame_no_cam'],as_index=False).count()

            cam_id_to_frame_no_cam_to_track_count[cam_id] = person_id_per_frame_count

        return cam_id_to_frame_no_cam_to_track_count
Пример #12
0
def load_dataset_dataframes(dataset_folder, working_dir, cam_count,
                            take_frame_nos):
    """Load, filter and reduce the coordinate dataframe of every camera.

    :param dataset_folder: root folder with one cam_<id> subfolder per camera.
    :param working_dir: working dir handed through to load_csv.
    :param cam_count: number of cameras; cam ids 0..cam_count-1 are loaded.
    :param take_frame_nos: optional set of frame_no_cam values to keep.
    :return: list of dataframes, one per camera, in cam id order.
    """
    dataset_cam_dataframes = []
    for cam_id in range(cam_count):
        csv_path = os.path.join(dataset_folder, "cam_{}".format(cam_id),
                                "coords_cam_{}.csv".format(cam_id))
        frame = load_csv(working_dir, csv_path)

        # Optionally restrict to the requested camera frame numbers.
        if take_frame_nos is not None:
            frame = frame[frame["frame_no_cam"].isin(take_frame_nos)]

        frame = frame.astype(int)
        frame = group_and_drop_unnecessary(frame)

        dataset_cam_dataframes.append(frame)

    return dataset_cam_dataframes
    def initialize_base(self, ground_truth_path, working_dir_path):
        """Create the MOT accumulator and load ground-truth frame numbers.

        :param ground_truth_path: csv path of the ground-truth coordinates.
        :param working_dir_path: working dir for pandas_loader.load_csv.
        """
        # Accumulator is updated during each frame; auto_id numbers frames.
        self.acc = mm.MOTAccumulator(auto_id=True)

        self.ground_truth = pandas_loader.load_csv(working_dir_path,
                                                   ground_truth_path)

        # In old csv files the person column was "ped_id"; later files use
        # "person_id" — pick whichever is present.
        id_column = ("ped_id" if "ped_id" in self.ground_truth.columns
                     else "person_id")
        self.ground_truth = self.ground_truth.groupby(
            ["frame_no_gta", id_column], as_index=False).mean()

        frame_nos = self.ground_truth.groupby(
            "frame_no_cam", as_index=False).mean()["frame_no_cam"].tolist()
        self.gt_frame_numbers_cam = [int(no) for no in frame_nos]
    def read_ground_truth(self,person_identifier="person_id"):
        """Load and merge the ground-truth coordinates of all cameras.

        :param person_identifier: name of the person id column.
        :return: one dataframe with a cam_id column marking the source camera.
        """
        per_cam_coords = []
        for cam_id in self.cam_ids:
            dataset_base_path = os.path.join(self.dataset_base_folder
                                             ,"cam_{}".format(cam_id)
                                             ,"coords_cam_{}.csv".format(cam_id))

            cam_coords = load_csv(self.work_dirs, dataset_base_path)

            # Collapse duplicate detections within one GTA frame, then
            # normalize types and strip unused columns.
            cam_coords = cam_coords.groupby(["frame_no_gta", person_identifier], as_index=False).mean()
            cam_coords = adjustCoordsTypes(cam_coords, person_identifier=person_identifier)
            cam_coords = drop_unnecessary_columns(cam_coords)

            cam_coords["cam_id"] = cam_id
            per_cam_coords.append(cam_coords)

        # Bug fix: DataFrame.append was removed in pandas 2.0; concatenate
        # the collected per-cam frames once instead.
        if per_cam_coords:
            return pd.concat(per_cam_coords, ignore_index=True)
        return pd.DataFrame()
Пример #15
0
def get_ped_types(dataset_folder,cam_ids,working_dir,output_path):
    """Count how often each pedestrian type occurs across all cameras.

    Each (ped_type, person_id) pair is counted once even if the person is
    seen by several cameras. The frequency table is printed as LaTeX and
    written to output_path as csv.

    :param dataset_folder: root folder with one cam_<id> subfolder per camera.
    :param cam_ids: camera ids to include.
    :param working_dir: working dir handed through to load_csv.
    :param output_path: csv file path for the resulting frequency table.
    """

    def group_and_drop_unnecessary(cam_dataframe):
        # One row per person; duplicate sightings collapse to their mean.
        cam_dataframe = cam_dataframe.groupby(by=["person_id"],as_index=False).mean()
        cam_dataframe = cam_dataframe[["ped_type","person_id"]]
        return cam_dataframe.astype(int)

    ped_types_all_cams = []
    for cam_id in cam_ids:
        cam_dataframe_path = os.path.join(dataset_folder,"cam_{}".format(cam_id),"coords_cam_{}.csv".format(cam_id))
        cam_dataframe = load_csv(working_dir=working_dir,csv_path=cam_dataframe_path)

        cam_dataframe = group_and_drop_unnecessary(cam_dataframe)
        ped_types_all_cams.extend(zip(cam_dataframe["ped_type"],cam_dataframe["person_id"]))

    # Deduplicate (ped_type, person_id) pairs so each person counts once.
    ped_type_counter = Counter(pair[0] for pair in set(ped_types_all_cams))

    # Bug fix: build the table from records instead of DataFrame.append,
    # which was removed in pandas 2.0; this also keeps an empty counter from
    # crashing the later column selection.
    ped_type_frequency = pd.DataFrame(
        [{"Ped Type Description": ped_type_id_to_description[key],
          "Frequency": value}
         for key, value in ped_type_counter.items()],
        columns=["Ped Type Description", "Frequency"])
    ped_type_frequency = ped_type_frequency.astype({ "Frequency" : int })
    print(ped_type_frequency.to_latex(index=False))

    os.makedirs(os.path.split(output_path)[0],exist_ok=True)
    ped_type_frequency.to_csv(output_path,index=False)
    print("Saved to path: {}".format(output_path))
def load_ground_truth_dataframes(dataset_folder, working_dir, cam_ids):
    """Load, group and clean the ground-truth dataframe of each camera.

    :param dataset_folder: root folder with one cam_<id> subfolder per camera.
    :param working_dir: working dir handed through to pandas_loader.load_csv.
    :param cam_ids: camera ids to load; the list is sorted in place.
    :return: list of dataframes ordered by ascending cam id.
    """
    cam_ids.sort()
    ground_truth_dataframes = []
    for cam_id in cam_ids:
        csv_path = osp.join(dataset_folder, "cam_{}/".format(cam_id), "coords_cam_{}.csv".format(cam_id))
        cam_coords = pandas_loader.load_csv(working_dir, csv_path)

        # In old csv files the person column was "ped_id"; later files use
        # "person_id" — pick whichever is present.
        person_identifier = ("ped_id" if "ped_id" in cam_coords.columns
                             else "person_id")

        # Collapse duplicate detections within one GTA frame, then normalize
        # types and strip unused columns.
        cam_coords = cam_coords.groupby(["frame_no_gta", person_identifier], as_index=False).mean()
        cam_coords = adjustCoordsTypes(cam_coords, person_identifier=person_identifier)
        cam_coords = drop_unnecessary_columns(cam_coords)

        ground_truth_dataframes.append(cam_coords)

    return ground_truth_dataframes
def convert_annotations(gta_dataset_path,
                        coco_gta_dataset_path,
                        work_dirs,
                        cam_number=6,
                        img_dims=(1920, 1080),
                        person_id_name="person_id",
                        samplingRate=41):
    """Convert GTA MTMCT annotations and images into COCO format.

    Per camera, the coordinate csv is loaded, subsampled to every
    samplingRate-th frame, and each kept frame contributes one COCO image
    entry plus its annotations; the frame image is copied into the COCO
    dataset folder by a worker pool. The combined dict is dumped to
    coords.json.

    :param gta_dataset_path: source dataset root with cam_<id> subfolders.
    :param coco_gta_dataset_path: target folder for the COCO-format dataset.
    :param work_dirs: working dirs handed through to load_csv.
    :param cam_number: number of cameras to process (cam ids 0..n-1).
    :param img_dims: (width, height) of the source images.
    :param person_id_name: name of the person id column.
    :param samplingRate: keep every samplingRate-th frame of each camera.
    :return: the assembled COCO dict.
    """
    coco_dict = {
        'info': {
            'description': 'GTA_MTMCT',
            'url': 'gtav.',
            'version': '1.0',
            'year': 2019,
            'contributor': 'Philipp Koehl',
            'date_created': '2019/07/16',
        },
        'licences': [{
            'url': 'http://creativecommons.org/licenses/by-nc/2.0',
            'id': 2,
            'name': 'Attribution-NonCommercial License'
        }],
        'images': [],
        'annotations': [],
        'categories': [{
            'supercategory': 'person',
            'id': 1,
            'name': 'person'
        }, {
            'supercategory': 'background',
            'id': 2,
            'name': 'background'
        }]
    }

    coco_gta_dataset_images_path = osp.join(coco_gta_dataset_path, "images")
    os.makedirs(coco_gta_dataset_images_path, exist_ok=True)
    os.makedirs(coco_gta_dataset_path, exist_ok=True)

    for cam_id in range(cam_number):
        print("processing cam_{}".format(cam_id))

        cam_path = os.path.join(gta_dataset_path, "cam_{}".format(cam_id))
        csv_path = osp.join(cam_path, "coords_cam_{}.csv".format(cam_id))

        cam_coords = load_csv(work_dirs, csv_path)
        # Collapse duplicate detections of a person within one GTA frame.
        cam_coords = cam_coords.groupby(["frame_no_gta", person_id_name],
                                        as_index=False).mean()
        cam_frames = cam_coords.groupby("frame_no_gta", as_index=False).mean()

        # Keep every samplingRate-th frame.
        cam_frames = cam_frames.iloc[::samplingRate]

        pbar = tqdm(total=len(cam_frames))

        def updateTqdm(*a):
            pbar.update()

        # Worker pool handles the slow image copies; annotation bookkeeping
        # stays in the main process. (Fix: the old comment claimed "all
        # available cores" while the pool size is fixed at 10.)
        pool = mp.Pool(processes=10)
        for _, cam_frame in cam_frames.iterrows():
            frame_no_cam = int(cam_frame["frame_no_cam"])
            frame_no_gta = int(cam_frame["frame_no_gta"])

            cam_coords_frame = cam_coords[cam_coords["frame_no_cam"] ==
                                          frame_no_cam]

            frame_annotations = get_frame_annotation(cam_coords_frame,
                                                     frame_no_gta, img_dims)
            image_name = "image_{}_{}.jpg".format(frame_no_cam, cam_id)
            image_path_gta = osp.join(cam_path, image_name)
            image_path_gta_coco = osp.join(coco_gta_dataset_images_path,
                                           image_name)

            coco_dict['images'].append({
                'license': 4,
                'file_name': image_name,
                'height': img_dims[1],
                'width': img_dims[0],
                'date_captured': '2019-07-28 00:00:00',
                'id': frame_no_gta
            })

            coco_dict['annotations'].extend(frame_annotations)
            # Copy the image asynchronously; the callback advances the bar.
            pool.apply_async(copyfile,
                             args=(image_path_gta, image_path_gta_coco),
                             callback=updateTqdm)
        pool.close()
        pool.join()
        # Fix: the progress bar was never closed, leaking one bar per camera.
        pbar.close()

    mmcv.dump(coco_dict, osp.join(coco_gta_dataset_path, "coords.json"))
    return coco_dict