Example #1
import os

import cv2
import numpy as np

from util import general_utils


def directory_to_data(directory, verbose=True):
    if verbose:
        print("Processing " + directory)
    # Immediate subdirectories of `directory`, one per class
    relative_class_dirs = next(os.walk(directory))[1]
    class_to_id = {key: value for (value, key) in enumerate(relative_class_dirs)}
    if verbose:
        print(class_to_id)

    id_to_class = {key: value for (key, value) in enumerate(relative_class_dirs)}
    X, y, image_names = list(), list(), list()
    for class_dir in relative_class_dirs:
        class_id = class_to_id[class_dir]
        class_dir = directory + "/" + class_dir
        if verbose:
            print("Processing " + class_dir)
        image_files = general_utils.get_all_files(class_dir)
        for image_file in image_files:
            # Read the image as grayscale
            image = cv2.imread(class_dir + "/" + image_file, 0)
            # File names follow the pattern <class>_karyotyping_<k>.<ext>
            _, _, k_number = image_file[:-4].split("_")
            image_names.append(id_to_class[class_id] + "_karyotyping_" + k_number)
            X.append(image)
            y.append(class_id)

    X = np.asarray(X)
    y = np.asarray(y)

    return X, y, id_to_class, image_names
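
For context, a minimal usage sketch (the data/karyotypes path is a placeholder, not from the source):

    X, y, id_to_class, image_names = directory_to_data("data/karyotypes")
    print(len(X), len(y))  # one entry per image
    print(id_to_class)     # e.g. {0: 'class_a', 1: 'class_b'}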
Example #2
def get_num_data(directory):
    # Count image files across all class subdirectories
    count = 0
    class_dirs = [directory + "/" + x for x in next(os.walk(directory))[1]]
    for class_dir in class_dirs:
        image_files = general_utils.get_all_files(class_dir)
        count += len(image_files)

    return count
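
A hypothetical call, using the same placeholder directory as above:

    print(get_num_data("data/karyotypes"))  # total image count across all class folders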
Example #3
def frame_files_to_video(frames_path, video_path, fps=24):
    frame_files = general_utils.get_all_files(frames_path,
                                              keep_dir=True,
                                              sort=True)

    frame_array = []
    size = None

    for frame_file in frame_files:
        # Read each frame file
        img = cv2.imread(frame_file)
        height, width, layers = img.shape
        size = (width, height)

        # Collect the frames into an image array
        frame_array.append(img)

    if size is None:
        raise ValueError("No frame files found in " + frames_path)

    # DIVX is an MPEG-4 codec, typically written to an .avi container
    out = cv2.VideoWriter(video_path, cv2.VideoWriter_fourcc(*'DIVX'), fps,
                          size)
    for frame in frame_array:
        out.write(frame)
    out.release()
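
A hypothetical invocation (both paths are placeholders):

    frame_files_to_video("data/highway/frames", "out/highway.avi", fps=30)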
Example #4
def detection_by_tracking(
        frame_dir,
        json_file,
        tracker_model,
        detection_threshold=0.9,
        tracking_threshold=0.9,
        save_json_file="data/demo_tracking/detection_by_tracking.tracking_json",
        offset=0,
        low=None,
        high=None,
        step=1,
        parallel=False,
        multithreading=False):
    # Load annotations
    with open(json_file, "r") as f:
        data = json.load(f)

    # Group annotations by the frame (image) they belong to
    annotations = dict()
    for annotation in data['annotations']:
        annotations.setdefault(annotation['image_id'], []).append(annotation)

    # Load frames
    frame_files = general_utils.get_all_files(frame_dir,
                                              keep_dir=True,
                                              sort=True)
    num_frame = len(frame_files)

    tracking_data = dict()
    tracking_data["images"] = data["images"]
    tracking_data["categories"] = data["categories"]
    tracking_data["annotations"] = list()

    if low is None:
        low = -int(1e9)

    if high is None:
        high = int(1e9)

    start = time.time()
    last_count = 0

    # Set up parallel processing
    if parallel:
        mp.set_start_method('spawn', force=True)
        mp.set_sharing_strategy('file_system')

        pool = Pool()
    else:
        pool = None
    results = []

    # Set up multithreaded processing
    if multithreading:
        executor = ThreadPoolExecutor()
    else:
        executor = None
    futures = []

    # Loop over frames
    for frame_id in range(num_frame):
        # Align id
        frame_id += offset

        # Frames with no detections get an empty box list
        num_box = len(annotations.get(frame_id, []))

        # Count boxes with high confidence
        count = 0
        for box_id in range(num_box):
            score = annotations[frame_id][box_id]["score"]
            if score > detection_threshold:
                count += 1

        # If this frame has more high-confidence boxes than the last one,
        # always track from it; otherwise apply the skip criterion
        if count <= last_count:
            last_count = count

            # Skip frames that are off the sampling step
            if frame_id % step != 0:
                continue
        else:
            last_count = count

        print("Processing frame", frame_id)

        forward_tracker = build_tracker(tracker_model)
        backward_tracker = build_tracker(tracker_model)

        # Loop over detection boxes
        for box_id in range(num_box):
            # print("=> Process box ", box_id)

            # Filter by detection score
            score = annotations[frame_id][box_id]["score"]
            if score < detection_threshold:
                # print("==> Skip")
                continue

            if multithreading:
                print(
                    f"---> Multithreaded tracking for box {box_id} frame {frame_id}"
                )
                # submit() forwards positional arguments, so pass them
                # individually rather than as a single tuple, and keep the
                # future so its annotations can be collected later
                futures.append(
                    executor.submit(single_box_in_single_frame_tracking,
                                    frame_files, frame_id, box_id, annotations,
                                    tracking_threshold, forward_tracker,
                                    backward_tracker, offset, low, high))

            if parallel:
                print(
                    f"---> Parallel tracking for box {box_id} frame {frame_id}"
                )
                # Keep one async result per (frame, box) pair instead of
                # overwriting a single slot per frame
                results.append(
                    pool.apply_async(single_box_in_single_frame_tracking, [
                        frame_files, frame_id, box_id, annotations,
                        tracking_threshold, forward_tracker, backward_tracker,
                        offset, low, high
                    ]))

            if not multithreading and not parallel:
                tracking_data[
                    "annotations"] += single_box_in_single_frame_tracking(
                        frame_files, frame_id, box_id, annotations,
                        tracking_threshold, forward_tracker, backward_tracker,
                        offset, low, high)

    # Collect annotations produced by the process pool and the thread pool
    for result in results:
        tracking_data["annotations"] += result.get()
    for future in futures:
        tracking_data["annotations"] += future.result()

    if pool is not None:
        pool.close()
        pool.join()
    if executor is not None:
        executor.shutdown()

    end = time.time()
    print(f"Total time: {(end - start)} s")

    with open(save_json_file, "w") as outfile:
        json.dump(tracking_data, outfile)
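
A hypothetical call with placeholder paths; the expected type of tracker_model is whatever build_tracker consumes in this project, which the excerpt does not show:

    detection_by_tracking(frame_dir="data/demo_tracking/frames",
                          json_file="data/demo_tracking/detections.json",
                          tracker_model=tracker_model,
                          detection_threshold=0.9,
                          tracking_threshold=0.9,
                          step=5)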
Example #5
#     "badWeather/skating": 800,
#     "badWeather/snowFall": 800
# }

if to_gif:
    from util import general_utils
    import imageio
    import numpy as np
    import cv2

    test_dir = args.data_dir

    start_idx = args.start_frame
    concat_axis = 0

    result_files = general_utils.get_all_files("tmp", keep_dir=True)
    result_files = sorted(result_files)

    if view_train:
        num_file = 240
    else:
        num_file = len(result_files)

    frames = list()
    for i in range(num_file):
        original = imageio.imread(
            f"{test_dir}/input/in{i + start_idx:06d}.jpg")
        original = cv2.resize(original, (128, 128))

        if view_train:
            foreground = imageio.imread(
Example #6
import argparse

import numpy as np
import cv2
from tqdm import tqdm

# Project-local helpers; image_utils is assumed to live alongside
# general_utils in the util package
from util import general_utils, image_utils

parser = argparse.ArgumentParser(description='tracking demo')
parser.add_argument('--raw_dir', type=str, help='path to frame files')
parser.add_argument('--tracking_dir', type=str, help='path to tracking_json')
parser.add_argument('--result_dir',
                    type=str,
                    help='path to save visualized results')
args = parser.parse_args()

raw_detection = general_utils.get_all_files(args.raw_dir,
                                            keep_dir=True,
                                            sort=True)
tracking_detection = general_utils.get_all_files(args.tracking_dir,
                                                 keep_dir=True,
                                                 sort=True)

general_utils.create_directory(args.result_dir)

num_frame = len(raw_detection)

for i in tqdm(range(num_frame)):
    raw = image_utils.read_image(raw_detection[i])
    tracking = image_utils.read_image(tracking_detection[i])

    combined = np.concatenate([raw, tracking], axis=1)
    cv2.imwrite(f"{args.result_dir}/frame_{'{:06d}'.format(i + 1)}.jpg",
Example #7
        U = U + Eta * (X_cat - U)
        V = V + Eta * (torch.pow(X_cat, 2) - V)

        return U, V, N, prob


if __name__ == '__main__':
    eta = 0.01
    k = 3
    c = 3

    """
    baseline/highway
    """
    frame_dir = "/Users/lekhang/Desktop/Khang/data/highway/input"
    frame_files = general_utils.get_all_files(f"{frame_dir}", keep_dir=True)
    frame_files = sorted(frame_files)

    frame_0 = cv2.imread(frame_files[0], 0)
    h, w = frame_0.shape

    U = np.array([cv2.imread(frame_files[i], 0).flatten() / 255. for i in range(k)]).T
    U = np.random.rand(*U.shape)  # TODO: set this make the result look very good - why?
    assert U.shape == (h * w, k)
    V = U ** 2
    N = np.ones((h * w, k))

    U2 = torch.from_numpy(np.random.rand(1, c * k, h, w)).float()
    V2 = torch.pow(U2, 2)
    N2 = torch.ones((1, c * k, h, w)).float()