Example #1
0
def create_uadetrac():
    """Build train/test ``BatchDatset`` wrappers over cached UA-DETRAC images.

    The cached image array is split 80/20 along its first (frame) axis.

    :return: tuple ``(train_dataset, test_dataset)``
    """
    from loaders.uadetrac_loader import UADetracLoader
    from eva_storage.external.wnet.chen_wnet_cp.src.data_io.BatchDatsetReader_VOC import BatchDatset

    all_images = UADetracLoader().load_cached_images()
    split_at = int(0.8 * all_images.shape[0])

    # NOTE(review, kept from original author): do we really need BatchDatset
    # here, or can we use the uadetrac dataset that we already defined?
    train_dataset = BatchDatset(all_images[:split_at], True)
    test_dataset = BatchDatset(all_images[split_at:], False)
    return train_dataset, test_dataset
Example #2
0
class Runner:
    """Drives the full pipeline: load -> preprocess -> train -> cluster -> index."""

    def __init__(self):
        # Pipeline stages; the classes are project modules assumed to be
        # imported by the enclosing file (imports not visible in this chunk).
        self.loader = UADetracLoader()
        self.preprocess = PreprocessingModule()
        self.network = UNet()
        self.cluster = ClusterModule()
        self.index = IndexingModule()

    def run(self):
        """
        Steps:
        1. Load the data
        2. Preprocess the data
        3. Train the network
        4a. Cluster the data
        4b. Postprocess the data
        5a. Generate compressed form
        5b. Generate indexes and perform CBIR
        :return: ???
        """
        import time
        st = time.time()
        # 1. Load the image
        images = self.loader.load_cached_images()
        labels = self.loader.load_cached_labels()
        # presumably `labels` is a dict keyed by annotation type -- TODO confirm
        vehicle_labels = labels['vehicle']
        video_start_indices = self.loader.get_video_start_indices()
        print("Done loading images in", time.time() - st, "(sec)")

        # 2. Begin preprocessing (background subtraction per video segment)
        st = time.time()
        segmented_images = self.preprocess.run(images, video_start_indices)
        print("Done with background subtraction in", time.time() - st, "(sec)")
        self.preprocess.saveSegmentedImages()

        # 3. Train the main network, then extract its outputs.
        st = time.time()
        self.network.train(images, segmented_images)
        final_compressed_images, final_segmented_images = self.network.execute(
        )
        print("Done training the main network in", time.time() - st, "(sec)")

        # 4a. Cluster the compressed representation.
        st = time.time()
        cluster_labels = self.cluster.run(final_compressed_images)
        print("Done clustering in", time.time() - st, "(sec)")

        # 5b. Build the index.
        # NOTE(review): this snippet appears truncated here -- the original
        # method likely continues past this point.
        st = time.time()
        self.index.train(images, final_segmented_images, vehicle_labels)
Example #3
0
def create_dataset2():
    """
    Concat the original images with output of the JNET to train the network
    :return: (dataset, cfg) -- UADDetection populated with the concatenated
        images/labels/boxes, and the SSD config used
    """

    logger.info("We are loading UADetrac!!")
    cfg = uad
    dataset = UADDetection(transform=SSDAugmentation(cfg['min_dim'], MEANS))
    loader = UADetracLoader()

    # Original (raw) training frames with their labels/boxes.
    images = loader.load_cached_images(name='uad_train_images.npy',
                                       vi_name='uad_train_vi.npy')
    boxes = loader.load_cached_boxes(name='uad_train_boxes.npy')
    labels = loader.load_cached_labels(name='uad_train_labels.npy')

    labels = labels['vehicle']
    images, labels, boxes = loader.filter_input3(images, labels, boxes)

    # JNET-processed copies of the same frames; labels/boxes caches are the
    # same files, filtered identically.
    images2 = loader.load_cached_images(name='jnet_train-200-300.npy',
                                        vi_name='uad_train_vi.npy')
    boxes2 = loader.load_cached_boxes(name='uad_train_boxes.npy')
    labels2 = loader.load_cached_labels(name='uad_train_labels.npy')
    labels2 = labels2['vehicle']
    images2, labels2, boxes2 = loader.filter_input3(images2, labels2, boxes2)

    final_images = np.concatenate((images, images2), axis=0)
    final_boxes = np.concatenate((boxes, boxes2), axis=0)
    final_labels = np.concatenate((labels, labels2), axis=0)

    logger.info(f"original: {images.shape}, {len(labels)}, {len(boxes)}")
    logger.info(
        f"final: {final_images.shape}, {final_boxes.shape}, {final_labels.shape}"
    )

    # BUG FIX: the dataset was previously populated with the *original*
    # arrays (`images`, `labels`, `boxes`), so the concatenated data computed
    # above was never used -- contradicting this function's purpose.
    dataset.set_images(final_images)
    dataset.set_labels(final_labels)
    dataset.set_boxes(final_boxes)

    return dataset, cfg
Example #4
0
 def __init__(self):
     # Wire up the five pipeline stages. The classes (UADetracLoader, UNet,
     # etc.) are assumed to be imported by the enclosing file -- the class
     # header and imports are not visible in this chunk.
     self.loader = UADetracLoader()
     self.preprocess = PreprocessingModule()
     self.network = UNet()
     self.cluster = ClusterModule()
     self.index = IndexingModule()
Example #5
0
        # Background-subtract the frames; video_start_indices presumably marks
        # clip boundaries so subtraction does not bleed across videos -- TODO confirm.
        segmented_images = self.preprocess.run(images, video_start_indices)
        print("Done with background subtraction in", time.time() - st, "(sec)")
        self.preprocess.saveSegmentedImages()

        # Train the main network on (image, segmentation) pairs, then pull out
        # its compressed and segmented outputs.
        st = time.time()
        self.network.train(images, segmented_images)
        final_compressed_images, final_segmented_images = self.network.execute(
        )
        print("Done training the main network in", time.time() - st, "(sec)")

        # Cluster the compressed representation.
        st = time.time()
        cluster_labels = self.cluster.run(final_compressed_images)
        print("Done clustering in", time.time() - st, "(sec)")

        # Build the index over segmented frames + vehicle labels.
        st = time.time()
        self.index.train(images, final_segmented_images, vehicle_labels)


if __name__ == "__main__":
    # 0. Initialize the modules
    loader = UADetracLoader()
    preprocess = PreprocessingModule()
    network = UNet()

    import time
    st = time.time()
    # 1. Load the images (cached images is fine)
    images = loader.load_cached_images()
    labels = loader.load_cached_labels()
    video_start_indices = loader.get_video_start_indices()
Example #6
0
import numpy as np
from loaders.uadetrac_loader import UADetracLoader
from logger import Logger
from eva_storage.baselines.indexing.external.ssd.custom_code.train_jnet_ssd_uad import load_jnet_results
import eva_storage.baselines.indexing.external.ssd.custom_code.util_ssd_uad as util_custom

if __name__ == "__main__":
    ## variables we need to change
    save_file_output = 'jnet_test-200-300.npy'
    model_directory = '/nethome/jbang36/eva_jaeho/data/models/history200_dist2thresh300-epoch60.pth'

    os.environ[
        "CUDA_VISIBLE_DEVICES"] = "1"  ## we want to run everything on gpu 1
    DEVICE = train_device = torch.device(
        'cuda')  ## will this execute everything on gpu 1?
    loader = UADetracLoader()
    logger = Logger()

    tic = time.perf_counter()

    ## we load from directories given
    images = loader.load_cached_images(name='uad_test_images.npy',
                                       vi_name='uad_test_vi.npy')
    ## before we divide the dataset, we want to load the segmented images..or process them
    save_directory = os.path.join('/nethome/jbang36/eva_jaeho/data/npy_files',
                                  save_file_output)

    logger.info(f"Starting segmentation with model in {model_directory}")
    tic = time.perf_counter()
    images = load_jnet_results(images, model_directory=model_directory)
Example #7
0
        # Sentinel values when this class has nothing to evaluate -- the
        # enclosing function (a VOC-style AP routine, presumably) is not
        # visible in this chunk; TODO confirm against the full source.
        rec = -1.
        prec = -1.
        ap = -1.

    # (recall, precision, average precision)
    return rec, prec, ap


if __name__ == '__main__':
    # Evaluate a trained SSD model on UA-DETRAC frames loaded from disk.
    # Script appears truncated by the scrape after dataset construction.
    # load net
    num_classes = len(labelmap) + 1  # +1 for background
    net = build_ssd('test', 300, num_classes)  # initialize SSD
    net.load_state_dict(torch.load(args.trained_model))
    net.eval()
    print('Finished loading model!')
    # load data
    loader = UADetracLoader()

    images = loader.load_images(
        dir=os.path.join(home_dir, 'data', 'ua_detrac', '5_images'))
    labels, boxes = loader.load_labels(
        dir=os.path.join(home_dir, 'data', 'ua_detrac', '5_xml'))
    # keep only the vehicle-type annotations -- presumably `labels` is a dict
    # keyed by annotation type; TODO confirm
    labels = labels['vehicle']

    # drop frames without labels/boxes so images/labels/boxes stay aligned
    images, labels, boxes = loader.filter_input3(images, labels, boxes)

    dataset = UADDetection(transform=BaseTransform(300, dataset_mean),
                           target_transform=UADAnnotationTransform())
    dataset.set_images(images)
    dataset.set_labels(labels)
    dataset.set_boxes(boxes)
Example #8
0
    # Scatter the UDF results back onto the filter's frame mask: wherever the
    # filter marked a frame (value == 1), substitute the next UDF verdict.
    # The enclosing `def` line is not visible in this chunk.
    udf_i = 0
    new_list = potential_frame_indices.copy()
    for filter_i, value in enumerate(potential_frame_indices):
        if value == 1:
            new_list[filter_i] = final_potential_frame_indices[udf_i]
            udf_i += 1
    return new_list



if __name__ == "__main__":

    ### Let's assume you are interested in running a query such as `select FRAMES where FRAME contains VAN`

    ### load the UADetrac dataset
    loaders = UADetracLoader()
    images = loaders.load_cached_images()
    labels = loaders.load_cached_labels()
    boxes = loaders.load_cached_boxes()
    ### generate binary labels for filters
    utils_eva = Utils()
    binary_labels = utils_eva.labels2binaryUAD(labels, ['van']) ## can supply as ['car', 'van', 'bus', 'others'] to get binary labels for all


    ### train the filter -- later we can look into options that save these models
    ##### https://scikit-learn.org/stable/modules/model_persistence.html
    filters = FilterMinimum()
    filters.train(images, binary_labels)

    ### train the UDF
    ### NOTE: to train the UDF (SSD), it is much more convenient to run `python train_ssd_uad.py` -- I organized the code so that it runs out of the box and trains an SSD model that can be used for evaluation
Example #9
0
        # Convert each (left, top, right, bottom) pixel box into coordinates
        # relative to the image size (fractions in [0, 1], presumably for SSD
        # training -- TODO confirm). The enclosing `def` and the loop that
        # binds `boxes_per_frame`/`new_boxes_per_frame` are above this chunk.
        for i, box in enumerate(boxes_per_frame):
            left, top, right, bottom = box
            new_boxes_per_frame.append(
                (left / image_width, top / image_height, right / image_width,
                 bottom / image_height))
        new_boxes.append(new_boxes_per_frame)

    # Sanity checks: one normalized list per frame, same box count per frame.
    assert (len(new_boxes) == len(all_boxes))
    for i, boxes_per_frame in enumerate(all_boxes):
        assert (len(boxes_per_frame) == len(new_boxes[i]))

    return new_boxes


if __name__ == "__main__":
    loader = UADetracLoader()
    logger = Logger()
    images_train = loader.load_cached_images()
    labels_train = loader.load_cached_labels()
    boxes_train = loader.load_cached_boxes()

    ### we need to take out frames that don't have labels / boxes
    labels_train = labels_train['vehicle']
    images_train, labels_train, boxes_train = filter_input(
        images_train, labels_train, boxes_train)
    image_width, image_height = images_train.shape[1], images_train.shape[2]

    logger.info("Finished loading the UADetrac Dataset")

    ## we need to create a UA Dataset_lite..
    ## divide the set into train and validation
Example #10
0
    # Fragment of an evaluation script (enclosing scope not visible in this
    # chunk): pin GPU 1, load a trained SSD, then load JNET-segmented test data.
    ## gpu fix
    import os
    os.environ["CUDA_VISIBLE_DEVICES"] = '1'
    """
    This file is the optimized version of eval_uad
    eval_uad now works"""
    args.trained_model = 'weights/ssd300_JNET_UAD_95000.pth'

    num_classes = len(labelmap) + 1  # +1 for background
    net = build_ssd('test', 300, num_classes)  # initialize SSD
    net.load_state_dict(torch.load(args.trained_model))
    net.eval()
    logger.info(f"Loaded model {args.trained_model}")
    # load data
    loader = UADetracLoader()

    #images = loader.load_images(dir = os.path.join(home_dir, 'data', 'ua_detrac', '5_images'))
    #labels, boxes = loader.load_labels(dir = os.path.join(home_dir, 'data', 'ua_detrac', '5_xml'))

    ### uad test
    #images = loader.load_cached_images(name = 'uad_test_images.npy', vi_name = 'uad_test_vi.npy')
    #labels = loader.load_cached_labels(name = 'uad_test_labels.npy')
    #boxes = loader.load_cached_boxes(name = 'uad_test_boxes.npy')

    ### jnet_test -- evaluate on JNET-segmented frames instead of raw frames
    images = loader.load_cached_images(name='jnet_test-200-300.npy',
                                       vi_name='uad_test_vi.npy')
    labels = loader.load_cached_labels(name='uad_test_labels.npy')
    boxes = loader.load_cached_boxes(name='uad_test_boxes.npy')
Example #11
0
def create_dataset(dataset_name, is_train=None, cache_name=None):
    """Build a detection dataset plus its SSD config for the requested source.

    :param dataset_name: one of 'COCO', 'VOC', 'UAD', 'JNET'
    :param is_train: required for 'UAD'/'JNET'; True loads the train caches,
        False the test caches
    :param cache_name: required for 'JNET'; name of the cached image file
    :return: (dataset, cfg) on success, or None on a usage error
    """
    if dataset_name not in available_datasets.keys():
        logger.error(
            f"dataset: {dataset_name} not in {available_datasets.keys()}")
        # BUG FIX: execution previously fell through and crashed with an
        # UnboundLocalError on `dataset` at the final return; fail explicitly
        # instead (consistent with the JNET error paths below).
        return None

    # BUG FIX: these comparisons used `is` on string literals, which relies on
    # CPython interning (and emits a SyntaxWarning on modern Pythons); `==` is
    # the correct equality test.
    if (dataset_name == 'UAD'
            or dataset_name == 'JNET') and (is_train is None):
        logger.error(f"Must specify training or testing for UAD and JNET!")
        # Original behavior preserved: fall through, so a missing flag is
        # treated as `not is_train` (the test split) by the branches below.

    if dataset_name == 'COCO':
        if not os.path.exists(COCO_ROOT):
            logger.error('Must specify dataset_root if specifying dataset')
        cfg = coco
        dataset = COCODetection(root=COCO_ROOT,
                                transform=SSDAugmentation(
                                    cfg['min_dim'], MEANS))
    elif dataset_name == 'VOC':
        cfg = voc
        dataset = VOCDetection(root=VOC_ROOT,
                               transform=SSDAugmentation(
                                   cfg['min_dim'], MEANS))

    elif dataset_name == 'UAD' and is_train:
        logger.info("We are loading UADetrac!!")
        cfg = uad
        dataset = UADDetection(
            transform=SSDAugmentation(cfg['min_dim'], MEANS))
        loader = UADetracLoader()
        images = loader.load_cached_images(name='uad_train_images.npy',
                                           vi_name='uad_train_vi.npy')
        boxes = loader.load_cached_boxes(name='uad_train_boxes.npy')
        labels = loader.load_cached_labels(name='uad_train_labels.npy')

        labels = labels['vehicle']
        # drop frames without labels/boxes so the three arrays stay aligned
        images, labels, boxes = loader.filter_input3(images, labels, boxes)
        dataset.set_images(images)
        dataset.set_labels(labels)
        dataset.set_boxes(boxes)

    elif dataset_name == 'UAD' and not is_train:
        logger.info("We are loading UADetrac!!")
        cfg = uad
        dataset = UADDetection(
            transform=SSDAugmentation(cfg['min_dim'], MEANS))
        loader = UADetracLoader()
        images = loader.load_cached_images(name='uad_test_images.npy',
                                           vi_name='uad_test_vi.npy')
        boxes = loader.load_cached_boxes(name='uad_test_boxes.npy')
        labels = loader.load_cached_labels(name='uad_test_labels.npy')
        labels = labels['vehicle']
        images, labels, boxes = loader.filter_input3(images, labels, boxes)
        # keep the test split small so evaluation finishes quickly
        images = images[:4000]
        labels = labels[:4000]
        boxes = boxes[:4000]

        dataset.set_images(images)
        dataset.set_labels(labels)
        dataset.set_boxes(boxes)

    elif dataset_name == 'JNET' and is_train:
        if cache_name is None:
            logger.error("Cache name is required for JNET!! returning...")
            return None
        logger.info("We are loading JNET - UADetrac!!")
        cfg = uad
        dataset = UADDetection(
            transform=SSDAugmentation(cfg['min_dim'], MEANS))
        loader = UADetracLoader()
        # JNET images come from the caller-supplied cache; labels/boxes reuse
        # the UAD train caches (same frames, segmented by JNET).
        images = loader.load_cached_images(name=cache_name,
                                           vi_name='uad_train_vi.npy')
        labels = loader.load_cached_labels(name='uad_train_labels.npy')
        boxes = loader.load_cached_boxes(name='uad_train_boxes.npy')
        labels = labels['vehicle']

        images, labels, boxes = loader.filter_input3(images, labels, boxes)

        logger.info(f"images shape is {images.shape}")
        logger.info(f"labels length is {len(labels)}")
        logger.info(f"boxes length is {len(boxes)}")
        assert (images.shape[0] == len(labels))
        assert (len(labels) == len(boxes))
        dataset.set_images(images)
        dataset.set_labels(labels)
        dataset.set_boxes(boxes)

    elif dataset_name == 'JNET' and not is_train:
        if cache_name is None:
            logger.error("Cache name is required for JNET! returning....")
            return
        logger.info("We are loading JNET - UADetrac!!")
        cfg = uad
        dataset = UADDetection(
            transform=SSDAugmentation(cfg['min_dim'], MEANS))
        loader = UADetracLoader()
        images = loader.load_cached_images(name=cache_name,
                                           vi_name='uad_test_vi.npy')
        labels = loader.load_cached_labels(name='uad_test_labels.npy')
        boxes = loader.load_cached_boxes(name='uad_test_boxes.npy')
        labels = labels['vehicle']
        images, labels, boxes = loader.filter_input3(images, labels, boxes)

        ###FIXED: we will make this really small so that numbers appear fast
        images = images[:2000]
        labels = labels[:2000]
        boxes = boxes[:2000]

        logger.info(f"images shape is {images.shape}")
        logger.info(f"labels length is {len(labels)}")
        logger.info(f"boxes length is {len(boxes)}")
        dataset.set_images(images)
        dataset.set_labels(labels)
        dataset.set_boxes(boxes)

    return dataset, cfg
Example #12
0
        # Tail of a dataset-construction branch (the enclosing function starts
        # above this chunk): log the aligned array sizes, populate the dataset.
        logger.info(f"images shape is {images.shape}")
        logger.info(f"labels length is {len(labels)}")
        logger.info(f"boxes length is {len(boxes)}")
        dataset.set_images(images)
        dataset.set_labels(labels)
        dataset.set_boxes(boxes)

    return dataset, cfg


if __name__ == "__main__":
    ### let's save some things
    ### save the images, labels, boxes for all test and train
    logger.info("starting.....")
    loader = UADetracLoader()
    """
    images = loader.load_images(dir='/nethome/jbang36/eva_jaeho/data/ua_detrac/4_images')
    labels, boxes = loader.load_labels('/nethome/jbang36/eva_jaeho/data/ua_detrac/4_xml')
    assert(len(images) == len(boxes))
    loader.save_images(name = 'uad_train_images.npy', vi_name='uad_train_vi.npy')
    loader.save_labels(name = 'uad_train_labels.npy')
    loader.save_boxes(name = 'uad_train_boxes.npy')

    logger.info("Saved all train data!")
    """
    test_images = loader.load_images(
        dir='/nethome/jbang36/eva_jaeho/data/ua_detrac/5_images')
    test_labels, test_boxes = loader.load_labels(
        '/nethome/jbang36/eva_jaeho/data/ua_detrac/5_xml')
    assert (len(test_images) == len(test_boxes))
Example #13
0
    # Tail of an AP-evaluation function (the enclosing `def` is above this
    # chunk). NOTE(review): `ap` from the class-agnostic computation is never
    # used -- the log line and return value are the mean of `aps`, which must
    # be populated earlier in the (unseen) function body; confirm intent.
    iou_threshold = 0.5

    use_2007_metric = True
    ap = ssd_utils_custom.compute_average_precision_class_agnostic(
        true_case_stat, all_gb_boxes, all_difficult_cases, class_names,
        iou_threshold, use_2007_metric)

    logger.info(
        f"\nAverage Precision Class Agnostic Method: {sum(aps)/len(aps)}")
    logger.info(f"Used iou threshold of {iou_threshold}")

    return sum(aps) / len(aps)


if __name__ == "__main__":
    loader = UADetracLoader()
    logger = Logger()
    logger.info("Starting evaluation........")

    home_dir = '/home/jbang36/eva_jaeho'

    images_test = loader.load_images(
        dir=os.path.join(home_dir, 'data', 'ua_detrac', '5_images'))
    labels_test, boxes_test = loader.load_labels(
        dir=os.path.join(home_dir, 'data', 'ua_detrac', '5_xml'))
    labels_test = labels_test['vehicle']

    ### we need to take out frames that don't have labels / boxes
    images_test, labels_test, boxes_test = ssd_utils_custom.filter_input(
        images_test, labels_test, boxes_test)