Code Example #1
def to_cpu(checkpoint):
    """
    Load a multi-GPU (DataParallel) checkpoint, move the weights to the CPU,
    strip the "module." prefix from parameter names, and save the result as
    "cpu_<checkpoint>".
    """
    try:
        # try loading the checkpoint into a ResNet-50 backbone first
        retinanet = resnet50(13)
        retinanet = nn.DataParallel(retinanet, device_ids=[0, 1, 2, 3])
        retinanet.load_state_dict(torch.load(checkpoint))
    except Exception:
        # fall back to a ResNet-34 backbone if the state dict does not match
        retinanet = resnet34(13)
        retinanet = nn.DataParallel(retinanet, device_ids=[0, 1, 2, 3])
        retinanet.load_state_dict(torch.load(checkpoint))

    # re-wrap on a single device, then move all weights to the CPU
    retinanet = nn.DataParallel(retinanet, device_ids=[0])
    retinanet = retinanet.cpu()

    # strip the DataParallel "module." prefix so the weights load on a plain model
    new_state_dict = {}
    state_dict = retinanet.state_dict()
    for key in state_dict:
        new_state_dict[key.split("module.")[-1]] = state_dict[key]

    torch.save(new_state_dict, "cpu_{}".format(checkpoint))
    print("Successfully created: cpu_{}".format(checkpoint))
Code Example #2
    # enable CUDA
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda:0" if use_cuda else "cpu")

    # load localizer
    loc_cp = os.path.join(os.getcwd(), "config", "localizer_state_dict.pt")
    localizer = resnet34(13)
    localizer.load_state_dict(torch.load(loc_cp))
    localizer = localizer.to(device)
    localizer.eval()
    localizer.training = False

    # load detector
    det_cp = os.path.join(os.getcwd(), "config", "detector_state_dict.pt")
    detector = resnet50(13)
    detector.load_state_dict(torch.load(det_cp))
    detector = detector.to(device)
    detector.eval()
    detector.training = False

    # load filter
    filter_params = os.path.join(os.getcwd(), "config",
                                 "filter_params_tuned.cpkl")

    if not LOCALIZE:
        localizer = None

    with open(filter_params, "rb") as f:
        kf_params = pickle.load(f)
        # these adjustments make the filter a bit less laggy
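        # (the excerpt is cut off here; in Code Example #3 this same comment is
        # followed by the rescalings below, so presumably they belong here too --
        # unverified against the original file)
        kf_params["R"] /= 20
        kf_params["R2"] /= 500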
Code Example #3
def track_sequence(input_file,
                   output_directory,
                   config_file,
                   log_file,
                   com_queue=None,
                   config="DEFAULT",
                   worker_id=0,
                   com_rate=15):
    """
    Tracks a video sequence according to the parameters specified in config_file
    
    Parameters
    ----------
    input_file : string
        Path to video sequence to be tracked
    output_directory : string
        Path to directory where tracking outputs should be written
    config_file : string
        Path to configuration file with parameter settings for tracker
    log_file : string
        Path to log file
    com_queue : multiprocessing Queue, (optional)
        Queue for communicating with manager process in a multiprocess scenario
    config : string, (optional)
        Specifies which camera configuration within config file should be used
    worker_id : int, (optional)
        Worker ID assigned by manager process, if any; also used as the CUDA
        device index for this worker
    com_rate : int, (optional)
        Rate at which the tracker reports progress to com_queue
    """

    # write to queue that worker has started
    if com_queue is not None:
        start = time.time()
        key = "DEBUG"
        message = "Worker {} (PID {}) is executing".format(
            worker_id, os.getpid())
        com_queue.put((start, key, message, worker_id))

    # 1. parse config file -use default if named config is not available
    configs = parse_config_file(config_file)
    for configuration in configs:
        if configuration["name"] == config:
            break
        elif configuration["name"] == "DEFAULT":
            default_configuration = configuration
        else:
            configuration = None
    if configuration is None:
        configuration = default_configuration

    if com_queue is not None:
        start = time.time()
        key = "INFO"
        message = "Worker {} (PID {}) using configuration {}".format(
            worker_id, os.getpid(), configuration["name"])
        com_queue.put((start, key, message, worker_id))
    try:
        # 2. initialize tracker with parsed parameters
        # enable CUDA
        use_cuda = torch.cuda.is_available()
        device = torch.device(worker_id if use_cuda else "cpu")

        # to deal with pesky code that uses .cuda() instead of to(device)
        torch.cuda.set_device(worker_id)

        det_step = configuration["det_step"]
        skip_step = configuration["skip_step"]

        # load kf_params
        with open(configuration["kf_parameter_path"], "rb") as f:
            kf_params = pickle.load(f)
            # these adjustments make the filter a bit less laggy
            kf_params["R"] /= 20
            kf_params["R2"] /= 500

        # load class_dict
        with open(configuration["class_dict_path"], "rb") as f:
            class_dict = pickle.load(f)

        # load detector
        det_cp = configuration["detector_parameters"]
        detector = resnet50(configuration["num_classes"], device_id=worker_id)
        detector.load_state_dict(torch.load(det_cp))
        detector = detector.to(device)
        detector.eval()
        detector.training = False
        detector.freeze_bn()

        # load localizer
        if configuration["localize"]:
            loc_cp = configuration["localizer_parameters"]
            localizer = resnet34(configuration["num_classes"],
                                 device_id=worker_id)
            localizer.load_state_dict(torch.load(loc_cp))
            localizer = localizer.to(device)
            localizer.eval()
            localizer.training = False
        else:
            localizer = None

        if com_queue is not None:
            d1 = detector.regressionModel.conv1.weight.device
            ts = time.time()
            key = "DEBUG"
            message = "Worker {} (PID {}): Detector on device {}".format(
                worker_id, os.getpid(), d1)
            com_queue.put((ts, key, message, worker_id))

        if com_queue is not None and localizer is not None:
            d2 = localizer.regressionModel.conv1.weight.device
            ts = time.time()
            key = "DEBUG"
            message = "Worker {} (PID {}): Localizer on device {}.".format(
                worker_id, os.getpid(), d2)
            com_queue.put((ts, key, message, worker_id))

    except Exception as e:
        ts = time.time()
        key = "ERROR"
        message = "Worker {} (PID {}) {} error: {}".format(
            worker_id, os.getpid(), type(e), e)
        if com_queue is not None:
            com_queue.put((ts, key, message, worker_id))
        # initialization failed, so the models and filter parameters may be
        # undefined; bail out rather than continuing with partial state
        return

    # load other params
    init_frames = configuration["init_frames"]
    fsld_max = configuration["fsld_max"]
    matching_cutoff = configuration["matching_cutoff"]
    ber = configuration["box_expansion_ratio"]
    iou_cutoff = configuration["iou_cutoff"]
    det_conf_cutoff = configuration["det_conf_cutoff"]
    SHOW = configuration["show_tracking"]
    output_video_path = configuration["output_video_path"]

    # the configured value only acts as an on/off flag; the actual output path
    # is derived from the output directory and the input file name below
    if output_video_path is not None:
        output_video_path = output_directory.split(
            "tracking_outputs")[0] + "video_outputs/{}_output".format(
                input_file.split("/")[-1].split(".mp4")[0])
        try:
            os.mkdir(output_video_path)
        except FileExistsError:
            pass
        if com_queue is not None:
            ts = time.time()
            key = "DEBUG"
            message = "Worker {} (PID {}) will write output frames to file: {}".format(
                worker_id, os.getpid(), output_video_path)
            com_queue.put((ts, key, message, worker_id))

    checksum_path = configuration["checksum_path"]
    geom_path = configuration["geom_path"]
    transform_path = configuration["transform_path"]
    # make a new output directory if not yet initialized
    try:
        os.mkdir(output_directory)
    except FileExistsError:
        pass

    try:
        tracker = Localization_Tracker(input_file,
                                       detector,
                                       localizer,
                                       kf_params,
                                       class_dict,
                                       device_id=worker_id,
                                       det_step=det_step,
                                       init_frames=init_frames,
                                       ber=ber,
                                       det_conf_cutoff=det_conf_cutoff,
                                       fsld_max=fsld_max,
                                       matching_cutoff=matching_cutoff,
                                       iou_cutoff=iou_cutoff,
                                       PLOT=SHOW,
                                       OUT=output_video_path,
                                       skip_step=skip_step,
                                       checksum_path=checksum_path,
                                       geom_path=geom_path,
                                       output_dir=output_directory,
                                       transform_path=transform_path,
                                       com_queue=com_queue,
                                       com_rate=com_rate)
        # 3. track
        if com_queue is not None:
            # write to queue that worker is starting tracking
            end = time.time()
            key = "DEBUG"
            message = "Worker {} (PID {}) starting tracking.".format(
                worker_id, os.getpid())
            com_queue.put((end, key, message, worker_id))

        tracker.track()

        if com_queue is not None:
            # write to queue that tracking has finished and results are being written
            end = time.time()
            key = "DEBUG"
            message = "Worker {} (PID {}) finished tracking. Writing results now.".format(
                worker_id, os.getpid())
            com_queue.put((end, key, message, worker_id))
            write_start_time = end
        tracker.write_results_csv()

        if com_queue is not None and output_video_path is not None:
            # write to queue that results have been written and video output is starting
            end = time.time()
            key = "DEBUG"
            message = "Worker {} (PID {}) finished writing results. Took {} seconds. Condensing frames into video now.".format(
                worker_id, os.getpid(),
                time.time() - write_start_time)
            com_queue.put((end, key, message, worker_id))
        elif com_queue is not None:
            # write to queue that results have been written
            end = time.time()
            key = "DEBUG"
            message = "Worker {} (PID {}) finished writing results. Took {} seconds.".format(
                worker_id, os.getpid(),
                time.time() - write_start_time)
            com_queue.put((end, key, message, worker_id))

        if output_video_path is not None:
            im_to_vid(output_video_path, DELETE_FRAMES=True)

        if com_queue is not None:
            # write to queue that worker has finished
            end = time.time()
            key = "WORKER_END"
            message = "Worker {} (PID {}) is done executing".format(
                worker_id, os.getpid())
            com_queue.put((end, key, message, worker_id))

    except Exception as e:
        if com_queue is not None:
            end = time.time()
            key = "ERROR"
            message = "Worker {} (PID {}) error {} during tracking.".format(
                worker_id, os.getpid(), e)
            com_queue.put((end, key, message, worker_id))
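A hedged sketch of how track_sequence might be launched from a manager process. The file paths and config name are hypothetical, and this multiprocessing wiring is only one plausible way the com_queue/worker_id parameters could be used; it is not taken from the original repository.

# Hypothetical launcher (paths and config name are illustrative only)
import multiprocessing as mp

if __name__ == "__main__":
    com_queue = mp.Queue()
    worker = mp.Process(target=track_sequence,
                        args=("/data/video/cam_01.mp4",           # input_file
                              "/data/tracking_outputs/cam_01",    # output_directory
                              "./config/tracker.config",          # config_file
                              "./logs/cam_01.log"),               # log_file
                        kwargs={"com_queue": com_queue,
                                "config": "DEFAULT",
                                "worker_id": 0})
    worker.start()

    # drain status messages until the worker reports completion or an error
    while True:
        ts, key, message, wid = com_queue.get()
        print(key, message)
        if key in ("WORKER_END", "ERROR"):
            break
    worker.join()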
Code Example #4
    # Paths to data here
    label_dir       = "/home/worklab/Data/cv/Detrac/DETRAC-Train-Annotations-XML-v3"
    train_partition = "/home/worklab/Data/cv/Detrac/detrac_train_partition"
    val_partition   = "/home/worklab/Data/cv/Detrac/detrac_val_partition"

    ###########################################################################


    # Create the model
    if depth == 18:
        retinanet = model.resnet18(num_classes=num_classes, pretrained=True)
    elif depth == 34:
        retinanet = model.resnet34(num_classes=num_classes, pretrained=True)
    elif depth == 50:
        retinanet = model.resnet50(num_classes=num_classes, pretrained=True)
    elif depth == 101:
        retinanet = model.resnet101(num_classes=num_classes, pretrained=True)
    elif depth == 152:
        retinanet = model.resnet152(num_classes=num_classes, pretrained=True)
    else:
        raise ValueError('Unsupported model depth, must be one of 18, 34, 50, 101, 152')

    # create dataloaders
    try:
        # build the datasets only if they don't already exist (e.g. when
        # re-running this script in an interactive session)
        train_data
    except NameError:
        # datasets here defined for UA Detrac Dataset
        train_data = LocMulti_Dataset(train_partition, label_dir, cs=112)
        val_data = LocMulti_Dataset(val_partition, label_dir, cs=112)
        
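The excerpt stops before the datasets are wrapped for training. Below is a minimal sketch of the usual next step, assuming standard torch.utils.data usage; the batch size and worker count are illustrative rather than taken from the original script, and a custom collate_fn may be needed if the dataset returns variable-length annotation lists.

from torch.utils.data import DataLoader

# illustrative settings only; the original training script may differ
train_loader = DataLoader(train_data, batch_size=8, shuffle=True, num_workers=4)
val_loader = DataLoader(val_data, batch_size=8, shuffle=False, num_workers=4)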
Code Example #5
          n_iterations=20000,
          save_file=INIT,
          speed_init="smooth",
          state_size=8)

    #%% fit measurement error

    with open(INIT, "rb") as f:
        kf_params = pickle.load(f)

    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda:0" if use_cuda else "cpu")

    cp = os.path.join(os.getcwd(), "config", "localizer_state_dict.pt")
    # NOTE: the config path above is immediately overridden by this
    # hard-coded checkpoint file
    cp = "cpu_detrac_localizer112_retinanet_epoch30.pt"
    retinanet = resnet50(13)
    retinanet.load_state_dict(torch.load(cp))
    retinanet = retinanet.to(device)
    retinanet.eval()
    retinanet.training = False

    vecs = fit_localizer_R(loader,
                           kf_params,
                           device,
                           retinanet,
                           bers=[1],
                           save_file=INIT,
                           n_iterations=200,
                           skew_ratio=0,
                           PLOT=True,
                           wer=1.25,
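The call above is cut off mid-argument list in the source. As a rough, hypothetical illustration of the idea behind this "fit measurement error" step (not the repository's actual implementation): if the returned vecs hold residuals between localizer outputs and ground truth, their sample covariance is a natural estimate of the filter's measurement noise.

import numpy as np

# Sketch only, under the assumption stated above: rows of vecs are samples,
# columns are measurement dimensions, so the column covariance approximates R.
R_est = np.cov(np.asarray(vecs), rowvar=False)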
Code Example #6
                8: 'MiniVan',
                9: 'Truck-Box-Med',
                10: 'Truck-Util',
                11: 'Truck-Pickup',
                12: 'Truck-Flatbed',
            }

            # get filter
            filter_state_path = "./config/filter_params_tuned.cpkl"

            with open(filter_state_path, "rb") as f:
                kf_params = pickle.load(f)
                kf_params["R"] /= 100

            # get localizer
            localizer = resnet50(num_classes=13)
            cp = torch.load(loc_cp)
            localizer.load_state_dict(cp)

            # no localization update!
            if not LOCALIZE:
                localizer = None

            # get detector
            detector = resnet50(num_classes=13)
            try:
                detector.load_state_dict(torch.load(det_cp))
            except Exception:
                # the checkpoint may be a full training checkpoint; unwrap the
                # raw model weights before loading
                temp = torch.load(det_cp)["model_state_dict"]
                new = {}
                for key in temp:
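                    # presumed continuation (a sketch mirroring Code Example #1,
                    # not necessarily the original file's exact code): strip any
                    # DataParallel "module." prefix and load the cleaned weights
                    new[key.split("module.")[-1]] = temp[key]
                detector.load_state_dict(new)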