Esempio n. 1
0
    def __init__(self, siamese_model_path, unet_path, dataset_path, use_cuda,
                 new_w, new_h):
        """Set up the SiamFC tracker, an optional segmentation net, and
        empty bookkeeping state for tracks and results."""
        # Paths and target image size used later in the pipeline.
        self.dataset_path = dataset_path
        self.unet_path = unet_path
        self.new_w = new_w
        self.new_h = new_h

        # Tracker is always constructed from the given Siamese checkpoint.
        self.tracker = TrackerSiamFC(net_path=siamese_model_path,
                                     use_cuda=use_cuda)

        # Segmentation net is only loaded when a U-Net path was supplied.
        pretrained = unet_path is not None
        if pretrained:
            print("Loading pretrained model")
            self.seg_net = self.load_unet()
        else:
            print("Did not load pretrained model")
            self.seg_net = None

        # Mutable tracking state, filled in as frames are processed.
        self.train_images = None
        self.tracks = {}
        self.track_count = 0
        self.result = []
        self.set_01 = self.set_02 = None
Esempio n. 2
0
if __name__ == '__main__':
    # Train SiamFC on GOT-10k: build (exemplar, search) training pairs,
    # stream them through a DataLoader and run the tracker's train step.

    # setup dataset
    root_dir = 'data/GOT-10k'
    # NOTE(review): `got10k` is lowercase here while sibling scripts use
    # `GOT10k` — confirm it matches the actual imported class name.
    seq_dataset = got10k(root_dir, subset='train')
    pair_dataset = Pairwise(seq_dataset)

    # setup data loader
    # cuda = torch.cuda.is_available()
    loader = DataLoader(pair_dataset,
                        batch_size=8,
                        shuffle=True,
                        drop_last=True,
                        num_workers=2)

    # setup tracker
    tracker = TrackerSiamFC()

    # path for saving checkpoints
    # NOTE(review): net_dir is created but no checkpoint is ever saved in
    # this loop — the torch.save call seems to be missing.
    net_dir = 'pretrained/siamfc_new'
    if not os.path.exists(net_dir):
        os.makedirs(net_dir)

    # training loop
    epoch_num = 50
    for epoch in range(epoch_num):
        for step, batch in enumerate(loader):
            # learning rate is stepped once per epoch (on the first batch)
            loss = tracker.step(batch, backward=True, update_lr=(step == 0))
            if step % 20 == 0:
                # periodic progress line; flush so it shows under nohup/pipes
                print('Epoch [{}][{}/{}]: Loss: {:.3f}'.format(
                    epoch + 1, step + 1, len(loader), loss))
                sys.stdout.flush()
Esempio n. 3
0
    'Bolt2', 'Boy', 'Car2', 'Car24', 'Coke', 'Coupon', 'Crossing', 'Dancer',
    'Dancer2', 'David2', 'David3', 'Dog', 'Dog1', 'Doll', 'FaceOcc1',
    'FaceOcc2', 'Fish', 'FleetFace', 'Football1', 'Freeman1', 'Freeman3',
    'Girl2', 'Gym', 'Human2', 'Human5', 'Human7', 'Human8', 'Jogging',
    'KiteSurf', 'Lemming', 'Man', 'Mhyang', 'MountainBike', 'Rubik', 'Singer1',
    'Skater', 'Skater2', 'Subway', 'Suv', 'Tiger1', 'Toy', 'Trans',
    'Twinnings', 'Vase'
]

if __name__ == '__main__':
    # Evaluate a pretrained SiamFC tracker on one OTB2015 sequence and
    # report OP (overlap precision), DP (distance precision), AUC and fps.
    # NOTE(review): nbins_iou / nbins_ce are unused here — presumably
    # consumed inside _calc_curves; confirm.
    nbins_iou = 21
    nbins_ce = 51
    # NOTE(review): hard-coded Windows path — must be edited per machine.
    video_path = 'E:\\xxx\\OTB2015\\Bolt'
    img_files = sorted(glob.glob(os.path.join(video_path, 'img/*.jpg')))
    anno_files = glob.glob(os.path.join(video_path, 'groundtruth_rect*.txt'))
    # ground-truth files may be comma- or space-separated; normalise commas
    # to spaces before handing the text to np.loadtxt
    with open(anno_files[0], 'r') as f:
        anno = np.loadtxt(io.StringIO(f.read().replace(',', ' ')))

    net_path = './pretrained/model.pth'
    tracker = TrackerSiamFC(net_path=net_path)
    # track the full sequence starting from the first ground-truth box
    boxes, _, fps = tracker.track(img_files,
                                  anno[0, :],
                                  visualize=True,
                                  debug=False,
                                  gt=anno)
    ious, center_errors = _calc_metrics(boxes, anno)
    succ_curve, prec_curve = _calc_curves(ious, center_errors)
    # OP: fraction of frames with IoU > 0.5; DP: precision at 20 px error
    print('OP is {:.3f},DP is {:.3f},AUC is {:.3f},fps is {:.3f}'.format(
        len(ious[ious > 0.5]) / len(ious), prec_curve[20], np.mean(succ_curve),
        fps))
Esempio n. 4
0
from __future__ import absolute_import

import os
from got10k.experiments import *

from siamfc import TrackerSiamFC


if __name__ == '__main__':
    # Evaluate a pretrained SiamFC checkpoint on OTB2015 via the got10k
    # experiment toolkit; alternative benchmarks are kept commented below.
    net_path = 'models/siamfc_alexnet_e50.pth'  # pretrained checkpoint
    tracker = TrackerSiamFC(net_path=net_path)  # initialize a tracker

    # root_dir = os.path.abspath('datasets/OTB')
    # e = ExperimentOTB(root_dir, version=2013)

    root_dir = os.path.abspath('datasets/OTB')
    e = ExperimentOTB(root_dir, version=2015)

    # root_dir = os.path.abspath('datasets/UAV123')
    # e = ExperimentUAV123(root_dir, version='UAV123')

    # root_dir = os.path.abspath('datasets/UAV123')
    # e = ExperimentUAV123(root_dir, version='UAV20L')

    # root_dir = os.path.abspath('datasets/DTB70')
    # e = ExperimentDTB70(root_dir)

    # root_dir = os.path.abspath('datasets/UAVDT')
    # e = ExperimentUAVDT(root_dir)

    # root_dir = os.path.abspath('datasets/VisDrone')
Esempio n. 5
0
from siamfc import TrackerSiamFC
from config import config
import os
import glob
import numpy as np

if __name__ == '__main__':
    # Evaluate every SiamFC checkpoint found under `model/` on OTB2015,
    # writing per-checkpoint result and report directories.
    # Bug fix: ExperimentOTB was used below without ever being imported in
    # this script (NameError at runtime); import it locally so the script
    # is self-contained.
    from got10k.experiments import ExperimentOTB

    # setup the tracker to access the pre-trained model
    folder_path = 'model'
    results = 'results'
    reports = 'reports'
    # sorted so checkpoints are evaluated in a deterministic order
    model = np.sort(glob.glob(os.path.join(folder_path, "*.pth")))

    for i in model:
        # checkpoint basename without extension, e.g. "siamfc_e50"
        model_name = os.path.splitext(os.path.basename(i))[0]

        # one results/reports subdirectory per checkpoint
        results_path_bbox = os.path.join(results, model_name)
        reports_path_graph = os.path.join(reports, model_name)

        tracker_test = TrackerSiamFC(net_path=i)
        # NOTE(review): `OTB_dataset_directoty` (sic) is the attribute name
        # as defined in the project config — kept as-is.
        experiments = ExperimentOTB(config.OTB_dataset_directoty,
                                    version=2015,
                                    result_dir=results_path_bbox,
                                    report_dir=reports_path_graph)

        # run the experiments for tracking to report the performance
        experiments.run(tracker_test, visualize=False)
        experiments.report([tracker_test.name])
Esempio n. 6
0
def main(args):
    """Build the tracker and segmentation model, produce per-frame
    segmentations (from the U-Net, from Lux predictions, or loaded from
    files), optionally save them, then hand off to SiamFC tracking.

    Reads from ``args``: dataset_path, sequence, img_extension,
    siamese_path, unet_path, use_cuda, debug, debug_v2,
    save_model_preds_to_file, load_model_preds_from_file, preds_extension,
    load_preds_as_are, get_initial_seg_from_lux, load_segs_from_file,
    save_segs_to_file.
    """
    print("Initialising Tracker and Segmentation network")
    img_shape = get_img_size(args.dataset_path, args.sequence)
    track_model = TrackerSiamFC(net_path=args.siamese_path,
                                use_cuda=args.use_cuda)
    seg_model = unet_model(img_shape, args.unet_path)

    # map of image index -> image, iterated in natural-sort order
    imgs = load_images(args.dataset_path, args.sequence, args.img_extension)
    sorted_img_indx = sorted(imgs.keys(), key=natural_keys)
    if args.debug or args.debug_v2:
        if args.debug:
            # debug: keep only 3 frames from the middle of the sequence
            sorted_img_indx = sorted_img_indx[
                int(len(sorted_img_indx) /
                    2):int(len(sorted_img_indx) / 2) + 3]
        else:
            # debug_v2: keep only the first 2 frames
            sorted_img_indx = sorted_img_indx[:2]
        print("Debugging conditions set")
    # frames: image index -> labelled segmentation frame
    frames = dict()

    if args.save_model_preds_to_file is not None:
        print("Creating '{}' to save model predictions".format(
            args.save_model_preds_to_file))
        args.save_model_preds_to_file = os.path.join(
            args.dataset_path, args.save_model_preds_to_file)
        prepare_dir(args.save_model_preds_to_file)

    if args.load_model_preds_from_file is not None:
        args.load_model_preds_from_file = os.path.join(
            args.dataset_path, args.load_model_preds_from_file)

    print("Getting segmentation")
    if args.get_initial_seg_from_lux:
        # dataset name = last path component (robust to a trailing slash)
        dataset_name = args.dataset_path.split('/')[-1] if not args.dataset_path[-1] == '/' else \
                       args.dataset_path.split('/')[
                           -2]
        print("Getting initial segmentation for {}".format(dataset_name))
        # run the external Lux U-Net predictor twice (predict + refine),
        # writing masks into a "mulux_0<sequence>" working directory
        predict_dataset(
            name=dataset_name,
            sequence="0{}".format(args.sequence),
            model_path='model_weights/unet_{}.h5'.format(dataset_name),
            output_path="mulux_0{}".format(args.sequence))
        predict_dataset_2(path="mulux_0{}".format(args.sequence),
                          output_path="mulux_0{}".format(args.sequence))

        # read the produced masks back, pairing them positionally with the
        # sorted image indices
        for img_indx_id, img_name in enumerate(
                sorted(glob.glob("mulux_0{}/mask*.{}".format(
                    args.sequence, args.img_extension)),
                       key=natural_keys)):
            frames[sorted_img_indx[img_indx_id]] = get_frame(
                cv2.imread(img_name, cv2.IMREAD_ANYDEPTH))
        # shutil.rmtree("mulux_0{}".format(args.sequence), ignore_errors=True)
    elif args.load_segs_from_file is None:
        # compute (or load pre-computed) per-frame predictions
        for img_id, img_indx in enumerate(sorted_img_indx):
            if args.load_model_preds_from_file is not None:
                img_path = os.path.join(
                    args.load_model_preds_from_file,
                    "{}{}.tif".format(args.preds_extension, img_indx))
                pred = cv2.imread(img_path, cv2.IMREAD_ANYDEPTH)
                # fail loudly if a prediction file is missing or empty
                if pred is None or (pred is not None and np.sum(pred) == 0):
                    print(img_path)
                    assert pred is not None
            else:
                pred = get_segmentation(imgs[img_indx], seg_model)
            if args.load_preds_as_are:
                frames[img_indx] = pred
            else:
                # connected-component labelling of the raw prediction
                frames[img_indx] = get_frame(get_labels(pred))
            if args.save_model_preds_to_file is not None:
                img_path_name = os.path.join(args.save_model_preds_to_file,
                                             "{}.tif".format(img_indx))
                cv2.imwrite(img_path_name, pred.astype(np.uint8))
    else:
        # load ready-made segmentations, paired positionally with indices
        load_segs_dir = os.path.join(args.dataset_path,
                                     args.load_segs_from_file)
        imgs_list = sorted(make_list_of_imgs_only(os.listdir(load_segs_dir),
                                                  args.img_extension),
                           key=natural_keys)
        print("Reading segs from: {}".format(load_segs_dir))
        for img_indx_id, img_name in enumerate(imgs_list):
            img_path = os.path.join(load_segs_dir, img_name)
            frames[sorted_img_indx[img_indx_id]] = get_frame(
                cv2.imread(img_path, cv2.IMREAD_ANYDEPTH))

    if args.save_segs_to_file is not None:
        save_segs_dir = os.path.join(args.dataset_path, args.save_segs_to_file)
        prepare_dir(save_segs_dir)
        print("Saving segs to: {}".format(save_segs_dir))
        for img_id, img_indx in enumerate(sorted_img_indx):
            img_path_name = os.path.join(
                save_segs_dir, "{}.{}".format(img_indx, args.img_extension))
            cv2.imwrite(img_path_name, frames[img_indx].astype(np.uint8))
    # NOTE(review): this unconditional exit() makes the tracking call below
    # unreachable — looks like leftover debug code; confirm before removing.
    exit()
    do_siamese_tracking(frames, imgs, sorted_img_indx, track_model, args)
Esempio n. 7
0
def main(mode='IR', visulization=False):
    """Run SiamFC over every video in the test-dev set, dump per-video
    tracking results as JSON, and print per-video / overall scores.

    Args:
        mode: 'IR' or 'RGB' — selects which video/label files are used.
        visulization: (sic — name kept for compatibility) when True, draw
            ground-truth and predicted boxes live with OpenCV.
    """
    assert mode in ['IR', 'RGB'], 'Only Support IR or RGB to evalute'
    # setup tracker
    net_path = 'model.pth'
    tracker = TrackerSiamFC(net_path=net_path)

    # setup experiments
    video_paths = glob.glob(os.path.join('dataset', 'test-dev', '*'))
    video_num = len(video_paths)
    output_dir = os.path.join('results', tracker.name)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    overall_performance = []

    # run tracking experiments and report performance
    for video_id, video_path in enumerate(video_paths, start=1):
        video_name = os.path.basename(video_path)
        video_file = os.path.join(video_path, '%s.mp4' % mode)
        res_file = os.path.join(video_path, '%s_label.json' % mode)
        with open(res_file, 'r') as f:
            label_res = json.load(f)

        # first ground-truth rectangle seeds the tracker
        init_rect = label_res['gt_rect'][0]
        capture = cv2.VideoCapture(video_file)

        frame_id = 0
        out_res = []
        while True:
            ret, frame = capture.read()
            if not ret:
                capture.release()
                break
            if frame_id == 0:
                tracker.init(frame, init_rect)  # initialization
                out = init_rect
                out_res.append(init_rect)
            else:
                out = tracker.update(frame)  # tracking
                out_res.append(out.tolist())
            if visulization:
                _gt = label_res['gt_rect'][frame_id]
                _exist = label_res['exist'][frame_id]
                if _exist:
                    # ground truth in green
                    cv2.rectangle(frame, (int(_gt[0]), int(_gt[1])),
                                  (int(_gt[0] + _gt[2]), int(_gt[1] + _gt[3])),
                                  (0, 255, 0))
                cv2.putText(frame, 'exist' if _exist else 'not exist',
                            (frame.shape[1] // 2 - 20, 30), 1, 2,
                            (0, 255, 0) if _exist else (0, 0, 255), 2)

                # prediction in yellow
                cv2.rectangle(frame, (int(out[0]), int(out[1])),
                              (int(out[0] + out[2]), int(out[1] + out[3])),
                              (0, 255, 255))
                cv2.imshow(video_name, frame)
                cv2.waitKey(1)
            frame_id += 1
        if visulization:
            cv2.destroyAllWindows()
        # save result
        output_file = os.path.join(output_dir,
                                   '%s_%s.txt' % (video_name, mode))
        with open(output_file, 'w') as f:
            json.dump({'res': out_res}, f)

        # NOTE(review): this relies on a project-level `eval` helper
        # shadowing the builtin — builtin eval() would fail on these
        # arguments; confirm the import.
        mixed_measure = eval(out_res, label_res)
        overall_performance.append(mixed_measure)
        # NOTE(review): the label says "Fixed Measure" here but "Mixed
        # Measure" below — presumably a typo in one of them.
        print('[%03d/%03d] %20s %5s Fixed Measure: %.03f' %
              (video_id, video_num, video_name, mode, mixed_measure))

    print('[Overall] %5s Mixed Measure: %.03f\n' %
          (mode, np.mean(overall_performance)))
Esempio n. 8
0
from __future__ import absolute_import

from got10k.experiments import *

from siamfc import TrackerSiamFC

from options import TestOptions

if __name__ == '__main__':
    # Parse test options, build the tracker from the given weights, and
    # collect the requested benchmark experiments (OTB2013/2015, VOT2018,
    # GOT-10k). Unknown experiment names fail fast.
    opt = TestOptions().parse()

    # setup tracker (the unused hard-coded `net_path` local was removed;
    # the tracker is configured entirely from `opt`)
    tracker = TrackerSiamFC(name=opt.name,
                            weight=opt.weight,
                            device=opt.device)

    # setup experiments
    experiments = []
    for exp in opt.exps:
        if exp in ('otb2013', 'OTB2013', 'OTB-2013'):
            experiments.append(ExperimentOTB('data/OTB', version=2013))
        elif exp in ('otb2015', 'OTB2015', 'OTB-2015'):
            experiments.append(ExperimentOTB('data/OTB', version=2015))
        elif exp in ('vot2018', 'VOT2018', 'VOT-2018'):
            experiments.append(ExperimentVOT('data/vot2018', version=2018))
        elif exp in ('got10k', 'GOT10k', 'GOT-10k'):
            experiments.append(ExperimentGOT10k('data/GOT-10k', subset='test'))
        else:
            # Bug fix: was `raise NotImplementederror` — an undefined name,
            # so the intended exception surfaced as a NameError instead.
            raise NotImplementedError('Unknown experiment: {}'.format(exp))
Esempio n. 9
0
                                      subset=('train', 'val'))
        pair_dataset = Pairwise(seq_got_dataset) + Pairwise(seq_vid_dataset)

    # number of (exemplar, search) training pairs
    print(len(pair_dataset))

    # setup data loader
    cuda = torch.cuda.is_available()
    loader = DataLoader(pair_dataset,
                        batch_size=config.batch_size,
                        shuffle=True,
                        pin_memory=cuda,  # pin host memory only on CUDA
                        drop_last=True,
                        num_workers=config.num_workers)

    # setup tracker
    tracker = TrackerSiamFC()

    # training loop
    for epoch in range(config.epoch_num):
        for step, batch in enumerate(loader):

            # learning rate is stepped once per epoch (on the first batch)
            loss = tracker.step(batch, backward=True, update_lr=(step == 0))

            if step % config.show_step == 0:
                print('Epoch [{}][{}/{}]: Loss: {:.3f}'.format(
                    epoch + 1, step + 1, len(loader), loss))
                sys.stdout.flush()

        # save checkpoint
        # NOTE(review): assumes the 'model' directory already exists —
        # torch.save does not create it.
        net_path = os.path.join('model', 'model_e%d.pth' % (epoch + 1))
        torch.save(tracker.net.state_dict(), net_path)
Esempio n. 10
0
def main(dataset, data_time, detector):
    """Run pedestrian detection (Faster R-CNN), optional SiamFC tracking and
    pairwise group regression over one dataset's video, saving per-frame
    figures and pickled statistics.

    Args:
        dataset: 'oxford_town' | 'oxford_town_group' | 'mall' |
            'grand_central' — selects the video, frame skip and threshold.
        data_time: timestamp string used to name the results directory.
        detector: detector name used to name the results directory.

    NOTE(review): `frame_images` and `tracker` are only defined in the
    'oxford_town_group' branch but are referenced unconditionally in the
    main loop — the other datasets would hit a NameError there; confirm
    this function is only called with 'oxford_town_group'.
    """

    path_result = os.path.join('results', data_time + '_' + detector, dataset)
    os.makedirs(path_result, exist_ok=True)

    # initialize detector
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = torchvision.models.detection.fasterrcnn_resnet50_fpn(
        pretrained=True)
    # model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True)
    model.to(device=device)
    model.eval()

    # load background
    img_bkgd_bev = cv2.imread('calibration/' + dataset +
                              '_background_calibrated.png')
    # load transformation matrix (camera pixel -> world ground plane)
    transform_cam2world = np.loadtxt('calibration/' + dataset +
                                     '_matrix_cam2world.txt')

    # open video of dataset
    if dataset == 'oxford_town':
        cap = cv2.VideoCapture(os.path.join('datasets', 'TownCentreXVID.avi'))
        frame_skip = 10  # oxford town dataset has fps of 25
        thr_score = 0.9
    elif dataset == 'oxford_town_group':
        cap = cv2.VideoCapture(os.path.join('datasets', 'TownCentreXVID.avi'))
        path_track_frames = os.path.join(os.getcwd(), 'datasets',
                                         'dataset_tracks', 'TownCentre',
                                         'img1')
        # NOTE(review): the backslashes below make these paths Windows-only;
        # the '\****.jpg' glob will not match on POSIX systems.
        frame_images = sorted(glob.glob(path_track_frames + '\****.jpg'))
        net_path = os.path.join(os.getcwd(), 'tracker', 'siamfc_pytorch',
                                'tools', 'pretrained\siamfc_alexnet_e50.pth')
        tracker = TrackerSiamFC(net_path=net_path)
        frame_skip = 10  # oxford town dataset has fps of 25
        thr_score = 0.9
    elif dataset == 'mall':
        cap = cv2.VideoCapture(os.path.join('datasets', 'mall.mp4'))
        frame_skip = 1
        thr_score = 0.9
    elif dataset == 'grand_central':
        cap = cv2.VideoCapture(os.path.join('datasets', 'grandcentral.avi'))
        frame_skip = 25  # grand central dataset has fps of 25
        thr_score = 0.5
    else:
        raise Exception('Invalid Dataset')

    # f = open(os.path.join(path_result, 'statistics.txt'), 'w')

    statistic_data = []
    i_frame = 0
    # while cap.isOpened() and i_frame < 5000:
    while cap.isOpened() and i_frame <= 7450:
        ret, img = cap.read()
        print("at frame " + str(i_frame) + "------")
        if ret is False:
            break
        if i_frame % frame_skip == 0:  #only run the social distancing system every 10 frames.
            #ret, img = cap.read()
            # print('Frame %d - ' % i_frame)
            # if i_frame > 50:
            #     break

            # skip frames to achieve 1hz detection
            # if not i_frame % frame_skip == 0:  # conduct detection per second
            #     i_frame += 1
            #     continue

            #vis = True
            # only render figures for the early part of the video
            if i_frame <= 3000:
                # if i_frame / frame_skip < 20:
                vis = True
            else:
                vis = False

            # counting process time
            t0 = time.time()

            # convert image from OpenCV format to PyTorch tensor format
            # (HWC BGR uint8 -> CHW float in [0, 1])
            img_t = np.moveaxis(img, -1, 0) / 255
            img_t = torch.tensor(img_t, device=device).float()

            # pedestrian detection
            predictions = model([img_t])
            boxes = predictions[0]['boxes'].cpu().data.numpy()
            classIDs = predictions[0]['labels'].cpu().data.numpy()
            scores = predictions[0]['scores'].cpu().data.numpy()
            box_id = [0] * len(boxes)
            # array to hold box ids for tracking

            #box 1 at (x1,y1), (x2,y2)
            #box 2 at (x1,y1), (x2,y2)
            #reg box_1 array - box_2 array

            # get positions and plot on raw image
            pts_world = []
            iter_tracks = []
            for i in range(len(boxes)):
                ##if class is a person and threshold is met
                if classIDs[i] == 1 and scores[i] > thr_score:
                    # extract the bounding box coordinates
                    (x1, y1) = (boxes[i][0], boxes[i][1])
                    (x2, y2) = (boxes[i][2], boxes[i][3])

                    #detector gives coords x1 ,y1, x2, y2
                    #convert these coords to tracker input
                    #input for tracker is a bounding box [x1,y1, width, height]
                    track_box_in = [
                        boxes[i][0], boxes[i][1], boxes[i][2] - boxes[i][0],
                        boxes[i][3] - boxes[i][1]
                    ]

                    #adjust so input images are 3 frames at i, i +5, i +10 rather than the whole set
                    track_images = []
                    # NOTE(review): the bounds check uses `i` (detection
                    # index) but the list is indexed with `i_frame + 5 * z`
                    # — these disagree, so this can raise IndexError near
                    # the end of the sequence; confirm the intended index.
                    for z in range(4):  #number of frames to prepare
                        if i + 5 * z < len(frame_images):
                            track_images.append(frame_images[i_frame + 5 * z])

                    ##step 1 tracker
                    curr_track = tracker.track(track_images, track_box_in)
                    ##assign labels to the bounding boxes
                    ##label box = x
                    box_id[i] = i + 1
                    iter_tracks.append(curr_track)
                    #run tracker on each box

                    ################################################
                    #(if box_id > 0 run tracker)
                    #takes box (pixel coord)
                    #10 frames skip
                    #output is a tracklet area of the coord in each frame
                    #if ( box_id[i] > 0):

                    #convert coord of tracket to real

                    #run regression on coord of traklet if their is a violation

                    #regression confidence is high that difference is low that means no violation
                    #regress(y1,y2 -> x1,x2 from the difference array

                    ##############################################################################

                    if vis:
                        # draw a bounding box rectangle and label on the image
                        # NOTE(review): the format string has only two
                        # placeholders, so box_id[i] is silently dropped.
                        cv2.rectangle(img, (x1, y1), (x2, y2), [0, 0, 255], 2)

                        text = "{}: {:.2f}".format(LABELS[classIDs[i]],
                                                   scores[i], box_id[i])
                        cv2.putText(img, text, (int(x1), int(y1) - 5),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, [0, 255, 0],
                                    2)

                    # find the bottom center position and convert it to world coordinate
                    p_c = np.array([[(x1 + x2) / 2], [y2], [1]])
                    p_w = transform_cam2world @ p_c
                    p_w = p_w / p_w[2]  # de-homogenise
                    pts_world.append([p_w[0][0], p_w[1][0]])

                ## convert all tracks coords to real world
            track_btm_cntr = np.zeros(
                (len(iter_tracks), 4,
                 3))  ##to hold bounding boxes adjusted to bottom center coord
            track_world = np.zeros(
                (len(iter_tracks), 4, 3))  ##to hold real world coord
            ## for each track iterate through each bounding box in a track and convert it to realworld coord
            ## foll steps above in which p_c and p_w are calculated
            for w in range(len(iter_tracks)):
                for u in range(
                        4
                ):  #add each of the boxes from the 4 frame of the track
                    # bottom-center of box u of track w as homogeneous coords
                    # (x-center = x1 + w/2, y = y1 + h)
                    row_converted = np.array([[
                        (iter_tracks[w][0][u][0] + iter_tracks[w][0][u][0] +
                         iter_tracks[w][0][u][2]) / 2
                    ], [iter_tracks[w][0][u][1] + iter_tracks[w][0][u][3]],
                                              [1]])
                    track_btm_cntr[w][u] = [
                        row_converted[0], row_converted[1], row_converted[2]
                    ]
                    track_world[w][
                        u] = transform_cam2world @ track_btm_cntr[w][u]
                    track_world[w][
                        u] = track_world[w][u] / track_world[w][u][2]

            #get every combination of difference between each track
            #because difference between track i and track j is the just the negative of the difference of track j and i
            #only store i - j
            track_differences = {(w, u): 0
                                 for w in range(len(track_world) - 1)
                                 for u in range(1 + w, len(track_world))}
            # NOTE(review): this iterates over the number of PAIRS, not the
            # number of tracks; the extra iterations are no-ops because the
            # inner range is empty, but the intent was probably
            # range(len(track_world) - 1).
            for w in range(len(track_differences)):
                for u in range(w + 1, len(track_world)):
                    track_diff_w = track_world[w, :, :2]
                    track_diff_u = track_world[u, :, :2]
                    track_diff = track_diff_w - track_diff_u
                    track_differences[w, u] = track_diff

            ##regress each item in the difference dictionary against 0. If the p > 0.05 we fail to reject that their
            ##there is a difference between two tracks (that is to say they are walking together)

            #holds the outcome for a track pair (i,j) if they are a group or not
            track_regression_out = {(w, u): 0
                                    for w in range(len(track_world) - 1)
                                    for u in range(1 + w, len(track_world))}
            for pair in track_differences:
                pair_x = track_differences[pair][:, 0].reshape(-1, 1)
                pair_y = track_differences[pair][:, 1].reshape(-1, 1)
                pair_norm = [0, 0, 0, 0]
                x_sample = [1, 2, 3, 4]
                # pair_norm[0]: scaled distance at t0; others: change in
                # distance relative to t0
                for j in range(4):
                    if j == 0:
                        pair_norm[j] = np.linalg.norm([pair_x[j], pair_y[j]
                                                       ]) * 0.001
                    else:
                        pair_norm[j] = np.linalg.norm([
                            pair_x[j], pair_y[j]
                        ]) - np.linalg.norm([pair_x[0], pair_y[0]])
                reg_pair = sm.OLS(pair_norm, x_sample)
                #reg_pair = sm.OLS(pair_y, pair_x)
                reg_pair = reg_pair.fit()
                # pvalues is an array; with a single regressor the size-1
                # truth test below works, but it would raise for more.
                p_value = reg_pair.pvalues
                # NOTE(review): debug leftover; `x` is never used.
                if pair == (5, 6):
                    x = "test"
                #if pvalue is less than 0.05 we reject null in favour that is there is a difference between track i and j
                #so they are not a group and set track_regression_out to false
                if p_value < 0.05:
                    track_regression_out[pair] = False
                #else set track regression out to true because we fail to reject the null and therefore conclude that
                #the two tracks are a group
                else:
                    track_regression_out[pair] = True

            t1 = time.time()

            # NOTE(review): if no detection passed the threshold, pts_world
            # is an empty 1-D array and the [:, [0, 1]] axis swap below
            # raises — TODO confirm frames always contain pedestrians.
            pts_world = np.array(pts_world)
            if dataset == 'oxford_town':
                pts_world[:, [0, 1]] = pts_world[:, [1, 0]]
                pass
            elif dataset == 'oxford_town_group':
                pts_world[:, [0, 1]] = pts_world[:, [1, 0]]
                pass
            elif dataset == 'mall':
                # pts_world[:, [0, 1]] = pts_world[:, [1, 0]]
                pass
            elif dataset == 'grand_central':
                # pts_world[:, [0, 1]] = pts_world[:, [1, 0]]
                pass

            statistic_data.append(
                (i_frame, t1 - t0, pts_world, track_regression_out))

            # visualize
            if vis:
                violation_pairs = find_violation(pts_world,
                                                 track_regression_out)
                pts_roi_world, pts_roi_cam = get_roi_pts(
                    dataset=dataset,
                    roi_raw=ROIs[dataset],
                    matrix_c2w=transform_cam2world)

                fig = plot_frame_one_row(dataset=dataset,
                                         img_raw=img,
                                         pts_roi_cam=pts_roi_cam,
                                         pts_roi_world=pts_roi_world,
                                         pts_w=pts_world,
                                         pairs=violation_pairs)

                # fig = plot_frame(
                #     dataset=dataset,
                #     img_raw=img,
                #     img_bev_bkgd_10x=img_bkgd_bev,
                #     pts_roi_cam=pts_roi_cam,
                #     pts_roi_world=pts_roi_world,
                #     pts_w=pts_world,
                #     pairs=violation_pairs
                # )

                fig.savefig(
                    os.path.join(path_result, 'frame%04d.png' % i_frame))
                plt.close(fig)

            # update loop info
            print('Frame %d - Inference Time: %.2f' % (i_frame, t1 - t0))
            print('=======================')
        i_frame += 1

    if cap.isOpened():
        cap.release()
    # save statistics
    # f.close()
    pickle.dump(statistic_data,
                open(os.path.join(path_result, 'statistic_data.p'), 'wb'))
Esempio n. 11
0
from __future__ import absolute_import #??

import os

from got10k.datasets import * 

from siamfc import TrackerSiamFC 

import multiprocessing

# use the 'spawn' start method (force=True) so worker processes are safe
# to combine with CUDA
multiprocessing.set_start_method('spawn',True)

if __name__ == '__main__':
    # Train SiamFC from scratch over the GOT-10k training split.

    root_dir = os.path.abspath('data/GOT-10k')  # absolute dataset path

    seqs = GOT10k(root_dir, subset='train', return_meta=True)

    tracker = TrackerSiamFC(net_path=None)  # builds network, loss, optimizer, device

    tracker.train_over(seqs)
Esempio n. 12
0
 def __init__(self):
     """Wrap a pretrained SiamFC model behind the benchmark Tracker API."""
     super(SiamFC, self).__init__("SiamFC")
     # TODO: edit this path
     self.net_file = path_config.SIAMFC_MODEL
     self.tracker = TrackerSiamFC(net_path=self.net_file)