Example #1
def main():
    args = parse_args()
    total_lost = 0

    # prepare model
    net = models.__dict__[args.arch](anchors_nums=args.anchor_nums,
                                     cls_type=args.cls_type)
    net = load_pretrain(net, args.resume)
    net.eval()
    net = net.cuda()

    # prepare video
    dataset = load_dataset(args.dataset)
    video_keys = list(dataset.keys()).copy()

    # prepare tracker
    info = edict()
    info.arch = args.arch
    info.cls_type = args.cls_type
    info.dataset = args.dataset
    info.epoch_test = args.epoch_test
    tracker = SiamRPN(info)

    for video in video_keys:
        total_lost += track(tracker, net, dataset[video], args)
    print('Total Lost: {:d}'.format(total_lost))
Example #2
def visualize_tracking_result(workspace_path, tracker_id, sequence_name,
                              show_gt):

    dataset = load_dataset(workspace_path)

    sequence = None
    for sequence_ in dataset.sequences:
        if sequence_.name == sequence_name:
            sequence = sequence_
            break

    if sequence is None:
        print('Sequence (%s) cannot be found.' % sequence_name)
        exit(-1)

    tracker_class = load_tracker(workspace_path, tracker_id)
    tracker = tracker_class()

    results_path = os.path.join(workspace_path, 'results', tracker.name(),
                                sequence.name,
                                '%s_%03d.txt' % (sequence.name, 1))
    if not os.path.exists(results_path):
        print('Results file does not exist (%s).' % results_path)
        exit(-1)

    regions = read_regions(results_path)

    sequence.visualize_results(regions, show_groundtruth=show_gt)
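The helper above is easy to drive from the command line; a hypothetical wrapper (argument names are assumptions, not from the source):

if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser(description='Visualize tracking results.')
    parser.add_argument('--workspace_path', required=True)
    parser.add_argument('--tracker_id', required=True)
    parser.add_argument('--sequence_name', required=True)
    parser.add_argument('--show_gt', action='store_true')
    cli = parser.parse_args()

    visualize_tracking_result(cli.workspace_path, cli.tracker_id,
                              cli.sequence_name, cli.show_gt)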
Example #3
def eao_vot_fc(tracker, net, config):
    dataset = load_dataset(config['benchmark'])
    video_keys = sorted(list(dataset.keys()).copy())

    for video in video_keys:
        result_path = track_tune(tracker, net, dataset[video], config)

    re_path = result_path.split('/')[0]
    tracker = result_path.split('/')[-1]

    # debug
    print('======> debug: results_path')
    print(result_path)
    print(os.system("ls"))
    print(join(realpath(dirname(__file__)), '../dataset'))

    # give abs path to json path
    data_path = join(realpath(dirname(__file__)), '../dataset')
    dataset = VOTDataset(config['benchmark'], data_path)

    dataset.set_tracker(re_path, tracker)
    benchmark = EAOBenchmark(dataset)
    eao = benchmark.eval(tracker)
    eao = eao[tracker]['all']

    return eao
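For reference, the tuning helpers in these examples consume a plain dict; a minimal hypothetical config, assuming only the 'benchmark' key that the code above actually reads:

config = {'benchmark': 'VOT2019'}  # dataset name forwarded to load_dataset
# any extra keys consumed by track_tune would be added alongside 'benchmark'
eao = eao_vot_fc(tracker, net, config)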
Example #4
def draw_TS():
    global t_stamp, dataset, window_size, plot_window, plot_all, TSClass

    t_stamp += window_size
    list_timeseries = util.load_dataset(dataset)
    name_dataset = {k: v for ds in list_timeseries for k, v in ds.items()}
    dataset_list = list(name_dataset.values())
    # plot_window: the current input TS batch
    # plot_all: TS_set in "ISETS_Web_backend"; to extract its value, refer to
    # a function that returns the object
    plot_window.axis.visible = False
    plot_all.axis.visible = False
    if t_stamp + window_size < 100:
        line1 = plot_window.select({'name': str(t_stamp - window_size)})
        if line1:  # select() returns an empty list when nothing matches
            line1.visible = False
        for ts in dataset_list[t_stamp:t_stamp + window_size]:
            if str(int(ts.class_timeseries)) == TSClass.split(': ')[1]:
                x = range(len(ts.timeseries))
                y = ts.timeseries
                plot_window.line(x, y, line_width=1, name=str(t_stamp))

    if t_stamp + window_size < 50:
        line2 = plot_all.select({'name': str(t_stamp - window_size)})
        if line2:
            line2.visible = False
        n = t_stamp + window_size
        for ts in dataset_list[:n]:
            if str(int(ts.class_timeseries)) == TSClass.split(': ')[1]:
                x = range(len(ts.timeseries))
                y = ts.timeseries
                plot_all.line(x, y, line_width=1, name=str(t_stamp))
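The name-based show/hide trick above relies on Bokeh's select returning every glyph tagged with a given name; a minimal standalone sketch of the pattern (data and names are illustrative):

from bokeh.plotting import figure

p = figure()
p.line([0, 1, 2], [0, 1, 4], line_width=1, name='10')  # tag the glyph
for renderer in p.select({'name': '10'}):  # look it up later by name
    renderer.visible = False               # hide instead of deleting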
Example #5
def main():
    global args, total_lost
    total_lost = 0
    args = parser.parse_args()

    model = models.__dict__[args.arch]()

    if args.resume:
        assert os.path.isfile(args.resume), '{} is not a valid file'.format(
            args.resume)
        model = load_pretrain(model, args.resume)

    model.eval()
    model = model.cuda()

    if args.video and args.dataset != 'none':
        dataset = load_video(args.video)
        track_video(model, dataset[args.video])
    else:
        dataset = load_dataset(args.dataset)
        video_keys = list(dataset.keys()).copy()
        random.shuffle(video_keys)

        for video in video_keys:
            track_video(model, dataset[video])
Example #6
def main():
    args = parse_args()

    # prepare video
    dataset = load_dataset(args.dataset)
    video_keys = list(dataset.keys()).copy()

    # tracking all videos in benchmark
    for video in video_keys:
        track(dataset[video], args)
Example #7
    def add_testing_file(self):
        self.testfile_name = askopenfilename(parent=self.master,
                                             title="Choose a file")
        array_tsdict = util.load_dataset(self.testfile_name)
        datasetname = self.testfile_name.split("/")[-1]
        self.testdataset.update(array_tsdict, datasetname)
        self.master.v_testdsname.set(self.testdataset.name)
        self.master.v_testtslength.set(self.testdataset.tslength)
        self.master.v_testtsnbr.set(self.testdataset.size)
        self.master.v_testclassnbr.set(len(self.testdataset.ClassList))
        self.master.testdataset = self.testdataset
Example #8
    def prepare(self):
        # Load PTB-XL data
        self.data, self.raw_labels = utils.load_dataset(
            self.datafolder, self.sampling_frequency)

        # Preprocess label data
        self.labels = utils.compute_label_aggregations(self.raw_labels,
                                                       self.datafolder,
                                                       self.task)

        # Select relevant data and convert to one-hot
        self.data, self.labels, self.Y, _ = utils.select_data(
            self.data, self.labels, self.task, self.min_samples,
            self.outputfolder + self.experiment_name + '/data/')
        self.input_shape = self.data[0].shape

        # 10th fold for testing (9th for now)
        self.X_test = self.data[self.labels.strat_fold == self.test_fold]
        self.y_test = self.Y[self.labels.strat_fold == self.test_fold]
        # 9th fold for validation (8th for now)
        self.X_val = self.data[self.labels.strat_fold == self.val_fold]
        self.y_val = self.Y[self.labels.strat_fold == self.val_fold]
        # rest for training
        self.X_train = self.data[self.labels.strat_fold <= self.train_fold]
        self.y_train = self.Y[self.labels.strat_fold <= self.train_fold]

        # Preprocess signal data
        self.X_train, self.X_val, self.X_test = utils.preprocess_signals(
            self.X_train, self.X_val, self.X_test,
            self.outputfolder + self.experiment_name + '/data/')
        self.n_classes = self.y_train.shape[1]

        # save train and test labels
        self.y_train.dump(self.outputfolder + self.experiment_name +
                          '/data/y_train.npy')
        self.y_val.dump(self.outputfolder + self.experiment_name +
                        '/data/y_val.npy')
        self.y_test.dump(self.outputfolder + self.experiment_name +
                         '/data/y_test.npy')

        modelname = 'naive'
        # create most naive predictions via simple mean in training
        mpath = self.outputfolder + self.experiment_name + '/models/' + modelname + '/'
        # create folder for model outputs
        if not os.path.exists(mpath):
            os.makedirs(mpath)
        if not os.path.exists(mpath + 'results/'):
            os.makedirs(mpath + 'results/')

        mean_y = np.mean(self.y_train, axis=0)
        np.array([mean_y] * len(self.y_train)).dump(mpath + 'y_train_pred.npy')
        np.array([mean_y] * len(self.y_test)).dump(mpath + 'y_test_pred.npy')
        np.array([mean_y] * len(self.y_val)).dump(mpath + 'y_val_pred.npy')
Example #9
    def add_dataset(self):
        self.dataset_name = askopenfilename(parent=self.master,
                                            title="Choose a file")
        array_tsdict = util.load_dataset(self.dataset_name)
        datasetname = self.dataset_name.split("/")[-1]
        self.dataset.update(array_tsdict, datasetname)
        self.master.v_dsname.set(self.dataset.name)
        self.master.v_tslength.set(self.dataset.tslength)
        self.master.v_tsnbr.set(self.dataset.size)
        self.master.v_classnbr.set(len(self.dataset.ClassList))
        self.master.show_frame(self.master.frame2, "SMAPPage")
Example #10
def evaluate_tracker(workspace_path, tracker_id):

    tracker_class = load_tracker(workspace_path, tracker_id)
    tracker = tracker_class()

    dataset = load_dataset(workspace_path)

    results_dir = os.path.join(workspace_path, 'results', tracker.name())
    if not os.path.exists(results_dir):
        os.makedirs(results_dir)  # create parent directories as needed

    tracker.evaluate(dataset, results_dir)
    print('Evaluation has been completed successfully.')
Example #11
def auc_got10k_oceanplus(tracker, net, config):
    """
    get AUC for GOT10K VAL benchmark
    """
    dataset = load_dataset(config['benchmark'])
    video_keys = list(dataset.keys()).copy()
    random.shuffle(video_keys)
    for video in video_keys:
        result_path = track_tune(tracker, net, dataset[video], config)
    print(result_path)
    auc = eval_got10k_tune(result_path, config['benchmark'])

    return auc
Example #12
def eao_vot_rpn(tracker, net, config):
    dataset = load_dataset(config['benchmark'])
    video_keys = sorted(list(dataset.keys()).copy())
    results = []
    for video in video_keys:
        video_result = track_tune(tracker, net, dataset[video], config)
        results.append(video_result)

    year = config['benchmark'][-4:]  # needs a str, not an int
    eng.cd('./lib/core')
    eao = eng.get_eao(results, year)

    return eao
Example #13
    def prepare(self):
        # Load PTB-XL data
        self.data, self.raw_labels = utils.load_dataset(
            self.datafolder, self.sampling_frequency)

        # Preprocess label data
        self.labels = utils.compute_label_aggregations(self.raw_labels,
                                                       self.datafolder,
                                                       self.task)

        # Select relevant data and convert to one-hot
        self.data, self.labels, self.Y, _ = utils.select_data(
            self.data, self.labels, self.task, self.min_samples,
            self.outputfolder + self.experiment_name + '/data/')
        self.input_shape = self.data[0].shape

        # 10th fold for testing (9th for now)
        self.X_test = self.data[self.labels.strat_fold == self.test_fold]
        self.y_test = self.Y[self.labels.strat_fold == self.test_fold]
        # 9th fold for validation (8th for now)
        self.X_val = self.data[self.labels.strat_fold == self.val_fold]
        self.y_val = self.Y[self.labels.strat_fold == self.val_fold]
        # rest for training
        self.X_train = self.data[self.labels.strat_fold <= self.train_fold]
        self.y_train = self.Y[self.labels.strat_fold <= self.train_fold]

        print('val>>', self.X_val.shape)
        print('test>>', self.X_test.shape)

        # random crop trainset and slide cut validation/test
        self.X_train = utils.random_crop(self.X_train, fs=100, crops=1)
        self.y_train = utils.remark_label(self.y_train, crops=1)

        self.X_val, self.y_val, self.pid_val = utils.slide_and_cut(
            self.X_val, self.y_val, window_size=250, stride=125)
        self.X_test, self.y_test, self.pid_test = utils.slide_and_cut(
            self.X_test, self.y_test, window_size=250, stride=125)

        # Preprocess signal data
        self.X_train, self.X_val, self.X_test = utils.preprocess_signals(
            self.X_train, self.X_val, self.X_test,
            self.outputfolder + self.experiment_name + '/data/')
        self.n_classes = self.y_train.shape[1]

        # save train and test labels
        self.y_train.dump(self.outputfolder + self.experiment_name +
                          '/data/y_train.npy')
        self.y_val.dump(self.outputfolder + self.experiment_name +
                        '/data/y_val.npy')
        self.y_test.dump(self.outputfolder + self.experiment_name +
                         '/data/y_test.npy')
Example #14
def auc_otb(tracker, net, config):
    """
    get AUC for OTB benchmark
    """
    dataset = load_dataset(config['benchmark'])
    video_keys = list(dataset.keys()).copy()
    random.shuffle(video_keys)

    for video in video_keys:
        result_path = track_tune(tracker, net, dataset[video], config)

    auc = eval_auc_tune(result_path, config['benchmark'])

    return auc
Example #15
def eao_vot(tracker, net, config):
    dataset = load_dataset(config['benchmark'])
    video_keys = sorted(list(dataset.keys()).copy())
    results = []
    for video in video_keys:
        video_result = track_tune(tracker, net, dataset[video], config)
        results.append(video_result)

    channel = config['benchmark'].split('VOT')[-1]

    eng.cd('./lib/core')
    eao = eng.get_eao(results, channel)

    return eao
Example #16
    def prepare(self):
        # Load PTB-XL data
        self.data, self.raw_labels = utils.load_dataset(
            self.datafolder, self.sampling_frequency)
        print("[PREPARE] PTBXL Loaded.")

        # Preprocess label data
        self.labels = utils.compute_label_aggregations(self.raw_labels,
                                                       self.datafolder,
                                                       self.task)
        print("[PREPARE] Label data preprocessed.")

        # Select relevant data and convert to one-hot
        self.data, self.labels, self.Y, _ = utils.select_data(
            self.data, self.labels, self.task, self.min_samples,
            self.outputfolder + self.experiment_name + '/data/')
        self.input_shape = self.data[0].shape
        print("[PREPARE] Data Selected")

        # 10th fold for testing (9th for now)
        self.X_test = self.data[self.labels.strat_fold == self.test_fold]
        self.y_test = self.Y[self.labels.strat_fold == self.test_fold]
        print("[PREPARE] Test set created")

        # 9th fold for validation (8th for now)
        self.X_val = self.data[self.labels.strat_fold == self.val_fold]
        self.y_val = self.Y[self.labels.strat_fold == self.val_fold]
        print("[PREPARE] Val set created")

        # rest for training
        self.X_train = self.data[self.labels.strat_fold <= self.train_fold]
        self.y_train = self.Y[self.labels.strat_fold <= self.train_fold]
        print("[PREPARE] Train set created")

        print("Data read successfully done")
        # Preprocess signal data
        self.X_train, self.X_val, self.X_test = utils.preprocess_signals(
            self.X_train, self.X_val, self.X_test,
            self.outputfolder + self.experiment_name + '/data/')
        self.n_classes = self.y_train.shape[1]

        print("Data preprocessing done")

        # save train and test labels
        self.y_train.dump(self.outputfolder + self.experiment_name +
                          '/data/y_train.npy')
        self.y_val.dump(self.outputfolder + self.experiment_name +
                        '/data/y_val.npy')
        self.y_test.dump(self.outputfolder + self.experiment_name +
                         '/data/y_test.npy')
Example #17
def performance(tracker, net, config):
    """
    return performance evaluation value (eg. iou) to GENE
    you should complete 'eval_performance_tune' according to your validation dataset
    """
    dataset = load_dataset(config['benchmark'])
    video_keys = list(dataset.keys()).copy()
    random.shuffle(video_keys)

    for video in video_keys:
        result_path = track_tune(tracker, net, dataset[video], config)

    auc = eval_performance_tune(result_path, config['benchmark'])

    return auc
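The docstring above leaves 'eval_performance_tune' to the reader; here is a minimal sketch of one possible implementation, assuming result files of comma-separated "x,y,w,h" boxes and a parallel groundtruth directory (every path, filename convention, and the box format are assumptions, not from the source):

import os

import numpy as np


def eval_performance_tune(result_path, benchmark):
    """Mean IoU between predicted and groundtruth boxes (layout assumed)."""
    def iou(a, b):
        # boxes given as [x, y, w, h]
        x1, y1 = max(a[0], b[0]), max(a[1], b[1])
        x2 = min(a[0] + a[2], b[0] + b[2])
        y2 = min(a[1] + a[3], b[1] + b[3])
        inter = max(0.0, x2 - x1) * max(0.0, y2 - y1)
        union = a[2] * a[3] + b[2] * b[3] - inter
        return inter / union if union > 0 else 0.0

    ious = []
    for name in os.listdir(result_path):
        pred = np.loadtxt(os.path.join(result_path, name), delimiter=',')
        gt = np.loadtxt(os.path.join('dataset', benchmark, 'gt', name),
                        delimiter=',')
        ious += [iou(p, g) for p, g in zip(pred, gt)]
    return float(np.mean(ious))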
Example #18
def draw_TS():
    global t_stamp, dataset_list, window_size, plot_window, plot_all, TSClass
    list_timeseries = util.load_dataset(dataset)
    name_dataset = {k: v for ds in list_timeseries for k, v in ds.items()}
    dataset_list = list(name_dataset.values())

    t_stamp += window_size
    #historical_TS = forget_degree * len(dataset_list)
    # get the window size
    for ts in dataset_list[t_stamp:t_stamp + window_size]:
        if ts.class_timeseries == TSClass:
            x = range(len(ts.timeseries))
            y = ts.timeseries
            plot_window.line(x, y, line_width=1)
    for ts in dataset_list[:t_stamp + window_size]:
        if ts.class_timeseries == TSClass:
            x = range(len(ts.timeseries))
            y = ts.timeseries
            plot_all.line(x, y, line_width=1)
Example #19
def tracking_analysis(workspace_path, tracker_id):

    dataset = load_dataset(workspace_path)

    tracker_class = load_tracker(workspace_path, tracker_id)
    tracker = tracker_class()

    print('Performing evaluation for tracker:', tracker.name())

    per_seq_overlaps = len(dataset.sequences) * [0]
    per_seq_failures = len(dataset.sequences) * [0]
    per_seq_time = len(dataset.sequences) * [0]

    for i, sequence in enumerate(dataset.sequences):

        results_path = os.path.join(workspace_path, 'results', tracker.name(),
                                    sequence.name,
                                    '%s_%03d.txt' % (sequence.name, 1))
        if not os.path.exists(results_path):
            print('Results file does not exist (%s).' % results_path)

        time_path = os.path.join(workspace_path, 'results', tracker.name(),
                                 sequence.name,
                                 '%s_%03d_time.txt' % (sequence.name, 1))
        if not os.path.exists(time_path):
            print('Time file does not exist (%s).' % time_path)

        regions = read_regions(results_path)
        times = read_vector(time_path)

        overlaps, overlap_valid = trajectory_overlaps(regions,
                                                      sequence.groundtruth)
        failures = count_failures(regions)
        t = average_time(times, regions)

        per_seq_overlaps[i] = sum(overlaps) / sum(overlap_valid)
        per_seq_failures[i] = failures
        per_seq_time[i] = t

    return export_measures(workspace_path, dataset, tracker, per_seq_overlaps,
                           per_seq_failures, per_seq_time)
Example #20
    def check_dataset_conformity(cls, runs_dir, runs_img, title, dataset,
                                 net_input, communication):
        """
        Generate a scatter plot to check the conformity of the dataset.
        The plot will show the distribution of the input sensing, in particular, as the difference between the front
        sensor and the mean of the rear sensors, with respect to the output control of the datasets.

        :param runs_dir: directory containing the simulation
        :param runs_img: directory containing the simulation images
        :param title: title of the plot
        :param dataset: name of the dataset
        :param net_input: input of the net between prox_values, prox_comm or all_sensors
        :param communication: states if the communication is used by the network
        """

        runs = utils.load_dataset(runs_dir, 'simulation.pkl')
        runs_sub = runs[[
            'timestep', 'run', 'motor_left_target', 'prox_values', 'prox_comm',
            'all_sensors'
        ]]

        N = runs.myt_quantity.unique().max() - 2
        myt_quantity = np.array(
            runs[['run', 'myt_quantity']].drop_duplicates().myt_quantity) - 2

        x, y, myt_quantities, _, _ = utils.extract_input_output(
            runs_sub,
            net_input,
            N,
            myt_quantities=myt_quantity,
            communication=communication)

        #  Generate a scatter plot to check the conformity of the dataset
        file_name = 'dataset-scatterplot-%s' % dataset

        x_label = 'sensing (%s)' % net_input
        y_label = 'control'

        my_plots.my_scatterplot(x, y, x_label, y_label, runs_img, title,
                                file_name)
Example #21
def tracking_comparison(workspace_path, tracker_ids, sensitivity, output_path):

    dataset = load_dataset(workspace_path)

    outputs_all = []
    for tracker_id in tracker_ids:

        tracker_class = load_tracker(workspace_path, tracker_id)
        tracker = tracker_class()

        results_path = os.path.join(workspace_path, 'analysis', tracker.name(), 'results.json')
        if os.path.exists(results_path):
            output = load_output(results_path)
            print_summary(output)
        else:
            output = tracking_analysis(workspace_path, tracker_id)
        
        outputs_all.append(output)

    if output_path == '':
        output_path = os.path.join(workspace_path, 'analysis', 'ar.png')

    export_plot(outputs_all, sensitivity, output_path)
Example #22
def main():
    args = parse_args()

    # prepare model
    net = models.__dict__[args.arch]()
    net = load_pretrain(net, args.resume)
    net.eval()
    net = net.cuda()

    # prepare video
    dataset = load_dataset(args.dataset)
    video_keys = list(dataset.keys()).copy()

    # prepare tracker
    info = edict()
    info.arch = args.arch
    info.dataset = args.dataset
    info.epoch_test = args.epoch_test
    tracker = SiamFC(info)

    # tracking all videos in benchmark
    for video in video_keys:
        track(tracker, net, dataset[video], args)
Example #23
def global_structure(k, data_directory, m_ratio, stack_ratio, window_size):
    list_timeseries = util.load_dataset(data_directory)
    name_dataset = {k: v for ds in list_timeseries for k, v in ds.items()}
    dataset_list = list(name_dataset.values())
    global drift
    min_m = util.min_length_dataset(dataset_list)
    print("Maximum length of shapelet is : " + str(min_m))
    min_length = int(0.1 * min_m)
    max_length = int(0.5 * min_m)
    m_list = range(min_length, max_length, int(min_m * m_ratio))
    stack_size = stack_ratio * len(dataset_list)
    TS_set = []
    MP_set_all = {}

    #Initialization of shapList
    driftDetection = eb.driftDetection()
    inputTSBatch = driftDetection.stream_window(dataset_list, window_size)
    TS_newSet, MP_set_all = mb.memory_cache_all_length(TS_set, MP_set_all,
                                                       stack_size,
                                                       inputTSBatch, m_list)
    print(len(TS_newSet))

    shapList = sb.extract_shapelet_all_length(k, TS_newSet, MP_set_all, m_list)
    output_loss = pd.DataFrame([[0, 0, 0, 0, 0, 0]],
                               columns=[
                                   't_stamp', 'loss_batch', 'cum_loss', 'PH',
                                   'avg_loss', 'nbr_drift'
                               ])
    output_shapelet = pd.DataFrame([[0, 0, 0, 0, 0]],
                                   columns=[
                                       't_stamp', 'shap.name', 'shap.Class',
                                       'shap.subseq', 'shap.score'
                                   ])
    while driftDetection.t_stamp < len(dataset_list):
        inputTSBatch = driftDetection.stream_window(dataset_list, window_size)
        drift, loss_batch, cum_loss, PH, avg_loss = driftDetection.shapelet_matching(
            shapList, inputTSBatch)
        nbr_drift = 1 if drift else 0
        loss_set = [
            driftDetection.t_stamp, loss_batch, cum_loss, PH, avg_loss,
            nbr_drift
        ]
        loss_pd = pd.DataFrame([loss_set],
                               columns=[
                                   't_stamp', 'loss_batch', 'cum_loss', 'PH',
                                   'avg_loss', 'nbr_drift'
                               ])
        output_loss = output_loss.append(loss_pd)  # append() returns a new frame

        print("Drift is " + str(drift))
        if drift:
            TS_newSet, MP_set_all = mb.memory_cache_all_length(
                TS_set, MP_set_all, stack_size, inputTSBatch, m_list)
            shapList = sb.extract_shapelet_all_length(k, TS_newSet, MP_set_all,
                                                      m_list)
            for shap in shapList:
                shap_set = [
                    driftDetection.t_stamp, shap.name, shap.Class,
                    str(shap.subseq), shap.normal_distance
                ]
                shap_pd = pd.DataFrame([shap_set],
                                       columns=[
                                           't_stamp', 'shap.name',
                                           'shap.Class', 'shap.subseq',
                                           'shap.score'
                                       ])
                output_shapelet = output_shapelet.append(shap_pd)
    output_loss.to_csv("output_loss2.csv", index=False)
    output_shapelet.to_csv("output_shapelet2.csv", index=False)
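A side note on the DataFrame.append calls above: append is not in-place (its result must be kept, as fixed here) and has since been deprecated in pandas. A small equivalent using pd.concat:

import pandas as pd

columns = ['t_stamp', 'loss_batch', 'cum_loss', 'PH', 'avg_loss', 'nbr_drift']
records = []
records.append([1, 0.2, 0.2, 0.0, 0.2, 0])  # one loss_set per processed batch
output_loss = pd.concat([pd.DataFrame([r], columns=columns) for r in records],
                        ignore_index=True)
output_loss.to_csv("output_loss2.csv", index=False)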
Example #24
def main():
    parser = argparse.ArgumentParser(description="Standalone Center Loss.")

    # Dataset
    parser.add_argument("--dataset", type=str, default="fashion-mnist", choices=["mnist", "fashion-mnist", "cifar-10"])
    parser.add_argument("--num_workers", type=int, default=4, help="Number of data loading workers.")
    # Optimization
    parser.add_argument("--epochs", type=int, default=100, help="Number of epochs.")
    parser.add_argument("--batch_size", type=int, default=128, help="Batch size.")
    parser.add_argument("--gpu_ids", type=str, default='', help="GPUs for running this script.")
    parser.add_argument("--lr", type=float, default=0.01, help="Learning rate for gradient descent.")
    parser.add_argument("--weight_intra", type=float, default=1.0, help="Weight for intra loss.")
    parser.add_argument('--weight_inter', type=float, default=0.1, help="Weight for inter loss.")
    parser.add_argument("--factor", type=float, default=0.2, help="Factor by which the learning rate will be reduced.")
    parser.add_argument("--patience", type=int, default=10,
                        help="Number of epochs with no improvement after which learning rate will be reduced.")
    parser.add_argument("--threshold", type=float, default=0.1,
                        help="Threshold for measuring the new optimum, to only focus on significant changes. ")
    # Model
    parser.add_argument("--model", type=str, default="resnet", choices=["resnet"])
    parser.add_argument("--feat_dim", type=int, default=128, help="Dimension of the feature.")
    # Misc
    parser.add_argument("--log_dir", type=str, default="./run/", help="Where to save the log?")
    parser.add_argument("--log_name", type=str, required=True, help="Name of the log folder.")
    parser.add_argument("--seed", type=int, default=0, help="Random seed.")
    parser.add_argument("--eval_freq", type=int, default=1, help="How frequently to evaluate the model?")
    parser.add_argument("--vis", action="store_true", help="Whether to visualize the features?")

    args = parser.parse_args()

    # Check before run.
    if not os.path.exists(args.log_dir):
        os.mkdir(args.log_dir)
    log_dir = os.path.join(args.log_dir, args.log_name)

    # Setting up logger
    log_file = datetime.now().strftime("%Y-%m-%d-%H-%M-%S_{}.log".format(args.dataset))
    sys.stdout = Logger(os.path.join(log_dir, log_file))
    print(args)

    for s in args.gpu_ids:
        try:
            int(s)
        except ValueError:
            print("Invalid gpu id: {}".format(s))
            raise

    os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(args.gpu_ids)

    if args.gpu_ids:
        if torch.cuda.is_available():
            use_gpu = True
            cudnn.benchmark = True
            torch.cuda.manual_seed_all(args.seed)
        else:
            use_gpu = False
    else:
        use_gpu = False

    torch.manual_seed(args.seed)

    trainloader, testloader, input_shape, classes = load_dataset(args.dataset, args.batch_size, use_gpu,
                                                                 args.num_workers)
    model = build_model(args.model, input_shape, args.feat_dim, len(classes))

    criterion = StandaloneCenterLoss(len(classes), feat_dim=args.feat_dim, use_gpu=use_gpu)
    optimizer = torch.optim.SGD(list(model.parameters()) + list(criterion.parameters()), lr=args.lr, momentum=0.9)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode="max", factor=args.factor,
                                                           patience=args.patience, verbose=True,
                                                           threshold=args.threshold)

    if use_gpu:
        model = model.cuda()
        model = torch.nn.DataParallel(model)

    print("Start training...")
    start = datetime.now()
    with SummaryWriter(log_dir) as writer:
        for epoch in range(args.epochs):
            train(model, trainloader, criterion, args.weight_intra, args.weight_inter, optimizer, use_gpu, writer,
                  epoch, args.epochs, args.vis, args.feat_dim, classes)

            if epoch % args.eval_freq == 0 or epoch == args.epochs - 1:
                eval(model, testloader, criterion, scheduler, use_gpu, writer, epoch, args.epochs, args.vis,
                     args.feat_dim, classes)

    elapsed_time = str(datetime.now() - start)
    print("Finish training. Total elapsed time %s." % elapsed_time)
Example #25
def main():
    print('Warning: this is a demo to test OceanPlus')
    print(
        'Warning: if you want to test it on VOT2020, please use our integration scripts'
    )
    args = parse_args()

    info = edict()
    info.arch = args.arch
    info.dataset = args.dataset
    info.online = args.online
    info.TRT = 'TRT' in args.arch

    siam_info = edict()
    siam_info.arch = args.arch
    siam_info.dataset = args.dataset
    siam_info.vis = args.vis
    siam_tracker = OceanPlus(siam_info)

    MMS = args.mms == 'True'
    siam_net = models.__dict__[args.arch](online=args.online, mms=MMS)
    print('===> init Siamese <====')
    siam_net = load_pretrain(siam_net, args.resume)
    siam_net.eval()
    siam_net = siam_net.cuda()

    # if info.TRT:
    #     print('===> load model from TRT <===')
    #     print('===> please ignore the warning information of TRT <===')
    #     trtNet = reloadTRT()
    #     siam_net.tensorrt_init(trtNet)

    if args.online:
        online_tracker = ONLINE(info)
    else:
        online_tracker = None

    print('====> warm up <====')
    for i in tqdm(range(20)):
        siam_net.template(
            torch.rand(1, 3, 127, 127).cuda(),
            torch.rand(1, 127, 127).cuda())
        siam_net.track(torch.rand(1, 3, 255, 255).cuda())

    # prepare video
    print('====> load dataset <====')
    dataset = load_dataset(args.dataset)
    video_keys = list(dataset.keys()).copy()

    # hyper-parameters in or not
    if args.hp is None:
        hp = None
    elif isinstance(args.hp, str):
        f = open(join('tune', args.hp), 'r')
        hp = json.load(f)
        f.close()
        print('====> tuning hp: {} <===='.format(hp))
    else:
        raise ValueError('not supported hyper-parameters')

    # tracking all videos in benchmark
    for video in video_keys:
        if args.dataset in ['DAVIS2016', 'DAVIS2017', 'YTBVOS']:  # VOS
            track_vos(siam_tracker, online_tracker, siam_net, dataset[video],
                      args, hp)
        else:  # VOTS (i.e. VOT2020)
            if video == 'butterfly':
                track(siam_tracker, online_tracker, siam_net, dataset[video],
                      args)
Example #26
        total_loss /= (batch + 1)
        y_trues = torch.cat(y_trues, dim=0)
        y_predicts = torch.cat(y_predicts, dim=0)

    return total_loss, y_trues, y_predicts


if __name__ == '__main__':
    warnings.filterwarnings('ignore')

    set_random_seed(args['seed'])

    print(f'loading dataset {args["dataset"]}...')

    graph, _, _, _, _, _ = load_dataset(
        data_path=args['data_path'],
        predict_category=args['predict_category'],
        data_split_idx_path=args['data_split_idx_path'])

    reverse_etypes = dict()
    for stype, etype, dtype in graph.canonical_etypes:
        for srctype, reltype, dsttype in graph.canonical_etypes:
            if srctype == dtype and dsttype == stype and reltype != etype:
                reverse_etypes[etype] = reltype
                break

    print(f'generating edge idx...')
    train_edge_idx, valid_edge_idx, test_edge_idx = get_predict_edge_index(
        graph,
        sample_edge_rate=args['sample_edge_rate'],
        sampled_edge_type=args['sampled_edge_type'],
        seed=args['seed'])
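The quadratic scan over canonical edge types above can be done in one pass with a lookup keyed on the endpoint pair; a sketch assuming the same graph object and DGL-style (srctype, etype, dsttype) triples:

by_endpoints = dict()
for stype, etype, dtype in graph.canonical_etypes:
    by_endpoints.setdefault((stype, dtype), []).append(etype)

reverse_etypes = dict()
for stype, etype, dtype in graph.canonical_etypes:
    for reltype in by_endpoints.get((dtype, stype), []):
        if reltype != etype:  # a different relation running the opposite way
            reverse_etypes[etype] = reltype
            break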
Example #27
def main():
    args = parse_args()

    info = edict()
    info.arch = args.arch
    info.dataset = args.dataset
    info.TRT = 'TRT' in args.arch
    info.epoch_test = args.epoch_test

    siam_info = edict()
    siam_info.arch = args.arch
    siam_info.dataset = args.dataset
    siam_info.online = args.online
    siam_info.epoch_test = args.epoch_test
    siam_info.TRT = 'TRT' in args.arch
    if args.online:
        siam_info.align = False
    else:
        siam_info.align = 'VOT' in args.dataset and args.align == 'True'

    if siam_info.TRT:
        siam_info.align = False

    siam_tracker = Ocean(siam_info)
    siam_net = models.__dict__[args.arch](align=siam_info.align,
                                          online=args.online)
    print(siam_net)
    print('===> init Siamese <====')

    if not siam_info.TRT:
        siam_net = load_pretrain(siam_net, args.resume)
    else:
        print("tensorrt toy model: not loading checkpoint")
    siam_net.eval()
    siam_net = siam_net.cuda()

    if siam_info.TRT:
        print('===> load model from TRT <===')
        print('===> please ignore the warning information of TRT <===')
        print(
            '===> We only provide a toy demo for TensorRT. Some operations are not supported well. <==='
        )
        print(
            '===> If you want to test on a benchmark, please use the PyTorch version. <==='
        )
        print(
            '===> The TensorRT code will be continuously optimized (with updates to the official TensorRT). <==='
        )
        trtNet = reloadTRT()
        siam_net.tensorrt_init(trtNet)

    if args.online:
        online_tracker = ONLINE(info)
    else:
        online_tracker = None

    print('====> warm up <====')
    for i in tqdm(range(10)):
        siam_net.template(torch.rand(1, 3, 127, 127).cuda())
        siam_net.track(torch.rand(1, 3, 255, 255).cuda())

    # prepare video
    dataset = load_dataset(args.dataset)
    video_keys = list(dataset.keys()).copy()

    if args.video is not None:
        track(siam_tracker, online_tracker, siam_net, dataset[args.video],
              args)
    else:
        for video in video_keys:
            track(siam_tracker, online_tracker, siam_net, dataset[video], args)
Example #28
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
sns.set()
import torch
from utils.utils import load_dataset, load_obj
from sklearn.metrics import roc_curve, auc
from models.ssd.utils import find_jaccard_overlap
from utils.evaluate import *

base_path = "/home/salvacarrion/Documents/Programming/Python/Projects/yolo4math/models/yolov3"
predictions = load_obj(base_path + "/predictions.pkl")
confusion_matrix = load_obj(base_path + "/confusion_matrix.pkl")
stats = load_dataset(base_path + "/stats.json")

det_boxes = predictions['det_boxes']
det_labels = predictions['det_labels']
det_scores = predictions['det_scores']
true_boxes = predictions['true_boxes']
true_labels = predictions['true_labels']

# det_labels = ignore_bg(det_labels)
# true_labels = ignore_bg(true_labels)

pred_class, pred_score, pred_iou, true_class = match_classes(det_boxes,
                                                             det_labels,
                                                             det_scores,
                                                             true_boxes,
                                                             true_labels,
                                                             n_classes=2)
Example #29
def main():
  """ Validate adv -> Rescale -> Validate scaled adv """
  model = init_models()
  preprocessing = dict(
    mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], axis=-3
  )

  dataset_loader, dataset_size = utils.load_dataset(
    dataset_path=DATASET_PATH, dataset_image_len=DATASET_IMAGE_NUM
  )

  # use GPU if available
  if torch.cuda.is_available():
    model = model.cuda()

  fmodel = foolbox.models.PyTorchModel(
    model,
    bounds=(0, 1),
    num_classes=len(CLASS_NAMES),
    preprocessing=preprocessing,
  )

  advs = np.load(ADV_SAVE_PATH)

  # * TASK 1/3: validate original adversaries
  control_group_acc = utils.validate(
    fmodel, dataset_loader, dataset_size, batch_size=BATCH_SIZE, advs=advs
  )

  # * TASK 2/3: resize adversaries
  scales = [0.5, 2]
  methods = [
    "INTER_NEAREST",
    "INTER_LINEAR",
    "INTER_AREA",
    "INTER_CUBIC",
    "INTER_LANCZOS4",
  ]
  # Initialize resized adversaries dict
  resized_advs = {method: {scale: None for scale in scales} for method in methods}

  pbar = tqdm(total=len(scales) * len(methods), desc="SCL")
  for method in methods:
    for scale in scales:
      resized_advs[method][scale] = utils.scale_adv(advs, scale, method)
      pbar.update(1)
  pbar.close()

  # * TASK 3/3: validate resized adversaries
  print(
    "{:<19} - success: {}%".format("CONTROL_GROUP  ×1", 100 - control_group_acc)
  )

  # Initialize success rate data
  success_data = {1: {"CONTROL_GROUP": 100.0 - control_group_acc}, 0.5: {}, 2: {}}
  success_data_flatten = {"CONTROL_GROUP ×1": 100.0 - control_group_acc}

  for scale in scales:
    for method in methods:
      acc = utils.validate(
        fmodel,
        dataset_loader,
        dataset_size,
        batch_size=BATCH_SIZE,
        advs=resized_advs[method][scale],
        silent=True,
      )
      success_data[scale][method] = 100.0 - acc
      success_data_flatten["{} ×{}".format(method, scale)] = 100.0 - acc
      print("{:<14} ×{:<3} - success: {}%".format(method, scale, 100.0 - acc))

  save_results_csv(success_data_flatten)

  # %%
  # * Plot results (success rate - advs)
  plot_results(success_data, success_data_flatten)
Example #30
args['data_path'] = f'../dataset/{args["dataset"]}/{args["dataset"]}.pkl'
args['data_split_idx_path'] = (
    f'../dataset/{args["dataset"]}/{args["dataset"]}_split_idx.pkl')
args['device'] = (f'cuda:{args["cuda"]}'
                  if torch.cuda.is_available() and args["cuda"] >= 0 else 'cpu')

if __name__ == '__main__':
    warnings.filterwarnings('ignore')

    set_random_seed(args['seed'])

    print(f'loading dataset {args["dataset"]}...')

    graph, labels, num_classes, train_idx, valid_idx, test_idx = load_dataset(
        data_path=args['data_path'],
        predict_category=args['predict_category'],
        data_split_idx_path=args['data_split_idx_path'])

    r_hgnn = R_HGNN(graph=graph,
                    input_dim_dict={
                        ntype: graph.nodes[ntype].data['feat'].shape[1]
                        for ntype in graph.ntypes
                    },
                    hidden_dim=args['hidden_units'],
                    relation_input_dim=args['relation_hidden_units'],
                    relation_hidden_dim=args['relation_hidden_units'],
                    num_layers=args['n_layers'],
                    n_heads=args['num_heads'],
                    dropout=args['dropout'],
                    residual=args['residual'])