Code example #1
File: core.py Project: nolan88/BayesianTracker
    def track_interactive(self, step_size=100):
        """ Run the tracking in an interactive mode """

        # TODO(arl): this needs cleaning up to have some decent output
        if not self.__initialised:
            logger.info('Using default parameters')
            self.configure({'MotionModel': 'constant_velocity.json'})

        logger.info('Starting tracking... ')

        stats = self.step()
        frm = 0
        while not stats.complete and stats.error not in constants.ERRORS:
            logger.info('Tracking objects in frames {0:d} to '
                        '{1:d} (of {2:d})...'.format(
                            frm,
                            min(frm + step_size - 1,
                                self.__frame_range[1] + 1),
                            self.__frame_range[1] + 1))

            stats = self.step(step_size)
            utils.log_stats(stats.to_dict())
            frm += step_size

        if not utils.log_error(stats.error):
            logger.info('SUCCESS.')
            logger.info(' - Found {0:d} tracks in {1:d} frames (in '
                        '{2:.2f}s)'.format(self.n_tracks,
                                           1 + self.__frame_range[1],
                                           stats.t_total_time))
            logger.info(' - Inserted {0:d} dummy objects to fill '
                        'tracking gaps'.format(self.n_dummies))
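Both BayesianTracker examples finish by handing stats.to_dict() to utils.log_stats. The helper itself is not reproduced on this page; what follows is a minimal sketch of a dict-logging helper consistent with that call (the logger setup and the sample field names are assumptions, not the actual btrack API):

import logging

logger = logging.getLogger(__name__)

def log_stats(stats):
    """Write each tracking statistic as an INFO-level log line.

    A minimal sketch; the real utils.log_stats in BayesianTracker
    may format or filter these fields differently.
    """
    for key, value in stats.items():
        logger.info(' - {0}: {1}'.format(key, value))

# Hypothetical usage with a dict shaped like stats.to_dict():
log_stats({'n_tracks': 42, 'error': 0, 't_total_time': 1.23})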
Code example #2
def train(epoch):
    print('\nEpoch: %d' % epoch)
    net.train()
    train_loss = 0
    correct = 0
    total = 0

    # Collect the convolutional parameters whose statistics will be logged
    conv_param_names = []
    conv_params = []
    for name, param in net.named_parameters():
        if "conv" in name:
            conv_params.append(param)
            conv_param_names.append(name)

    for batch_idx, (inputs, targets) in enumerate(trainloader):
        inputs, targets = inputs.to(device), targets.to(device)
        optimizer.zero_grad()
        outputs = net(inputs)
        loss = criterion(outputs, targets)
        loss.backward()

        if batch_idx % 10 == 0:
            # Log conv parameter and gradient statistics, raw and absolute
            param_stats, bin_counts = get_param_stats(conv_params,
                                                      conv_param_names)
            grad_norm_stats = get_grad_norm(conv_params, conv_param_names)
            log_stats(param_stats,
                      bin_counts,
                      grad_norm_stats,
                      dir="GradientStatsPercentile_Abs_Norm",
                      epoch=epoch,
                      iteration=batch_idx)
            param_stats, bin_counts = get_param_stats(conv_params,
                                                      conv_param_names,
                                                      take_abs=True)
            grad_norm_stats = get_grad_norm(conv_params, conv_param_names)
            log_stats(param_stats,
                      bin_counts,
                      grad_norm_stats,
                      dir="GradientStatsPercentile_Abs_Norm",
                      epoch=epoch,
                      iteration=batch_idx,
                      param_file="PerParamStatsAbs.log",
                      bin_counts_file="OverallStatsAbs.log",
                      grad_norm_file="GradNormStatsAbs.log")

        optimizer.step()
        train_loss += loss.item()
        _, predicted = outputs.max(1)
        total += targets.size(0)
        correct += predicted.eq(targets).sum().item()

        progress_bar(batch_idx, len(trainloader),
                     'Loss: %.3f | Acc: %.3f%% (%d/%d)'
                     % (train_loss / (batch_idx + 1),
                        100. * correct / total, correct, total))
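The statistics helpers called above (get_param_stats, get_grad_norm, log_stats) are defined elsewhere in that project. As a rough illustration of the per-parameter percentile summary the names suggest, here is a hedged sketch (the return structure and the bin count are assumptions):

import torch

def get_param_stats(params, names, take_abs=False):
    """Summarise each parameter tensor with its quartiles.

    A hypothetical sketch of the helper used above; the real
    implementation may return different structures.
    """
    param_stats = {}
    all_values = []
    for name, param in zip(names, params):
        values = param.detach().flatten().float().cpu()
        if take_abs:
            values = values.abs()
        # min, lower quartile, median, upper quartile, max
        quantiles = torch.quantile(
            values, torch.tensor([0.0, 0.25, 0.5, 0.75, 1.0]))
        param_stats[name] = quantiles.tolist()
        all_values.append(values)
    # Overall histogram across every collected parameter
    bin_counts = torch.histc(torch.cat(all_values), bins=10).tolist()
    return param_stats, bin_counts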
Code example #3
def main_worker(args, config):
    """Trains model on ImageNet using data parameters

    Args:
        args (argparse.Namespace):
        config (dict): config file for the experiment.
    """
    global_iter = 0

    # Create model
    model, loss_criterion = get_model_and_loss_criterion(args)

    # Define optimizer
    optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    # Get train and validation dataset loader
    train_loader, val_loader = get_train_and_val_loader(args)

    # Initialize class and instance based temperature
    (class_parameters, inst_parameters, optimizer_class_param,
     optimizer_inst_param) = utils.get_class_inst_data_params_n_optimizer(
         args=args,
         nr_classes=1000,
         nr_instances=len(train_loader.dataset),
         device='cuda')
    # Training loop
    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch, args)

        # Train for one epoch
        global_iter = train_for_one_epoch(
            args=args,
            train_loader=train_loader,
            model=model,
            criterion=loss_criterion,
            optimizer=optimizer,
            epoch=epoch,
            global_iter=global_iter,
            optimizer_data_parameters=(optimizer_class_param,
                                       optimizer_inst_param),
            data_parameters=(class_parameters, inst_parameters),
            config=config)

        # Evaluate on validation set
        validate(args, val_loader, model, loss_criterion, epoch)

        # Save artifacts
        utils.save_artifacts(args, epoch, model, class_parameters, inst_parameters)

        # Log temperature stats over epochs
        if args.learn_class_parameters:
            utils.log_stats(data=torch.exp(class_parameters),
                            name='epochs_stats_class_parameter',
                            step=epoch)
        if args.learn_inst_parameters:
            utils.log_stats(data=torch.exp(inst_parameters),
                            name='epochs_stats_inst_parameter',
                            step=epoch)
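Here utils.log_stats is called with a tensor, a name, and a step rather than a dict, so the data-parameters project evidently ships its own logging helper. A minimal sketch consistent with that signature follows (the plain-print output format is an assumption; the real helper may write to TensorBoard or a file instead):

import torch

def log_stats(data, name, step):
    """Print summary statistics of a tensor at a given training step.

    A hypothetical sketch matching the call signature above.
    """
    print('{0} (step {1:d}): min={2:.4f} max={3:.4f} '
          'mean={4:.4f} std={5:.4f}'.format(
              name, step, data.min().item(), data.max().item(),
              data.mean().item(), data.std().item()))

# Hypothetical usage with stand-in data:
log_stats(torch.exp(torch.randn(1000)), 'epochs_stats_class_parameter', 0)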
Code example #4
File: core.py Project: nolan88/BayesianTracker
    def track(self):
        """ Run the actual tracking algorithm """

        if not self.__initialised:
            logger.info('Using default parameters')
            self.configure({'MotionModel': 'constant_velocity.json'})

        logger.info('Starting tracking... ')
        ret, tm = timeit(lib.track, self.__engine)

        # get the statistics
        stats = self.__stats(ret)

        if not utils.log_error(stats.error):
            logger.info('SUCCESS. Found {0:d} tracks in {1:d} frames (in '
                        '{2:.2f}s)'.format(self.n_tracks,
                                           1 + self.__frame_range[1], tm))

        # the statistics can be logged as well
        utils.log_stats(stats.to_dict())
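track() times the C library call with a timeit helper that returns both the result and the elapsed time. That helper is not shown on this page; a minimal sketch of the pattern, with the name and signature inferred from the call above:

import time

def timeit(func, *args):
    """Run func(*args) and return (result, elapsed seconds).

    A minimal sketch; the real helper in BayesianTracker may differ.
    """
    start = time.time()
    result = func(*args)
    return result, time.time() - start

# Hypothetical usage mirroring ret, tm = timeit(lib.track, engine):
ret, tm = timeit(sum, range(1000000))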
Code example #5
def main_worker(args, config):
    """Trains model on ImageNet using data parameters

    Args:
        args (argparse.Namespace):
        config (dict): config file for the experiment.
    """
    global_iter = 0
    learning_rate_schedule = np.array([80, 100, 160])

    # Create model
    model, loss_criterion = get_model_and_loss_criterion(args)

    # Define optimizer
    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    # Get train and validation dataset loader
    train_loader, val_loader = get_train_and_val_loader(args)

    # Initialize class and instance based temperature
    (class_parameters, inst_parameters, optimizer_class_param,
     optimizer_inst_param) = utils.get_class_inst_data_params_n_optimizer(
         args=args,
         nr_classes=args.nr_classes,
         nr_instances=len(train_loader.dataset),
         device='cuda')

    # Training loop
    for epoch in range(args.start_epoch, args.epochs):

        # Adjust learning rate for model parameters
        if epoch in learning_rate_schedule:
            adjust_learning_rate(model_initial_lr=args.lr,
                                 optimizer=optimizer,
                                 gamma=0.1,
                                 step=np.sum(epoch >= learning_rate_schedule))

        # Train for one epoch
        global_iter = train_for_one_epoch(
            args=args,
            train_loader=train_loader,
            model=model,
            criterion=loss_criterion,
            optimizer=optimizer,
            epoch=epoch,
            global_iter=global_iter,
            optimizer_data_parameters=(optimizer_class_param,
                                       optimizer_inst_param),
            data_parameters=(class_parameters, inst_parameters),
            config=config)

        # Evaluate on validation set
        validate(args, val_loader, model, loss_criterion, epoch)

        # Save artifacts
        utils.save_artifacts(args, epoch, model, class_parameters,
                             inst_parameters)

        # Log temperature stats over epochs
        if args.learn_class_parameters:
            utils.log_stats(data=torch.exp(class_parameters),
                            name='epochs_stats_class_parameter',
                            step=epoch)
        if args.learn_inst_parameters:
            utils.log_stats(data=torch.exp(inst_parameters),
                            name='epoch_stats_inst_parameter',
                            step=epoch)

        if args.rand_fraction > 0.0:
            # We have corrupted labels in the train data; plot instance
            # parameter stats separately for corrupt and clean data
            nr_corrupt_instances = int(
                np.floor(len(train_loader.dataset) * args.rand_fraction))
            # Corrupt data is in the top-fraction of dataset
            utils.log_stats(
                data=torch.exp(inst_parameters[:nr_corrupt_instances]),
                name='epoch_stats_corrupt_inst_parameter',
                step=epoch)
            utils.log_stats(
                data=torch.exp(inst_parameters[nr_corrupt_instances:]),
                name='epoch_stats_clean_inst_parameter',
                step=epoch)
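Example #5 decays the model learning rate at the milestones in learning_rate_schedule, passing the number of milestones already passed as step. A minimal step-decay sketch consistent with that call (the real adjust_learning_rate helper is not shown on this page):

def adjust_learning_rate(model_initial_lr, optimizer, gamma, step):
    """Set lr = model_initial_lr * gamma ** step on every param group.

    A sketch inferred from the call in example #5; the project's
    actual helper may differ.
    """
    new_lr = model_initial_lr * (gamma ** step)
    for param_group in optimizer.param_groups:
        param_group['lr'] = new_lr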