Code Example #1
import os.path as ops

import torch

from ute.utils.arg_pars import opt
from ute.utils.util_functions import dir_check
# update_opt_str and path_logger are further project helpers; their module
# paths are not shown in these snippets


def update():
    opt.data = ops.join(opt.dataset_root, 'features')
    opt.gt = ops.join(opt.dataset_root, 'groundTruth')
    opt.output_dir = ops.join(opt.dataset_root, 'output')
    opt.mapping_dir = ops.join(opt.dataset_root, 'mapping')
    dir_check(opt.output_dir)
    opt.f_norm = True
    if torch.cuda.is_available():
        opt.device = 'cuda'

    if opt.global_pipe:
        opt.embed_dim = 30
    else:
        opt.embed_dim = 20

    if not opt.load_model:
        opt.lr = 1e-4  # the same rate for the global and the local pipeline
        opt.epochs = 60

    opt.bg = False  # YTI argument
    opt.gr_lev = ''  # 50Salads argument
    if opt.model_name == 'nothing':
        opt.load_embed_feat = True

    update_opt_str()

    logger = path_logger()

    vars_iter = list(vars(opt))
    for arg in sorted(vars_iter):
        logger.debug('%s: %s' % (arg, getattr(opt, arg)))
Code Example #2
def update():
    opt.data = ops.join(opt.dataset_root, 'features')
    opt.gt = ops.join(opt.dataset_root, 'groundTruth')
    opt.output_dir = ops.join(opt.dataset_root, 'output')
    opt.mapping_dir = ops.join(opt.dataset_root, 'mapping')
    dir_check(opt.output_dir)
    opt.f_norm = False
    if torch.cuda.is_available():
        opt.device = 'cuda'

    opt.embed_dim = 30

    if not opt.load_model:
        opt.lr = 1e-3
        opt.epochs = 30

    if opt.model_name == 'nothing':
        opt.load_embed_feat = True

    update_opt_str()

    logger = path_logger()

    vars_iter = list(vars(opt))
    for arg in sorted(vars_iter):
        logger.debug('%s: %s' % (arg, getattr(opt, arg)))
Code Example #3
def save_embed_feat(self):
    dir_check(ops.join(opt.data, 'embed'))
    dir_check(ops.join(opt.data, 'embed', opt.subaction))
    for video in self._videos:
        video_features = self._embedded_feat[video.global_range]
        feat_name = opt.resume_str + '_%s' % video.name
        np.savetxt(ops.join(opt.data, 'embed', opt.subaction, feat_name),
                   video_features)
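save_embed_feat stores each video's embedded features with np.savetxt, so the matching read is np.loadtxt. A minimal loading sketch; load_embed_feat is a hypothetical helper written for illustration, not part of the project code above:

import os.path as ops

import numpy as np

from ute.utils.arg_pars import opt


def load_embed_feat(video_name):
    """Hypothetical inverse of save_embed_feat: read back one video's
    embedded features that were written with np.savetxt."""
    feat_name = opt.resume_str + '_%s' % video_name
    path = ops.join(opt.data, 'embed', opt.subaction, feat_name)
    return np.loadtxt(path)  # (n_frames, embed_dim) array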
Code Example #4
    def __init__(self, subaction='coffee', K=None):
        """
        Args:
            Q: number of Gaussian components in each mixture
            subaction: current name of complex activity
        """
        np.random.seed(opt.seed)
        self.gt_map = GroundTruth(frequency=opt.frame_frequency)
        self.gt_map.load_mapping()
        self._K = self.gt_map.define_K(subaction=subaction) if K is None else K
        logger.debug('%s  subactions: %d' % (subaction, self._K))
        self.iter = 0
        self.return_stat = {}

        self._acc_old = 0
        self._videos = []
        self._subaction = subaction
        # init with ones for consistency with first measurement of MoF
        self._subact_counter = np.ones(self._K)
        self._gaussians = {}
        self._inv_count_stat = np.zeros(self._K)
        self._embedding = None
        self._gt2label = None
        self._label2gt = {}

        self._with_bg = opt.bg
        self._total_fg_mask = None

        # multiprocessing for sampling activities for each video
        self._features = None
        self._embedded_feat = None
        self._init_videos()
        # logger.debug('min: %f  max: %f  avg: %f' %
        #              (np.min(self._features),
        #               np.max(self._features),
        #               np.mean(self._features)))

        # to save segmentation of the videos
        dir_check(os.path.join(opt.output_dir, 'segmentation'))
        dir_check(os.path.join(opt.output_dir, 'likelihood'))
        self.vis = None  # visualization tool
Code Example #5
    def plot(self, iter=0, show=True, prefix=''):
        if iter is not None:
            self._counter = iter
        if 20 in self._labels:
            self._labels = np.array(self._labels)
            mask = self._labels == 20
            self._labels[mask] = 10
        plt.axis('off')

        plt.scatter(self._result[..., 0],
                    self._result[..., 1],
                    c=self._labels,
                    s=self._sizes,
                    alpha=1)
        plt.grid(True)
        if prefix == 'time_':
            plt.colorbar()
        if self._save:
            # plt.figure(figsize=(1))
            dir_check(join(opt.dataset_root, 'plots'))
            dir_check(join(opt.dataset_root, 'plots', opt.subaction))
            # name = ['iter%d_' % self._counter, 'gt_'][gt_plot]
            name = prefix + '%s_%s_' % (opt.subaction, opt.model_name)
            folder_name = opt.log_str
            dir_check(
                join(opt.dataset_root, 'plots', opt.subaction, folder_name))
            folder_name = join(opt.log_str, opt.vis_mode)
            dir_check(
                join(opt.dataset_root, 'plots', opt.subaction, folder_name))
            if self.svg:
                name += '_%s.svg' % self._mode
            else:
                name += '_%s.png' % self._mode
                # plt.savefig(join(opt.dataset_root, 'plots', opt.subaction,
                #                  folder_name, name), dpi=400)
            plt.savefig(join(opt.dataset_root, 'plots', opt.subaction,
                             folder_name, name),
                        transparent=True,
                        dpi=300)
            np.savetxt(
                join(opt.dataset_root, 'plots', opt.subaction, folder_name,
                     '%s.txt' % opt.vis_mode), self._result)
        if show:
            plt.show()
Code Example #6
"""
won't use 0 as index for this (YTI) dataset.
-1 - background index
"""

__author__ = 'Anna Kukleva'
__date__ = 'September 2018'

import os
import re

from ute.utils.util_functions import dir_check
from ute.utils.arg_pars import opt

actions = ['coffee', 'changing_tire', 'cpr', 'jump_car', 'repot']
gt_folder = '/media/data/kukleva/lab/YTInstructions/segmentation_gt_dt'
dir_check(opt.gt)

label2idx = {}
idx2label = {}

label2idx['bg'] = -1
idx2label[-1] = 'bg'

videos = {}

for root, dirs, files in os.walk(gt_folder):
    for filename in files:
        segmentation = []
        with open(os.path.join(root, filename), 'r') as f:
            for line in f:
                match = re.match(r'(\d*)-(\d*)\s*(\w*)', line)
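The regular expression above assumes ground-truth lines of the form 'start-end label'. A small self-contained check of what its three capture groups return (the sample line is invented for demonstration):

import re

line = '120-245 pour_water'  # hypothetical ground-truth line
match = re.match(r'(\d*)-(\d*)\s*(\w*)', line)
if match:
    start, end, label = match.groups()
    print(start, end, label)  # -> 120 245 pour_water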
Code Example #7
def training(train_loader, epochs, save, **kwargs):
    """Training pipeline for embedding.

    Args:
        train_loader: iterator within dataset
        epochs: how much training epochs to perform
        n_subact: number of subactions in current complex activity
        mnist: if training with mnist dataset (just to test everything how well
            it works)
    Returns:
        trained pytorch model
    """
    logger.debug('create model')

    # make everything deterministic -> seed setup
    torch.manual_seed(opt.seed)
    torch.cuda.manual_seed(opt.seed)
    np.random.seed(opt.seed)
    random.seed(opt.seed)
    torch.backends.cudnn.deterministic = True

    model = kwargs['model']
    loss = kwargs['loss']
    optimizer = kwargs['optimizer']

    # note: cudnn.benchmark favors speed and can undermine the deterministic
    # cudnn setting made above
    cudnn.benchmark = True

    batch_time = Averaging()
    data_time = Averaging()
    losses = Averaging()

    adjustable_lr = opt.lr

    logger.debug('epochs: %s', epochs)
    for epoch in range(epochs):
        # model.cuda()
        model.to(opt.device)
        model.train()

        logger.debug('Epoch # %d' % epoch)
        if opt.lr_adj:
            # if epoch in [int(epochs * 0.3), int(epochs * 0.7)]:
            # if epoch in [int(epochs * 0.5)]:
            if epoch % 30 == 0 and epoch > 0:
                adjustable_lr = adjust_lr(optimizer, adjustable_lr)
                logger.debug('lr: %f' % adjustable_lr)
        end = time.time()
        for i, (features, labels) in enumerate(train_loader):
            data_time.update(time.time() - end)
            features = features.float()
            labels = labels.float().to(opt.device)
            if opt.device == 'cuda':
                features = features.cuda(non_blocking=True)
            # features = features.float().cuda(non_blocking=True)
            # labels = labels.float().cuda()
            output = model(features)
            loss_values = loss(output, labels)
            losses.update(loss_values.item(), features.size(0))

            optimizer.zero_grad()
            loss_values.backward()
            optimizer.step()

            batch_time.update(time.time() - end)
            end = time.time()

            if i % 100 == 0 and i:
                logger.debug(
                    'Epoch: [{0}][{1}/{2}]\t'
                    'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                    'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                    'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                        epoch,
                        i,
                        len(train_loader),
                        batch_time=batch_time,
                        data_time=data_time,
                        loss=losses))
        logger.debug('loss: %f' % losses.avg)
        losses.reset()

    opt.resume_str = join(opt.dataset_root, 'models',
                          '%s.pth.tar' % opt.log_str)
    if save:
        save_dict = {
            'epoch': epoch,
            'state_dict': model.state_dict(),
            'optimizer': optimizer.state_dict()
        }
        dir_check(join(opt.dataset_root, 'models'))
        torch.save(save_dict, opt.resume_str)
    return model
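A minimal sketch of how training could be invoked; the toy data, the two-layer model, the MSE loss and the Adam optimizer below are placeholders chosen for illustration, not the configuration used in the project:

import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset

from ute.utils.arg_pars import opt

# toy stand-ins for the real video features and targets (hypothetical shapes)
features = torch.randn(512, 64)
targets = torch.randn(512, opt.embed_dim)
train_loader = DataLoader(TensorDataset(features, targets),
                          batch_size=32, shuffle=True)

model = nn.Sequential(nn.Linear(64, 40), nn.ReLU(),
                      nn.Linear(40, opt.embed_dim))
loss = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)

model = training(train_loader, epochs=opt.epochs, save=True,
                 model=model, loss=loss, optimizer=optimizer)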
Code Example #8
def save_likelihood(self):
    """Used for multiprocessing."""
    dir_check(os.path.join(opt.data, 'likelihood'))
    np.savetxt(os.path.join(opt.data, 'likelihood', self.name),
               self._likelihood_grid)
Code Example #9
    def accuracy_corpus(self, prefix=''):
        """Calculate metrics as well with previous correspondences between
        gt labels and output labels"""
        accuracy = Accuracy()
        f1_score = F1Score(K=self._K, n_videos=len(self._videos))
        long_gt = []
        long_pr = []
        long_rel_time = []
        self.return_stat = {}

        for video in self._videos:
            long_gt += list(video.gt)
            long_pr += list(video._z)
            try:
                long_rel_time += list(video.temp)
            except AttributeError:
                pass
                # logger.debug('no poses')
        accuracy.gt_labels = long_gt
        accuracy.predicted_labels = long_pr
        if opt.bg:
            # enforce bg class to be bg class
            accuracy.exclude[-1] = [-1]

        old_mof, total_fr = accuracy.mof(old_gt2label=self._gt2label)
        self._gt2label = accuracy._gt2cluster
        self._label2gt = {}
        for key, val in self._gt2label.items():
            try:
                self._label2gt[val[0]] = key
            except IndexError:
                pass
        acc_cur = accuracy.mof_val()
        logger.debug('%sAction: %s' % (prefix, self._subaction))
        logger.debug('%sMoF val: ' % prefix + str(acc_cur))
        logger.debug('%sprevious dic -> MoF val: ' % prefix +
                     str(float(old_mof) / total_fr))

        accuracy.mof_classes()
        accuracy.iou_classes()

        self.return_stat = accuracy.stat()

        f1_score.set_gt(long_gt)
        f1_score.set_pr(long_pr)
        f1_score.set_gt2pr(self._gt2label)
        if opt.bg:
            f1_score.set_exclude(-1)
        f1_score.f1()

        for key, val in f1_score.stat().items():
            self.return_stat[key] = val

        for video in self._videos:
            video.segmentation[video.iter] = (video._z, self._label2gt)

        if opt.vis:
            ########################################################################
            # VISUALISATION

            if opt.vis_mode != 'segm':
                long_pr = [self._label2gt[i] for i in long_pr]

                if self.vis is None:
                    self.vis = Visual(mode=opt.vis_mode,
                                      save=True,
                                      reduce=None)
                    self.vis.fit(self._embedded_feat, long_pr,
                                 'iter_%d' % self.iter)
                else:
                    reset = prefix == 'final'
                    self.vis.color(labels=long_pr,
                                   prefix='iter_%d' % self.iter,
                                   reset=reset)
            else:
                ####################################################################
                # visualisation of segmentation
                if prefix == 'final':
                    colors = {}
                    cmap = plt.get_cmap('tab20')
                    for label_idx, label in enumerate(np.unique(long_gt)):
                        if label == -1:
                            colors[label] = (0, 0, 0)
                        else:
                            # colors[label] = (np.random.rand(), np.random.rand(), np.random.rand())
                            colors[label] = cmap(label_idx /
                                                 len(np.unique(long_gt)))

                    dir_check(os.path.join(opt.dataset_root, 'plots'))
                    dir_check(
                        os.path.join(opt.dataset_root, 'plots', opt.subaction))
                    fold_path = os.path.join(opt.dataset_root, 'plots',
                                             opt.subaction, 'segmentation')
                    dir_check(fold_path)
                    for video in self._videos:
                        path = os.path.join(fold_path, video.name + '.png')
                        name = video.name.split('_')
                        name = '_'.join(name[-2:])
                        plot_segm(path, video.segmentation, colors, name=name)
                ####################################################################
            ####################################################################

        return accuracy.frames()
Code Example #10
File: mapping.py  Project: wzmsltw/unsup_temp_embed
def save_obj(obj, name):
    dir_check(opt.mapping_dir)
    path = os.path.join(opt.mapping_dir, '%s.pkl' % name)
    with open(path, 'wb') as f:
        pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
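save_obj pairs naturally with a pickle-based loader; a minimal sketch of such a load_obj (hypothetical, not shown in mapping.py):

import os
import pickle

from ute.utils.arg_pars import opt


def load_obj(name):
    """Hypothetical counterpart of save_obj: read a pickled object
    from opt.mapping_dir by name."""
    path = os.path.join(opt.mapping_dir, '%s.pkl' % name)
    with open(path, 'rb') as f:
        return pickle.load(f)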