Example #1
0
def darwin(fullfeatnames, st, num_videos, darwins_path):
    """Compute video-darwin representations for every cached feature file.

    For each feature type in `fullfeatnames` and each per-video pickle it
    lists, loads the pickled data, computes one darwin vector for the whole
    video (`data['X']`) and one per tree node (`data['tree_perframe']`),
    and writes them back as a pickle under `darwins_path/<feat_t>/`.

    Args:
        fullfeatnames: mapping feature-type -> list of feature pickle paths.
        st: unused here (kept for interface compatibility with callers).
        num_videos: unused here (kept for interface compatibility).
        darwins_path: output root directory; created if missing.

    Returns:
        None; results are written to disk.
    """
    if not exists(darwins_path):
        makedirs(darwins_path)

    for feat_t in fullfeatnames:
        # One output subdirectory per feature type.
        if not exists(join(darwins_path, feat_t)):
            makedirs(join(darwins_path, feat_t))

        for featname in fullfeatnames[feat_t]:
            output_filepath = join(darwins_path, feat_t, basename(featname))
            if isfile(output_filepath):
                # Already computed on a previous run; skip (resumable).
                print('%s -> OK' % (featname))
                continue

            start_time = time.time()

            with open(featname, 'rb') as f:
                data = cPickle.load(f)

            # compute VD.
            # BUG FIX: the original called darwin(...) with a single argument,
            # i.e. this 4-argument function itself, which would raise
            # TypeError at runtime. Sibling functions in this module use
            # videodarwin.darwin for this computation, so call that instead.
            node_darwins = dict()
            node_darwins[1] = videodarwin.darwin(data['X'])
            for node_id, X in data['tree_perframe'].iteritems():
                node_darwins[node_id] = videodarwin.darwin(X)

            with open(output_filepath, 'wb') as f:
                cPickle.dump(dict(node_darwins=node_darwins), f)

            elapsed_time = time.time() - start_time
            print('%s -> DONE (in %.2f secs)' %
                  (output_filepath, elapsed_time))

    return None


# ==============================================================================
# Helper functions
# ==============================================================================
Example #2
0
def darwin(fullfeatnames, st, num_videos, darwins_path):
    """Compute video-darwin representations for every cached feature file.

    For each feature type in `fullfeatnames` and each per-video pickle it
    lists, loads the pickled data, computes one darwin vector for the whole
    video (`data['X']`) and one per tree node (`data['tree_perframe']`),
    and writes them back as a pickle under `darwins_path/<feat_t>/`.

    Args:
        fullfeatnames: mapping feature-type -> list of feature pickle paths.
        st: unused here (kept for interface compatibility with callers).
        num_videos: unused here (kept for interface compatibility).
        darwins_path: output root directory; created if missing.

    Returns:
        None; results are written to disk.
    """
    if not exists(darwins_path):
        makedirs(darwins_path)

    for feat_t in fullfeatnames:
        # One output subdirectory per feature type.
        if not exists(join(darwins_path, feat_t)):
            makedirs(join(darwins_path, feat_t))

        for featname in fullfeatnames[feat_t]:
            output_filepath = join(darwins_path, feat_t, basename(featname))
            if isfile(output_filepath):
                # Already computed on a previous run; skip (resumable).
                print('%s -> OK' % (featname))
                continue

            start_time = time.time()

            with open(featname, 'rb') as f:
                data = cPickle.load(f)

            # compute VD.
            # BUG FIX: the original called darwin(...) with a single argument,
            # i.e. this 4-argument function itself, which would raise
            # TypeError at runtime. Sibling functions in this module use
            # videodarwin.darwin for this computation, so call that instead.
            node_darwins = dict()
            node_darwins[1] = videodarwin.darwin(data['X'])
            for node_id, X in data['tree_perframe'].iteritems():
                node_darwins[node_id] = videodarwin.darwin(X)

            with open(output_filepath, 'wb') as f:
                cPickle.dump(dict(node_darwins=node_darwins), f)

            elapsed_time = time.time() - start_time
            print('%s -> DONE (in %.2f secs)' % (output_filepath, elapsed_time))

    return None


# ==============================================================================
# Helper functions
# ==============================================================================
def _compute_vd_descriptors(tracklets_path, intermediates_path, videonames, traintest_parts, indices, feat_types, feats_path, \
                            pca_reduction=False, treelike=True, clusters_path=None, verbose=False):
    """Compute video darwin (VD) descriptors and pickle them to disk.

    For each partition index k and each feature type, per-frame Fisher
    vectors are built from the video's tracklet features and summarized
    with videodarwin.darwin. One .pkl is written per video under
    '<feats_path>/<feat_t>-<k>/'. Already-existing outputs are skipped,
    so the function is resumable.

    Args:
        tracklets_path: dir with per-video tracklet pickles; one subdir per
            feature type plus an 'obj' subdir (column 0 of the loaded array
            is used as a frame id here -- presumably; TODO confirm schema).
        intermediates_path: dir with cached GMM/PCA pickles named
            'gmm[_pca]-<feat_t>-<k>.pkl'.
        videonames: list of video identifiers used as file basenames.
        traintest_parts: partitions; only their enumeration index k is used.
        indices: indices into videonames selecting which videos to process.
        feat_types: feature type names ('trj' and 'mbh' are special-cased).
        feats_path: output root directory (created if missing).
        pca_reduction: if True, apply the cached PCA before the FV step.
        treelike: if True, compute one darwin per cluster-tree node;
            otherwise a single darwin for the whole video.
        clusters_path: dir with per-video cluster pickles (read when
            processing a video).
        verbose: print per-video progress messages.

    Returns:
        None; output is written to disk as .pkl files.
    """
    # EAFP-style dir creation: an already-existing dir is not an error.
    try:
        makedirs(feats_path)
    except OSError:
        pass

    for k, part in enumerate(traintest_parts):
        # cach'd pca and gmm

        # One output subdir per (feature type, partition) pair.
        for j, feat_t in enumerate(feat_types):
            try:
                makedirs(join(feats_path, feat_t + '-' + str(k)))
            except OSError:
                pass

        # GMM/PCA models are loaded lazily: only if some video needs work.
        cache = None

        # process videos
        total = len(videonames)
        for i in indices:
            # FV computed for all feature types? see the last in INTERNAL_PARAMETERS['feature_types']
            all_done = np.all([isfile(join(feats_path, feat_t + '-' + str(k), videonames[i] + '.pkl'))
                   for feat_t in feat_types])
            if all_done:
                if verbose:
                    print('[_compute_vd_descriptors] %s -> OK' % videonames[i])
                continue

            # First video needing work for this k: load the cached models.
            if cache is None:
                cache = dict()
                for j, feat_t in enumerate(feat_types):
                    with open(join(intermediates_path, 'gmm' + ('_pca-' if pca_reduction else '-') + feat_t + '-' + str(k) + '.pkl'), 'rb') as f:
                        cache[feat_t] = cPickle.load(f)

            start_time = time.time()

            # object features used for the per-frame FV representation computation (cach'd)
            with open(join(tracklets_path, 'obj', videonames[i] + '.pkl'), 'rb') as f:
                obj = cPickle.load(f)
            with open(join(clusters_path, videonames[i] + '.pkl'), 'rb') as f:
                clusters = cPickle.load(f)

            for j, feat_t in enumerate(feat_types):
                # Skip feature types already computed for this video.
                if isfile(join(feats_path, feat_t + '-' + str(k), videonames[i] + '.pkl')):
                    continue

                # load video tracklets' feature
                with open(join(tracklets_path, feat_t, videonames[i] + '.pkl'), 'rb') as f:
                    d = cPickle.load(f)

                if feat_t == 'trj': # (special case)
                    d = convert_positions_to_displacements(d)

                # L1-normalize. For 'mbh' the two halves of the descriptor
                # are normalized independently, then re-joined.
                # NOTE: d.shape[1]/2 relies on Python-2 integer division.
                if feat_t == 'mbh':
                    dx = preprocessing.normalize(d[:,:d.shape[1]/2], norm='l1', axis=1)
                    dy = preprocessing.normalize(d[:,d.shape[1]/2:], norm='l1', axis=1)
                    d = np.hstack((dx,dy))
                else:
                    d = preprocessing.normalize(d, norm='l1', axis=1)

                d = rootSIFT(d)

                if pca_reduction:
                    d = cache[feat_t]['pca'].transform(d)  # reduce dimensionality

                d = np.ascontiguousarray(d, dtype=np.float32)  # required in many of Yael functions

                output_filepath = join(feats_path, feat_t + '-' + str(k), videonames[i] + '.pkl')
                # compute FV of the video
                if not treelike:
                    # (in a per-frame representation)
                    fids = np.unique(obj[:,0])
                    V = [] # row-wise fisher vectors (matrix)
                    for f in fids:
                        tmp = d[np.where(obj[:,0] == f)[0],:]  # hopefully this is contiguous if d already was
                        fv = ynumpy.fisher(cache[feat_t]['gmm'], tmp, include=INTERNAL_PARAMETERS['fv_repr_feats'])  # f-th frame fisher vec
                        V.append(fv)  # no normalization or nothing (it's done when computing darwin)

                    vd = videodarwin.darwin(np.array(V))

                    with open(output_filepath, 'wb') as f:
                        cPickle.dump(dict(v=vd), f)

                else:  # or separately the FVs of the tree nodes
                    vdtree = dict()
                    if len(clusters['tree']) == 1:
                        # Degenerate tree: a single root node covers all frames.
                        fids = np.unique(obj[:,0])
                        V = [ynumpy.fisher(cache[feat_t]['gmm'], d[np.where(obj[:,0] == f)[0],:], INTERNAL_PARAMETERS['fv_repr_feats'])
                             for f in fids]
                        vdtree[1] = videodarwin.darwin(np.array(V))
                    else:
                        T = reconstruct_tree_from_leafs(np.unique(clusters['int_paths']))
                        for parent_idx, children_inds in T.iteritems():
                            # (in a per-frame representation)
                            node_inds = np.where(np.any([clusters['int_paths'] == idx for idx in children_inds], axis=0))[0]
                            fids = np.unique(obj[node_inds,0])
                            V = []
                            for f in fids:
                                # NOTE(review): np.where here yields indices
                                # relative to the node_inds subset, but they
                                # index the full d -- verify this is intended.
                                tmp = d[np.where(obj[node_inds,0] == f)[0],:]
                                fv = ynumpy.fisher(cache[feat_t]['gmm'], tmp, INTERNAL_PARAMETERS['fv_repr_feats'])
                                V.append(fv)  # no normalization or nothing (it's done when computing darwin)
                            vdtree[parent_idx] = videodarwin.darwin(np.array(V))

                    with open(output_filepath, 'wb') as f:
                        cPickle.dump(dict(tree=vdtree), f)

            elapsed_time = time.time() - start_time
            if verbose:
                print('[_compute_vd_descriptors] %s -> DONE (in %.2f secs)' % (videonames[i], elapsed_time))
def _compute_vd_descriptors(tracklets_path, intermediates_path, videonames, traintest_parts, indices, feat_types, feats_path, \
                            pca_reduction=True, treelike=True, clusters_path=None):
    """Compute video darwin (VD) descriptors and pickle them to disk.

    Variant of the pipeline that eagerly loads the cached GMM/PCA models
    for every partition, builds paths by plain string concatenation (so
    `tracklets_path`, `intermediates_path`, `clusters_path` and
    `feats_path` are presumably expected to end with a separator -- TODO
    confirm against callers), and additionally applies `normalize` to the
    darwin output.

    Args:
        tracklets_path: base path prefix for per-video tracklet pickles
            (one '<feat_t>/' group plus an 'obj/' group).
        intermediates_path: path prefix for 'gmm[_pca]-<feat_t>-<k>.pkl'.
        videonames: list of video identifiers used as file basenames.
        traintest_parts: partitions; only their enumeration index k is used.
        indices: indices into videonames selecting which videos to process.
        feat_types: feature type names ('trj' is special-cased).
        feats_path: output root (created if missing).
        pca_reduction: if True (default here), apply the cached PCA.
        treelike: if True, one darwin per cluster-tree node; else one per
            video.
        clusters_path: path prefix for per-video cluster pickles.

    Returns:
        None; output is written to disk as .pkl files.
    """
    if not exists(feats_path):
        makedirs(feats_path)

    for k, part in enumerate(traintest_parts):
        # cach'd pca and gmm
        cache = dict()
        for j, feat_t in enumerate(feat_types):
            if not exists(feats_path + feat_t + '-' + str(k)):
                makedirs(feats_path + feat_t + '-' + str(k))
            with open(intermediates_path + 'gmm' + ('_pca-' if pca_reduction else '-') + feat_t + '-' + str(k) + '.pkl', 'rb') as f:
                cache[feat_t] = cPickle.load(f)

        # process videos
        total = len(videonames)
        for i in indices:
            # FV computed for all feature types? see the last in INTERNAL_PARAMETERS['feature_types']
            # Only the LAST feature type's output is checked as the
            # "all done" marker (it is the last one written below).
            output_filepath = join(feats_path, feat_types[-1] + '-' + str(k), videonames[i] + '.pkl')
            if isfile(output_filepath):
                # for j, feat_t in enumerate(feat_types):
                #     featnames.setdefault(feat_t, []).append(feats_path + feat_t + '/' + videonames[i] + '-fvtree.pkl')
                print('%s -> OK' % output_filepath)
                continue

            start_time = time.time()

            # object features used for the per-frame FV representation computation (cach'd)
            with open(tracklets_path + 'obj/' + videonames[i] + '.pkl', 'rb') as f:
                obj = cPickle.load(f)
            with open(clusters_path + videonames[i] + '.pkl', 'rb') as f:
                clusters = cPickle.load(f)

            for j, feat_t in enumerate(feat_types):
                # load video tracklets' feature
                with open(tracklets_path + feat_t + '/' + videonames[i] + '.pkl', 'rb') as f:
                    d = cPickle.load(f)
                    if feat_t == 'trj': # (special case)
                        d = convert_positions_to_displacements(d)

                # pre-processing
                d = rootSIFT(preprocessing.normalize(d, norm='l1', axis=1))  # https://hal.inria.fr/hal-00873267v2/document

                if pca_reduction:
                    d = cache[feat_t]['pca'].transform(d)  # reduce dimensionality

                d = np.ascontiguousarray(d, dtype=np.float32)  # required in many of Yael functions

                output_filepath = join(feats_path, feat_t + '-' + str(k), videonames[i] + '.pkl')
                # compute FV of the video
                if not treelike:
                    # (in a per-frame representation)
                    fids = np.unique(obj[:,0])
                    V = [] # row-wise fisher vectors (matrix)
                    for f in fids:
                        tmp = d[np.where(obj[:,0] == f)[0],:]  # hopefully this is contiguous if d already was
                        fv = ynumpy.fisher(cache[feat_t]['gmm'], tmp, include=INTERNAL_PARAMETERS['fv_repr_feats'])  # f-th frame fisher vec
                        V.append(fv)  # no normalization or nothing (it's done when computing darwin)

                    vd = normalize(videodarwin.darwin(np.array(V)))

                    with open(output_filepath, 'wb') as f:
                        cPickle.dump(dict(v=vd), f)

                else:  # or separately the FVs of the tree nodes
                    T = reconstruct_tree_from_leafs(np.unique(clusters['int_paths']))
                    vdtree = dict()
                    for parent_idx, children_inds in T.iteritems():
                        # (in a per-frame representation)
                        node_inds = np.where(np.any([clusters['int_paths'] == idx for idx in children_inds], axis=0))[0]
                        fids = np.unique(obj[node_inds,0])
                        # dim = INTERNAL_PARAMETERS['fv_gmm_k'] * len(INTERNAL_PARAMETERS['fv_repr_feats']) * d.shape[1]
                        V = []
                        for f in fids:
                            # NOTE(review): np.where here yields indices
                            # relative to the node_inds subset, but they
                            # index the full d -- verify this is intended.
                            tmp = d[np.where(obj[node_inds,0] == f)[0],:]
                            fv = ynumpy.fisher(cache[feat_t]['gmm'], tmp, INTERNAL_PARAMETERS['fv_repr_feats'])
                            V.append(fv)  # no normalization or nothing (it's done when computing darwin)
                        vdtree[parent_idx] = normalize(videodarwin.darwin(np.array(V)))

                    with open(output_filepath, 'wb') as f:
                        cPickle.dump(dict(tree=vdtree), f)

            elapsed_time = time.time() - start_time
            print('%s -> DONE (in %.2f secs)' % (videonames[i], elapsed_time))
def _compute_vd_descriptors(tracklets_path, intermediates_path, videonames, traintest_parts, indices, feat_types, feats_path, \
                            pca_reduction=False, treelike=True, clusters_path=None, verbose=False):
    """Compute video darwin (VD) descriptors and pickle them to disk.

    For each partition index k and each feature type, per-frame Fisher
    vectors are built from the video's tracklet features and summarized
    with videodarwin.darwin. One .pkl is written per video under
    '<feats_path>/<feat_t>-<k>/'. Existing outputs are skipped, so the
    function is resumable. (This definition shadows the earlier ones of
    the same name in this file.)

    Args:
        tracklets_path: dir with per-video tracklet pickles; one subdir per
            feature type plus an 'obj' subdir (column 0 of the loaded array
            is used as a frame id here -- presumably; TODO confirm schema).
        intermediates_path: dir with cached GMM/PCA pickles named
            'gmm[_pca]-<feat_t>-<k>.pkl'.
        videonames: list of video identifiers used as file basenames.
        traintest_parts: partitions; only their enumeration index k is used.
        indices: indices into videonames selecting which videos to process.
        feat_types: feature type names ('trj' and 'mbh' are special-cased).
        feats_path: output root directory (created if missing).
        pca_reduction: if True, apply the cached PCA before the FV step.
        treelike: if True, compute one darwin per cluster-tree node;
            otherwise a single darwin for the whole video.
        clusters_path: dir with per-video cluster pickles.
        verbose: print per-video progress messages.

    Returns:
        None; output is written to disk as .pkl files.
    """
    # EAFP-style dir creation: an already-existing dir is not an error.
    try:
        makedirs(feats_path)
    except OSError:
        pass

    for k, part in enumerate(traintest_parts):
        # cach'd pca and gmm

        # One output subdir per (feature type, partition) pair.
        for j, feat_t in enumerate(feat_types):
            try:
                makedirs(join(feats_path, feat_t + '-' + str(k)))
            except OSError:
                pass

        # GMM/PCA models are loaded lazily: only if some video needs work.
        cache = None

        # process videos
        total = len(videonames)
        for i in indices:
            # FV computed for all feature types? see the last in INTERNAL_PARAMETERS['feature_types']
            all_done = np.all([
                isfile(
                    join(feats_path, feat_t + '-' + str(k),
                         videonames[i] + '.pkl')) for feat_t in feat_types
            ])
            if all_done:
                if verbose:
                    print('[_compute_vd_descriptors] %s -> OK' % videonames[i])
                continue

            # First video needing work for this k: load the cached models.
            if cache is None:
                cache = dict()
                for j, feat_t in enumerate(feat_types):
                    with open(
                            join(
                                intermediates_path,
                                'gmm' + ('_pca-' if pca_reduction else '-') +
                                feat_t + '-' + str(k) + '.pkl'), 'rb') as f:
                        cache[feat_t] = cPickle.load(f)

            start_time = time.time()

            # object features used for the per-frame FV representation computation (cach'd)
            with open(join(tracklets_path, 'obj', videonames[i] + '.pkl'),
                      'rb') as f:
                obj = cPickle.load(f)
            with open(join(clusters_path, videonames[i] + '.pkl'), 'rb') as f:
                clusters = cPickle.load(f)

            for j, feat_t in enumerate(feat_types):
                # Skip feature types already computed for this video.
                if isfile(
                        join(feats_path, feat_t + '-' + str(k),
                             videonames[i] + '.pkl')):
                    continue

                # load video tracklets' feature
                with open(join(tracklets_path, feat_t, videonames[i] + '.pkl'),
                          'rb') as f:
                    d = cPickle.load(f)

                if feat_t == 'trj':  # (special case)
                    d = convert_positions_to_displacements(d)

                # L1-normalize. For 'mbh' the two halves of the descriptor
                # are normalized independently, then re-joined.
                # NOTE: d.shape[1] / 2 relies on Python-2 integer division.
                if feat_t == 'mbh':
                    dx = preprocessing.normalize(d[:, :d.shape[1] / 2],
                                                 norm='l1',
                                                 axis=1)
                    dy = preprocessing.normalize(d[:, d.shape[1] / 2:],
                                                 norm='l1',
                                                 axis=1)
                    d = np.hstack((dx, dy))
                else:
                    d = preprocessing.normalize(d, norm='l1', axis=1)

                d = rootSIFT(d)

                if pca_reduction:
                    d = cache[feat_t]['pca'].transform(
                        d)  # reduce dimensionality

                d = np.ascontiguousarray(
                    d, dtype=np.float32)  # required in many of Yael functions

                output_filepath = join(feats_path, feat_t + '-' + str(k),
                                       videonames[i] + '.pkl')
                # compute FV of the video
                if not treelike:
                    # (in a per-frame representation)
                    fids = np.unique(obj[:, 0])
                    V = []  # row-wise fisher vectors (matrix)
                    for f in fids:
                        tmp = d[np.where(
                            obj[:, 0] == f
                        )[0], :]  # hopefully this is contiguous if d already was
                        fv = ynumpy.fisher(
                            cache[feat_t]['gmm'],
                            tmp,
                            include=INTERNAL_PARAMETERS['fv_repr_feats']
                        )  # f-th frame fisher vec
                        V.append(
                            fv
                        )  # no normalization or nothing (it's done when computing darwin)

                    vd = videodarwin.darwin(np.array(V))

                    with open(output_filepath, 'wb') as f:
                        cPickle.dump(dict(v=vd), f)

                else:  # or separately the FVs of the tree nodes
                    vdtree = dict()
                    if len(clusters['tree']) == 1:
                        # Degenerate tree: a single root node covers all frames.
                        fids = np.unique(obj[:, 0])
                        V = [
                            ynumpy.fisher(cache[feat_t]['gmm'],
                                          d[np.where(obj[:, 0] == f)[0], :],
                                          INTERNAL_PARAMETERS['fv_repr_feats'])
                            for f in fids
                        ]
                        vdtree[1] = videodarwin.darwin(np.array(V))
                    else:
                        T = reconstruct_tree_from_leafs(
                            np.unique(clusters['int_paths']))
                        for parent_idx, children_inds in T.iteritems():
                            # (in a per-frame representation)
                            node_inds = np.where(
                                np.any([
                                    clusters['int_paths'] == idx
                                    for idx in children_inds
                                ],
                                       axis=0))[0]
                            fids = np.unique(obj[node_inds, 0])
                            V = []
                            for f in fids:
                                # NOTE(review): np.where yields indices
                                # relative to the node_inds subset, but they
                                # index the full d -- verify this is intended.
                                tmp = d[np.where(obj[node_inds, 0] == f)[0], :]
                                fv = ynumpy.fisher(
                                    cache[feat_t]['gmm'], tmp,
                                    INTERNAL_PARAMETERS['fv_repr_feats'])
                                V.append(
                                    fv
                                )  # no normalization or nothing (it's done when computing darwin)
                            vdtree[parent_idx] = videodarwin.darwin(
                                np.array(V))

                    with open(output_filepath, 'wb') as f:
                        cPickle.dump(dict(tree=vdtree), f)

            elapsed_time = time.time() - start_time
            if verbose:
                print('[_compute_vd_descriptors] %s -> DONE (in %.2f secs)' %
                      (videonames[i], elapsed_time))