Code example #1
    def train_from_samples(self, patches):
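        """
        Recursively split the training patches into parts: at each node a
        2-component Bernoulli mixture is fitted, the per-feature log-odds
        difference between its components becomes the split weights, and the
        split is kept only while there are enough samples, the depth limit is
        not reached and the entropy / information-gain criterion is satisfied.
        Leaves are assigned consecutive part ids.
        """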
        min_prob = self._settings.get('min_prob', 0.01)

        kp_patches = patches.reshape((patches.shape[0], -1, patches.shape[-1]))
        flatpatches = kp_patches.reshape((kp_patches.shape[0], -1))

        q = []

        models = []
        constant_terms = []
        constant_terms_unsummed = []

        tree = []# -123*np.ones((1000, 2), dtype=np.int64)
        cur_pos = 0

        s = 0
        cur_part_id = 0

        q.insert(0, (cur_pos, 0, flatpatches))
        s += 1
        #cur_pos += 1
        while q:
            p, depth, x = q.pop()

            model = np.clip(np.mean(x, 0), min_prob, 1 - min_prob)
            def entropy(x):
                return -(x * np.log2(x) + (1 - x) * np.log2(1 - x))
            H = np.mean(entropy(model))

            sc = self._settings.get('split_criterion', 'H')

            if len(x) < self._settings.get('min_samples_per_part', 20) or depth >= self._max_depth or \
               (sc == 'H' and H < self._settings.get('split_entropy', 0.30)):
                #tree[p,0] = -1
                #tree[p,1] = cur_part_id
                tree.append((-1, cur_part_id))
                cur_part_id += 1

            else:
                mm = BernoulliMM(n_components=self._num_parts_per_layer,
                                 n_iter=self._settings.get('n_iter', 8),
                                 tol=1e-15,
                                 n_init=self._settings.get('n_init', 1),  # increasing n_init may improve results slightly
                                 random_state=self._settings.get('em_seed', 0),
                                 #params='m',
                                 min_prob=min_prob)
                mm.fit(x[:self._settings.get('train_limit')])
                logprob, resp = mm.score_samples(x)
                comps = resp.argmax(-1)

                w = logit(mm.means_[1]) - logit(mm.means_[0])
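                # w is the per-feature difference in log-odds between the two
                # mixture components, i.e. the weight vector of the Bernoulli
                # log-likelihood-ratio test between them.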


                Hafter = np.mean(entropy(mm.means_[0])) * mm.weights_[0] + np.mean(entropy(mm.means_[1])) * mm.weights_[1]
                IG = H - Hafter

                if sc == 'IG' and IG < self._settings.get('min_information_gain', 0.05):
                    tree.append((-1, cur_part_id))
                    cur_part_id += 1
                else:
                    tree.append((len(models), s))
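                    # Constant term of the log-likelihood ratio: for binary x,
                    # log P(x|comp 1) - log P(x|comp 0) = w . x + sum log((1-p1)/(1-p0)),
                    # so storing K (and its unsummed version, used later for keypoint
                    # subsets) lets the split be evaluated as a linear score.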
                    K_unsummed = np.log((1 - mm.means_[1]) / (1 - mm.means_[0]))
                    K = np.sum(K_unsummed)

                    models.append(w)
                    constant_terms.append(K)
                    constant_terms_unsummed.append(K_unsummed)
                    #tree[p,1] = s


                    q.insert(0, (s, depth+1, x[comps == 0]))
                    #cur_pos += 1
                    q.insert(0, (s+1, depth+1, x[comps == 1]))
                    #cur_pos += 1
                    s += 2

        shape = (len(models),) + patches.shape[1:]
        weights = np.asarray(models).reshape(shape)
        constant_terms = np.asarray(constant_terms)
        constant_terms_unsummed = np.asarray(constant_terms_unsummed).reshape(shape)
        tree = np.asarray(tree, dtype=np.int64)

        self._tree = tree
        self._num_parts = cur_part_id
        #print('num_parts', self._num_parts)
        self._w = weights
        self._constant_terms = constant_terms

        supp_radius = self._settings.get('keypoint_suppress_radius', 0)
        if supp_radius > 0:

            NW = self._w.shape[0]
            max_indices = self._settings.get('keypoint_max', 1000)
            keypoints = np.zeros((NW, max_indices, 3), dtype=np.int64)
            kp_constant_terms = np.zeros(NW)
            num_keypoints = np.zeros(NW, dtype=np.int64)

            from gv.keypoints import get_key_points
            for k in range(NW):
                kps = get_key_points(self._w[k], suppress_radius=supp_radius, max_indices=max_indices)

                NK = len(kps)
                num_keypoints[k] = NK
                keypoints[k,:NK] = kps

                for kp in kps:
                    kp_constant_terms[k] += constant_terms_unsummed[k,kp[0],kp[1],kp[2]]

            self._keypoints = keypoints
            self._num_keypoints = num_keypoints
            self._keypoint_constant_terms = kp_constant_terms
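
# A minimal usage sketch (not in the original code): assuming numpy is imported
# as np and `tree`, `weights` (self._w) and `constant_terms` are the arrays built
# by train_from_samples above, a binary patch with shape patches.shape[1:] can be
# assigned to a part by walking the tree and checking the sign of each node's
# linear log-likelihood-ratio score.
def assign_part(tree, weights, constant_terms, patch):
    node = 0
    while tree[node, 0] != -1:                # internal node
        i = tree[node, 0]                     # index of the split model
        score = np.sum(weights[i] * patch) + constant_terms[i]
        # score > 0 favors mixture component 1, whose child was enqueued at
        # position tree[node, 1] + 1; component 0 sits at tree[node, 1].
        node = tree[node, 1] + (1 if score > 0 else 0)
    return tree[node, 1]                      # leaf: the stored part id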
Code example #2
def superimposed_model(settings, threading=True):
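    """
    Train a multi-component BernoulliDetector: cluster the training images into
    mixture components, gather positive and (superimposed) negative features per
    component, turn them into clipped log-odds weight maps, and fill in keypoint
    indices, background models and standardization info before returning the
    detector.
    """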
    num_mixtures = settings['detector']['num_mixtures']

    # Train a mixture model to get a clustering of the angles of the object
    descriptor = gv.load_descriptor(settings)
    detector = gv.BernoulliDetector(num_mixtures, descriptor, settings['detector'])

    files = get_training_files(detector)
    neg_files = sorted(glob.glob(settings['detector']['neg_dir']))

    ag.info("Checkpoint 1")

    testing_type = detector.settings.get('testing_type')

    # Extract clusters (manual or through EM)
    ##############################################################################
    detector, comps = cluster(detector, files)
    each_mix_N = np.bincount(comps, minlength=num_mixtures)

    ##############################################################################

    ag.info("Checkpoint 3")

    ag.info("Checkpoint 4")

    support = detector.support 

    kernels = []

    #ag.info("TODO, quitting")
    #return detector

    # Determine bounding boxes
    ##############################################################################

    psize = settings['detector']['subsample_size']

    bbs = calc_bbs(detector)

    ag.info("Checkpoint 6")

    ag.info("Checkpoint 7")

    bkgs = []
    orig_sizes = []
    new_support = []
    im_size = settings['detector']['image_size']

    ag.info("Checkpoint 8")
    all_negs = []

    ag.info("Checkpoint 9")

    # Retrieve features and support 
    ##############################################################################

    ag.info('Fetching positives again...')
    all_pos_feats = []
    all_neg_feats = []
    alphas = []
    all_alphas = []
    all_binarized_alphas = []


    if settings['detector'].get('superimpose'):
        detector.extra['concentrations'] = []

        argses = [(m, settings, bbs[m], list(np.where(comps == m)[0]), files, neg_files, settings['detector'].get('stand_multiples', 1)) for m in range(detector.num_mixtures)]        
        for mixcomp, neg_feats, pos_feats, alpha_maps, extra in itr.starmap(get_pos_and_neg, argses):
            alpha = np.mean(alpha_maps, axis=0)
            alpha_maps = np.asarray(alpha_maps)
            all_alphas.append(alpha_maps)
            all_binarized_alphas.append(alpha_maps > 0.05)

            alphas.append(alpha)
            all_neg_feats.append(neg_feats)
            all_pos_feats.append(pos_feats)

            detector.extra['concentrations'].append(extra.get('concentrations', {}))

        ag.info('Done.')

        # Setup some places to store things
        if 'weights' not in detector.extra:
            detector.extra['weights'] = [None] * detector.num_mixtures
        if 'sturf' not in detector.extra:
            detector.extra['sturf'] = [{} for _ in xrange(detector.num_mixtures)]

        for m in xrange(detector.num_mixtures):

            obj = all_pos_feats[m].mean(axis=0)
            bkg = all_neg_feats[m].mean(axis=0)
            size = gv.bb.size(bbs[m])

            kernels.append(obj)
            bkgs.append(bkg)
            orig_sizes.append(size)
            new_support.append(alphas[m])

        if 0:
            for m in xrange(detector.num_mixtures):
                obj = all_pos_feats[m].mean(axis=0)
                bkg = all_neg_feats[m].mean(axis=0)
                size = gv.bb.size(bbs[m])

                eps = 0.025
                obj = np.clip(obj, eps, 1 - eps)
                avg = np.clip(bkg, eps, 1 - eps)
                #lmb = obj / avg
                #w = np.clip(np.log(obj / avg), -1, 1)
                w = np.log(obj / (1 - obj) * ((1 - avg) / avg))
                #w = np.log(

                #w_avg = np.apply_over_axes(np.sum, w * support[...,np.newaxis], [0, 1]) / support.sum()

                #w -= w_avg * support[...,np.newaxis]

                if 'weights' not in detector.extra:
                    detector.extra['weights'] = []
                detector.extra['weights'].append(w)

                if 'sturf' not in detector.extra:
                    detector.extra['sturf'] = []

                detector.extra['sturf'].append(dict())
                        
                kernels.append(obj)
                bkgs.append(bkg)
                orig_sizes.append(size)
                new_support.append(alphas[m])

        detector.settings['per_mixcomp_bkg'] = True
    else:
        # Get a single background model for this one
        bkg = _get_background_model(settings, neg_files)

        crop_image = detector.settings.get('crop_image')
        argses = [(m, settings, list(np.where(comps == m)[0]), files, crop_image) for m in range(detector.num_mixtures)]        
        for m, pos_feats in gv.parallel.starmap(get_positives, argses):
            obj = pos_feats.mean(axis=0)
            all_pos_feats.append(pos_feats)

            kernels.append(obj)
            bkgs.append(bkg)
            size = gv.bb.size(bbs[m])

            orig_sizes.append(size)
            support = np.ones(settings['detector']['image_size'])
            new_support.append(support)

        detector.settings['per_mixcomp_bkg'] = True


    # Get weights and support

    for m in xrange(detector.num_mixtures):
        #kern = detector.kernel_templates[m]
        #bkg = detector.fixed_spread_bkg[m]
        obj = all_pos_feats[m].mean(axis=0)
        bkg = all_neg_feats[m].mean(axis=0)

        if detector.eps is None:
            detector.prepare_eps(bkg)

        weights = detector.build_clipped_weights(obj, bkg, detector.eps)

        detector.extra['weights'][m] = weights

        detector.extra['sturf'][m]['support'] = arrange_support(alphas[m], weights.shape, psize)

    # Modify weights

    if not detector.settings.get('plain'):
        for m in xrange(detector.num_mixtures):
            weights = detector.extra['weights'][m] 

            F = detector.num_features
            indices = get_key_points(weights, suppress_radius=detector.settings.get('indices_suppress_radius', 4), even=True)

            L0 = indices.shape[0] // F 
            
            kp_weights = np.zeros((L0, F))

            M = np.zeros(weights.shape, dtype=np.uint8)
            counts = np.zeros(F, dtype=int)
            for index in indices:
                f = index[2]
                M[tuple(index)] = 1
                kp_weights[counts[f],f] = weights[tuple(index)]
                counts[f] += 1

            #theta = np.load('theta3.npy')[1:-1,1:-1]
            #th = theta
            #eth = np.load('empty_theta.npy')

            #support = 1-th[:,:,np.arange(1,F+1),np.arange(F)].mean(-1)
            #offset = gv.sub.subsample_offset_shape(alphas[m].shape, psize)

            support = detector.extra['sturf'][m]['support'] 

            #    def subsample_offset_shape(shape, size):


            pos, neg = all_pos_feats[m].astype(bool), all_neg_feats[m].astype(bool)
            #avg = np.apply_over_axes(

            diff = pos ^ neg
            appeared = pos & ~neg
            disappeared = ~pos & neg

            #bs = (support > 0.5)[np.newaxis,...,np.newaxis]
             

            A = appeared.mean(0) / (0.00001+((1-neg).mean(0)))
            D = disappeared.mean(0) / (0.00001+neg.mean(0))
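            # A: rate at which a feature appears in a positive given it was off in the
            # matched negative; D: rate at which it disappears given it was on
            # (both denominators regularized to avoid division by zero).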
            #ss = D.mean(-1)[...,np.newaxis]
            ss = support[...,np.newaxis]

            B = (np.apply_over_axes(np.mean, A*ss, [0, 1])).squeeze() / ss.mean()

            def clogit(x):
                return gv.logit(gv.bclip(x, 0.025))

            def find_zero(fun, l, u, depth=30):
                m = np.mean([l, u])
                if depth == 0:
                    return m
                v = fun(m)
                if v > 0:
                    return find_zero(fun, l, m, depth-1)
                else:
                    return find_zero(fun, m, u, depth-1)
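            # find_zero is a fixed-depth bisection: it assumes fun is increasing on
            # [l, u] and brackets a zero crossing inside the interval.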

            # Find zero-crossing
            #for f in xrange(F):
                

            # Now construct weights from these deltas
            #weights = ((clogit(ss * deltas + A) - clogit(B)))
            #weights = (ss * (clogit(deltas + pos.mean(0)) - clogit(neg.mean(0))))

            
            avg = np.apply_over_axes(np.mean, pos * M * ss, [1, 2]) / (ss * M).mean()

            if 0:
                for l0, l1, f in gv.multirange(*weights.shape):

                    def fun(w):
                        return -(np.clip(pos[:,l0,l1,f].mean(), 0.005, 0.995) - np.mean(expit(w + logit(avg[...,f]))))

                    weights[l0,l1,f] = find_zero(fun, -10, 10)



            if 1:
                # Print these to file
                from matplotlib.pylab import cm
                grid = gv.plot.ImageGrid(detector.num_features, 1, weights.shape[:2], border_color=(0.5, 0.5, 0.5))
                mm = np.fabs(weights).max()
                for f in xrange(detector.num_features):
                    grid.set_image(weights[...,f], f, 0, vmin=-mm, vmax=mm, cmap=cm.RdBu_r)
                fn = os.path.join(os.path.expandvars('$HOME'), 'html', 'plots', 'plot2.png')
                grid.save(fn, scale=10)
                os.chmod(fn, 0o644)
                



            #A = appeared.mean(0) / (0.00001+((1-neg).mean(0)))
            #mm = (A * ss).mean() / ss.mean()


            #xx = (bs & pos) | (~bs & appeared)

            #avg = xx.mean(0)
            weights1 = ss*(weights - np.apply_over_axes(np.mean, weights * ss, [0, 1])/ss.mean())
            detector.extra['sturf'][m]['weights1'] = weights1

            eps = 0.025

            avg_pos = (np.apply_over_axes(np.mean, pos * ss, [0, 1, 2]) / ss.mean()).squeeze().clip(eps, 1-eps)
            avg_neg = (np.apply_over_axes(np.mean, neg * ss, [0, 1, 2]) / ss.mean()).squeeze().clip(eps, 1-eps)

            #w_avg = np.apply_over_axes(np.sum, weights * support[...,np.newaxis], [0, 1]) / support.sum()
            #
            #w_avg = (logit(np.apply_over_axes(np.mean, pos, [0, 1, 2])) - \
             #        logit(np.apply_over_axes(np.mean, neg, [0, 1, 2]))).squeeze()
            w_avg = logit(avg_pos) - logit(avg_neg)
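            # w_avg: per-feature log-odds difference between the support-weighted
            # average activation rates of positives and negatives; it is subtracted
            # from the weights (scaled by the support mask) further below.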
            detector.extra['sturf'][m]['wavg'] = w_avg
            detector.extra['sturf'][m]['reweighted'] = (w_avg * support[...,np.newaxis]).squeeze()

            #weights -= w_avg * support[...,np.newaxis]
            #weights *= support[...,np.newaxis] * M
            if 0:
                weights *= support[...,np.newaxis]

                avg_weights = np.apply_over_axes(np.mean, weights, [0, 1]) / M.mean(0).mean(0)

                avg_w = kp_weights.mean(0)

                weights -= avg_w - (-kp_weights.var(0) / 2)

                weights *= support[...,np.newaxis]

                print((weights * M).mean(0))


            #weights = (weights - w_avg) * support[...,np.newaxis]
            #weights -= (w_avg + 0.0) * support[...,np.newaxis]

            weights -= w_avg * support[...,np.newaxis]

            F = detector.num_features

            if 0:
                for f in xrange(F):
                    #zz = np.random.normal(-1.5, size=(1, 1, 50))
                    zz = np.random.normal(-1.5, size=(1, 1, 50)).ravel()

                    betas = np.zeros(len(zz))
                    for i, z in enumerate(zz):
                        def fun(beta):
                            w = weights[...,f] - beta * support 
                            return np.log(1 - expit(w[...,np.newaxis] + z)).mean() - np.log(1 - expit(z))

                        betas[i] = find_zero(fun, -10, 10)

                    
                    if f == 0:
                        np.save('betas.npy', betas)
                    beta0 = betas.mean()
                    print(f, beta0, betas.std())
                    weights[...,f] -= beta0 * support 


            if 1:
                # Print these to file
                from matplotlib.pylab import cm
                grid = gv.plot.ImageGrid(detector.num_features, 2, weights.shape[:2], border_color=(0.5, 0.5, 0.5))
                mm = np.fabs(weights).max()
                for f in xrange(detector.num_features):
                    grid.set_image(weights[...,f], f, 0, vmin=-mm, vmax=mm, cmap=cm.RdBu_r)
                    grid.set_image(M[...,f], f, 1, vmin=0, vmax=1, cmap=cm.RdBu_r)
                fn = os.path.join(os.path.expandvars('$HOME'), 'html', 'plots', 'plot.png')
                grid.save(fn, scale=10)
                os.chmod(fn, 0o644)

            ag.info('sum', np.fabs(np.apply_over_axes(np.sum, weights, [0, 1])).sum())

            # Instead, train model rigorously!!
            detector.extra['sturf'][m]['pos'] = all_pos_feats[m]
            detector.extra['sturf'][m]['neg'] = all_neg_feats[m]


            # Averages over all positives
            ff = all_pos_feats[m]
            posavg = np.apply_over_axes(np.sum, all_pos_feats[m] * support[...,np.newaxis], [1, 2]).squeeze() / support.sum() 
            negavg = np.apply_over_axes(np.sum, all_neg_feats[m] * support[...,np.newaxis], [1, 2]).squeeze() / support.sum() 

            S = np.cov(posavg.T)
            Sneg = np.cov(negavg.T)

            detector.extra['sturf'][m]['pavg'] = avg_pos
            detector.extra['sturf'][m]['pos-samples'] = posavg 
            detector.extra['sturf'][m]['S'] = S
            detector.extra['sturf'][m]['Sneg'] = Sneg
            detector.extra['sturf'][m]['navg'] = avg_neg

            Spos = S
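            # Draw synthetic per-image average activation vectors from Gaussians
            # fitted to the negative (avg_neg, Sneg) and positive (avg_pos, Spos)
            # statistics; the positive covariance is also inflated by 2, 10 and 50.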
            rs = np.random.RandomState(0)
            detector.extra['sturf'][m]['Zs'] = rs.multivariate_normal(avg_neg, Sneg, size=1000).clip(min=0.005, max=0.995)
            detector.extra['sturf'][m]['Zs_pos'] = rs.multivariate_normal(avg_pos, Spos, size=1000).clip(min=0.005, max=0.995)
            detector.extra['sturf'][m]['Zs_pos2'] = rs.multivariate_normal(avg_pos, Spos * 2, size=1000).clip(min=0.005, max=0.995)
            detector.extra['sturf'][m]['Zs_pos10'] = rs.multivariate_normal(avg_pos, Spos * 10, size=1000).clip(min=0.005, max=0.995)
            detector.extra['sturf'][m]['Zs_pos50'] = rs.multivariate_normal(avg_pos, Spos * 50, size=1000).clip(min=0.005, max=0.995)

    #{{{
    if 0:
        argses = [(m, settings, bbs[m], np.where(comps == m)[0], files, neg_files) for m in xrange(detector.num_mixtures)]
        for kern, bkg, orig_size, sup in gv.parallel.starmap(_create_kernel_for_mixcomp, argses):
            kernels.append(kern) 
            bkgs.append(bkg)
            orig_sizes.append(orig_size)
            new_support.append(sup)
                    
            ag.info("Checkpoint 10")

            detector.settings['per_mixcomp_bkg'] = True
    #}}}

    detector.kernel_templates = kernels
    detector.kernel_sizes = orig_sizes
    detector.settings['kernel_ready'] = True
    detector.use_alpha = False
    detector.support = new_support

    # Determine the background
    ag.info("Determining background")

    detector.fixed_bkg = None
    detector.fixed_spread_bkg = bkgs

    detector.settings['bkg_type'] = 'from-file'

    detector._preprocess()
    detector.prepare_eps(detector.fixed_spread_bkg[0])

    # Determine the standardization values
    ag.info("Determining standardization values")

    #fixed_train_mean = np.zeros(detector.num_mixtures)
    #detector.fixed_train_mean = []
    #fixed_train_std = np.ones(detector.num_mixtures)

    # Determine indices for coarse detection sweep
    if INDICES:
        detector.indices = []

        for m in xrange(detector.num_mixtures):
            these_indices = []
            weights = detector.extra['weights'][m]

            ag.info('Indices:', np.prod(weights.shape))

            # If not plain, we need even keypoints
            even = not detector.settings.get('plain')
            indices = get_key_points(weights, suppress_radius=detector.settings.get('indices_suppress_radius', 4), even=even)

            if not detector.settings.get('plain'):
                detector.extra['weights'][m] = weights

            assert len(indices) > 0, "No indices were extracted when keypointing"

            detector.indices.append(indices)
    else:
        detector.indices = None

    if testing_type in ('fixed', 'non-parametric'):
        detector.standardization_info = []
        if testing_type == 'fixed':
            if detector.settings.get('standardize_with_samples'):
                detector.standardization_info = [dict(mean=0, std=1)] * detector.num_mixtures
                info = []
                source = detector.settings.get('standardize_negative_source', 'neg-dir')
                N = detector.settings.get('standardize_num_images', 50)
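                # Sample-based standardization: run each mixture component over N
                # negative images and use the mean and std of the resulting detection
                # confidences as that component's score calibration.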
                if source.startswith('voc-train-non-'):
                    obj_class = source.split('-')[-1] 
                    print('Taking negatives from voc train, without class', obj_class)
                    gen = gv.voc.gen_negative_files(obj_class, 'train')
                    #print('negatives', len([im for im in gen]))
                else:
                    print('Taking negatives from neg_dir')
                    gen = itr.cycle(gv.datasets.ImgFile(path=fn, img_id=os.path.basename(fn)) for fn in neg_files)
                    
                gen = itr.cycle(gen)
                gen = itr.islice(gen, N)
                gens = itr.tee(gen, detector.num_mixtures)

                th = -np.inf
                for m in xrange(detector.num_mixtures):
                    neg_files_segment = gens[m]
                    argses = [(detector, i, fileobj, th, m) for i, fileobj in enumerate(neg_files_segment)] 
                    topsy = list(gv.parallel.starmap_unordered(get_strong_fps_single, argses))
                    confs = np.asarray([bbobj.confidence for topsy_m in topsy for bbobj in topsy_m])

                    info.append(dict(mean=confs.mean(), std=confs.std())) 
                    #for m in xrange(detector.num_mixtures):
                    
                detector.standardization_info = info      

            else:

                argses = [(m, settings, detector.eps, bbs[m], kernels[m], bkgs[m], None, None, None, detector.indices[m] if INDICES else None, 3) for m in xrange(detector.num_mixtures)]

                detector.standardization_info = list(gv.parallel.starmap(_calc_standardization_for_mixcomp, argses))
        else:
            raise Exception("Unknown testing type")


    detector.settings['testing_type'] = testing_type 
    #detector.settings['testing_type'] = 'NEW'

    #detector.

    #
    # Data mine stronger negatives 
    #
    # TODO: Object class must be input
    if 1:
        contest = 'voc'
        obj_class = 'car'
        gen = gv.voc.gen_negative_files(obj_class, 'train')
    else:
        contest = 'custom-tmp-frontbacks'
        obj_class = 'bicycle'
        gen, tot = gv.datasets.load_files(contest, obj_class)

    import heapq
    top_bbs = [[] for k in xrange(detector.num_mixtures)]
    TOP_N = 10000


    if detector.settings.get('cascade'): # New SVM attempt 
        detector.extra['cascade_threshold'] = detector.settings.get('cascade_threshold', 8) 
        COUNT = detector.settings.get('cascade_farming_count', 500)

        args = itr.izip( \
            itr.repeat(detector), 
            xrange(COUNT), 
            itr.islice(gen, COUNT)
        )

        for res in gv.parallel.starmap_unordered(get_strong_fps, args):
            for m in xrange(detector.num_mixtures):
                top_bbs[m].extend(res[m])

        ag.info('- TOPS ------')
        ag.info(map(np.shape, top_bbs) )
        detector.extra['top_bbs_shape'] = map(np.shape, top_bbs) 

        # Save the strong negatives
        detector.extra['negs'] = top_bbs
        
        def phi(X, mixcomp):
            if SVM_INDICES and 0:
                indices = detector.indices2[mixcomp][0]
                return X.ravel()[np.ravel_multi_index(indices.T, X.shape)]
            else:
                #return gv.sub.subsample(X, (2, 2)).ravel()
                return X.ravel()

        all_neg_X0 = []
        for k in xrange(detector.num_mixtures):
            all_neg_X0.append(np.asarray(map(lambda bbobj: phi(bbobj.X, k), top_bbs[k])))

        del top_bbs

        all_pos_X0 = []
        for mixcomp, pos_feats in enumerate(all_pos_feats):
            all_pos_X0.append(np.asarray(map(lambda X: phi(X, mixcomp), pos_feats))) 
        ag.info('Done.')

        detector.extra['poss'] = all_pos_feats

        ag.info('Training SVMs...')
        # Train SVMs
        #from sklearn.svm import LinearSVC
        from sklearn.svm import LinearSVC, SVC
        clfs = []
        detector.indices2 = None # not [] for now 

        #all_neg_X0 = [[bbobj.X for bbobj in top_bbs[m]] for m in xrange(detector.num_mixtures)]

        detector.extra['svms'] = []
        for m in xrange(detector.num_mixtures):
            X = np.concatenate([all_pos_X0[m], all_neg_X0[m]])  
    
            # Flatten
            ag.info(m, ':', X.shape)
            #X = phi(X, k)
            ag.info(m, '>', X.shape)
            y = np.concatenate([np.ones(len(all_pos_feats[m])), np.zeros(len(all_neg_X0[m]))])

            #detector.extra['data_x'].append(X)
            #detector.extra['data_y'].append(y)



            #C = 5e-8
            C = 1.0

            #clf = LinearSVC(C=C)
            #clf = LinearSVC(C=C)
            clf = SVC(C=C, kernel='linear')
            clf.fit(X, y)
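            # Linear SVM separating positives (y=1) from the mined strong false
            # positives (y=0); only the hyperplane (intercept and coefficients) is
            # stored on the detector, presumably to score candidates at test time.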

            svm_info = dict(intercept=float(clf.intercept_), weights=clf.coef_)
            detector.extra['svms'].append(svm_info)

            #sh = all_pos_feats[m][0].shape

            # Get most significant coefficients

            #th = smallest_th[k] 
            #th = 0
            #detector.extra['svms'].append(dict(svm=clf, th=th, uses_indices=SVM_INDICES))
        ag.info('Done.')

        # Remove negatives and positives from extra, since it takes space
        if 1:
            del detector.extra['poss']
            del detector.extra['negs']

    ag.info('extra')
    ag.info(detector.extra.keys())
    ag.info('eps', detector.eps)

    #ag.info("THIS IS SO TEMPORARY!!!!!")
    if 'weights' in detector.extra:
        #detector.indices = None

        ag.info(detector.standardization_info)
        #try:
        #    detector.standardization_info[0]['std'] = 1.0
        #except TypeError:
        #    detector.standardization_info = [dict(std=1.0, mean=0.0)]
        ag.info('corner2', detector.extra['weights'][0][0,0,:5])

    return detector 
Code example #3
def main():
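    """
    Load a single-component BernoulliDetector and synthesize additional mixture
    components by rotating its weight maps through the descriptor's orientation
    steps, shifting the orientation channels to match, and recomputing bounding
    boxes, keypoints and fixed standardization info before saving the new model.
    """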

    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('model', metavar='<model file>', type=argparse.FileType('rb'), help='Filename of model file')
    parser.add_argument('output_model', metavar='<output model file>', type=str, help='Filename of output model file')

    args = parser.parse_args()

    d = gv.BernoulliDetector.load(args.model)

    assert len(d.kernel_templates) == 1, "Can only rotate a model that has a single component to begin with"
    #assert d.descriptor.settings.get('polarities', 1) == 1, "Only POL 1 for now"

    deg_per_step = d.descriptor.degrees_per_step
    ROT = d.descriptor.settings.get('orientations', 1)
    print('degrees per step', deg_per_step)

    #rots = [-ROT//4, ROT//4]
    rots = np.arange(1, ROT)  # skip rotation 0; the original component is kept as-is
    print('rots', rots)
    new_components = []

    #kern = d.kernel_templates[0]

    w0 = d.weights(0)

    weights = [w0]

    bbs = copy(d.extra['bbs'])
    bb0 = bbs[0]
    supports = copy(d.support)
    kernel_sizes = copy(d.kernel_sizes)

    for rot in rots:
        deg = rot * deg_per_step
        print('deg', deg)
        slices = []
        for f in xrange(w0.shape[-1]):
            rotated = (rotate(w0[...,f] / 20 + 0.5, deg, resize=True, cval=0.5) - 0.5) * 20
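            # The weights are squashed into roughly [0, 1] (w/20 + 0.5) so that
            # rotate() pads with the neutral value 0.5 (zero weight), then mapped
            # back to the original scale after rotation.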

            slices.append(rotated)

            if f % 50 == 0:
                print(f)

            # Crop it a bit
            # TODO
            
            if 0:    
                plt.figure()
                plt.imshow(w0[...,f], vmin=-3, vmax=3, cmap=plt.cm.RdBu_r, interpolation='nearest')
                plt.savefig(vz.generate_filename())

                plt.figure()
                plt.imshow(rotated, vmin=-3, vmax=3, cmap=plt.cm.RdBu_r, interpolation='nearest')
                plt.savefig(vz.generate_filename())

        slices = np.rollaxis(np.asarray(slices), 0, 3)

        # NOTE: the parts themselves are not rotated here; only the orientation
        # channels within each group of ROT features are cyclically shifted below.
        for k in xrange(w0.shape[-1]//ROT):
            slices[..., k*ROT:(k+1)*ROT] = np.roll(slices[..., k*ROT:(k+1)*ROT], rot, axis=-1)

        weights.append(slices)

        

        bb = gv.bb.create(center=gv.bb.center(bb0), size=gv.bb.rotate_size(gv.bb.size(bb0), deg))
        bbs.append(bb)
        supports.append(d.support[0])
        kernel_sizes.append(d.kernel_sizes[0])

    d.num_mixtures = len(weights)

    print(map(np.shape, weights))

    bkg = np.apply_over_axes(np.mean, d.fixed_spread_bkg[0], [0, 1]).ravel()

    # Invent new keypoints and determine standardization info   
    indices = []
    info = []
    for m in xrange(d.num_mixtures):
        w = weights[m]
        ii = get_key_points(w, suppress_radius=d.settings.get('indices_suppress_radius', 4), even=True)
        indices.append(ii)

        llh_mean = 0.0
        llh_var = 0.0
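        # Under the background model each keypoint feature fires independently with
        # probability bkg[part], so the linear score over the keypoints has mean
        # sum(bkg * w) and variance sum(bkg * (1 - bkg) * w**2); these give the
        # fixed standardization used below.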
        for index in ii:
            part = index[-1]
            # TODO: Should this really be clipped before averaging?
            mvalue = bkg[part]

            llh_mean += mvalue * w[tuple(index)]
            llh_var += mvalue * (1 - mvalue) * w[tuple(index)]**2

        info.append(dict(mean=llh_mean, std=np.sqrt(llh_var)))

    d.settings['testing_type'] = 'fixed'
    d.standardization_info = info

    # Now store the weights preprocessed
    d.indices = indices
    d.extra['weights'] = weights
    d.extra['bbs'] = bbs
    print('bbs', bbs)
    d.support = supports
    d.kernel_sizes = kernel_sizes
    print('d.TEMP_second', d.TEMP_second)
    print('kernel_sizes', d.kernel_sizes)
    d.TEMP_second = False
    d.save(args.output_model)