from warnings import catch_warnings, simplefilter

import numpy as np
from numpy import sqrt
from matplotlib.transforms import Affine2D
from matplotlib.patches import Circle
from matplotlib.collections import PatchCollection

# MATLAB-style machine epsilon helper (assumed): eps(1) == np.spacing(1) ~= 2.2e-16
eps = np.spacing


def _get_fpt_ell_collection(dm, fpts, T_data, alpha, edgecolor):
    ell_patches = []
    for (x, y, a, c, d) in fpts:
        # Manually calculated sqrtm(inv(A)) for the lower-triangular
        # shape matrix A = [(a, 0), (c, d)] of each keypoint
        with catch_warnings():
            simplefilter("ignore")
            aIS = 1 / sqrt(a)
            cIS = (c / sqrt(a) - c / sqrt(d)) / (a - d + eps(1))
            dIS = 1 / sqrt(d)
        transEll = Affine2D([(aIS,   0, x),
                             (cIS, dIS, y),
                             (  0,   0, 1)])
        unitCirc1 = Circle((0, 0), 1, transform=transEll)
        ell_patches = [unitCirc1] + ell_patches
    ellipse_collection = PatchCollection(ell_patches)
    ellipse_collection.set_facecolor("none")
    ellipse_collection.set_transform(T_data)
    ellipse_collection.set_alpha(alpha)
    ellipse_collection.set_edgecolor(edgecolor)
    return ellipse_collection
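# Usage sketch (not from the original source): hypothetical `fpts` values;
# `dm` is unused by the function, so `None` is passed. Each keypoint is
# (x, y, a, c, d) for the lower-triangular shape matrix [(a, 0), (c, d)].
# PatchCollection bakes each patch's own transform into its path, so the
# collection-level transform can then map data coordinates to display.
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
fpts = [(10.0, 20.0, 0.01, 0.002, 0.02),   # hypothetical keypoints
        (40.0, 15.0, 0.02, 0.000, 0.01)]
coll = _get_fpt_ell_collection(None, fpts, ax.transData,
                               alpha=0.5, edgecolor='orange')
ax.add_collection(coll)
ax.set_xlim(0, 60)
ax.set_ylim(0, 40)
plt.show()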
# Assumes the surrounding module provides the `logmsg`/`logdbg` logging
# helpers, `np` (numpy), a MATLAB-style `eps`, and the `FLANN` indexer
# (pyflann bindings).
def build_model(vm, force_recomp=False):
    '''Builds the model, if needed. Tries to reload it from disk first.'''
    logmsg('\n\nRequested: Build Model')
    if not force_recomp and not vm.isDirty:
        logmsg('The model is clean and is not forced to recompute')
        return True
    cm = vm.hs.cm
    # Delete old index and resample chips to index
    vm.delete_model()
    vm.sample_train_set()
    # Try to load the correct model
    if not force_recomp and vm.load_model():
        logmsg('Loaded saved model from disk')
        return True
    logmsg('Building the model. This may take some time.')
    # Could not load old model. Do full rebuild.
    # -----
    # STEP 1 - Loading
    logdbg('Step 1: Aggregate the model support (load feature vectors) ---')
    tx2_cx = vm.get_train_cx()
    tx2_cid = vm.get_train_cid()
    assert len(tx2_cx) > 0, 'Training set cannot be empty'
    logdbg('Building model with %d sample chips' % (vm.num_train()))
    cm.load_features(tx2_cx)
    tx2_nfpts = cm.cx2_nfpts(tx2_cx)
    num_train_keypoints = sum(tx2_nfpts)
    # -----
    # STEP 2 - Aggregating
    logdbg('Step 2: Build the model words')
    isTFIDF = False
    if vm.hs.am.algo_prefs.model.quantizer == 'naive_bayes':
        logdbg('No quantization. Aggregating all descriptors for nearest neighbor search.')
        vm.wx2_fdsc = np.empty((num_train_keypoints, 128), dtype=np.uint8)
        _p = 0
        for cx in tx2_cx:
            nfdsc = cm.cx2_nfpts(cx)
            vm.wx2_fdsc[_p:_p + nfdsc, :] = cm.cx2_fdsc[cx]
            _p += nfdsc
        ax2_wx = np.arange(num_train_keypoints, dtype=np.uint32)
    elif vm.hs.am.algo_prefs.model.quantizer == 'akmeans':
        raise NotImplementedError(':)')
    # -----
    # STEP 3 - Inverted Indexing
    logdbg('Step 3: Point the parts of the model back to their source')
    # wx2_axs[wx] lists the aggregate descriptor indexes assigned to word wx
    vm.wx2_axs = np.empty(vm.wx2_fdsc.shape[0], dtype=object)
    for ax in xrange(num_train_keypoints):
        wx = ax2_wx[ax]
        if vm.wx2_axs[wx] is None:
            vm.wx2_axs[wx] = []
        vm.wx2_axs[wx].append(ax)
    vm.ax2_cid = -np.ones(num_train_keypoints, dtype=np.int32)
    vm.ax2_fx = -np.ones(num_train_keypoints, dtype=np.int32)
    ax2_tx = -np.ones(num_train_keypoints, dtype=np.int32)
    curr_fx = 0
    next_fx = 0
    for tx in xrange(vm.num_train()):
        nfpts = tx2_nfpts[tx]
        next_fx = next_fx + nfpts
        ax_range = range(curr_fx, next_fx)
        ax2_tx[ax_range] = tx
        vm.ax2_cid[ax_range] = tx2_cid[tx]  # Point to chip instance
        vm.ax2_fx[ax_range] = range(nfpts)  # Point to keypoints
        curr_fx = curr_fx + nfpts
    if isTFIDF:
        # Compute info for TF-IDF
        logdbg('Computing TF-IDF metadata')
        max_tx = len(tx2_cx)
        tx2_wtf_denom = np.float32(cm.cx2_nfpts(tx2_cx))
        vm.wx2_maxtf = map(
            lambda ax_of_wx: max(
                np.float32(np.bincount(ax2_tx[ax_of_wx], minlength=max_tx)) /
                tx2_wtf_denom),
            vm.wx2_axs)
        vm.wx2_idf = np.log2(np.array(
            map(lambda ax_of_wx:
                float(vm.num_train()) / len(np.unique(ax2_tx[ax_of_wx])),
                vm.wx2_axs)) + eps(1))
    logdbg('Built model using %d feature vectors. Preparing to index.'
           % len(vm.ax2_cid))
    # -----
    # STEP 4 - Indexing
    logdbg('Step 4: Building FLANN index over %d words' % len(vm.wx2_fdsc))
    assert vm.flann is None, 'FLANN index already exists'
    vm.flann = FLANN()
    flann_param_dict = vm.hs.am.algo_prefs.model.indexer.to_dict()
    flann_params = vm.flann.build_index(vm.wx2_fdsc, **flann_param_dict)
    vm.isDirty = False
    vm.save_model()
    logmsg('The model was built.')
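# For intuition (not part of the original source): a standalone sketch of the
# Step 3 inverted index with hypothetical word assignments. `ax2_wx` maps each
# aggregate descriptor index to a word; `wx2_axs` inverts that mapping so each
# word lists the descriptors assigned to it.
import numpy as np

ax2_wx_demo = np.array([0, 2, 1, 0, 2, 2], dtype=np.uint32)  # 6 descriptors, 3 words
wx2_axs_demo = np.empty(3, dtype=object)
for ax in range(len(ax2_wx_demo)):
    wx = ax2_wx_demo[ax]
    if wx2_axs_demo[wx] is None:
        wx2_axs_demo[wx] = []
    wx2_axs_demo[wx].append(ax)
print(wx2_axs_demo)  # word 0 -> [0, 3], word 1 -> [2], word 2 -> [1, 4, 5]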
from numpy import isinf, asarray
from numpy import spacing as eps

# `HullNode`, `compute_segment_log_prob`, and `exp_normalize` are assumed to
# be defined alongside this function in arspy.hull.


def compute_hulls(S, fS, domain):
    """
    (Re-)compute upper and lower hull given the segment points `S`
    with function values `fS` and the `domain` of the logpdf.

    Parameters
    ----------
    S : np.ndarray (N, 1)
        Straight-line segment points accumulated thus far.

    fS : tuple
        Value of the `logpdf` under sampling for each of the given
        segment points in `S`.

    domain : Tuple[float, float]
        Domain of `logpdf`.
        May be unbounded on either or both sides, in which case
        `(float("-inf"), float("inf"))` would be passed.
        If this domain is unbounded to the left, the derivative of the
        logpdf for x <= a must be positive.
        If this domain is unbounded to the right, the derivative of the
        logpdf for x >= b must be negative.

    Returns
    -------
    lower_hull : List[arspy.hull.HullNode]
    upper_hull : List[arspy.hull.HullNode]
    """
    assert len(S) == len(fS)
    assert len(domain) == 2

    lower_hull = []
    for li in range(len(S) - 1):
        m = (fS[li + 1] - fS[li]) / (S[li + 1] - S[li])
        b = fS[li] - m * S[li]
        left = S[li]
        right = S[li + 1]
        lower_hull.append(HullNode(m=m, b=b, left=left, right=right))

    # compute upper piecewise-linear hull

    # expected final length of upper hull after full computation
    n_upper_segments = 2 * (len(S) - 2) + isinf(domain[0]) + isinf(domain[1])

    upper_hull = []

    if isinf(domain[0]):
        # first line (from -infinity)
        m = (fS[1] - fS[0]) / (S[1] - S[0])
        b = fS[0] - m * S[0]
        pr = compute_segment_log_prob(float("-inf"), S[0], m, b)
        upper_hull.append(
            HullNode(m=m, b=b, pr=pr, left=float("-inf"), right=S[0]))

    # second line
    m = (fS[2] - fS[1]) / (S[2] - S[1])
    b = fS[1] - m * S[1]
    pr = compute_segment_log_prob(S[0], S[1], m, b)
    upper_hull.append(HullNode(m=m, b=b, pr=pr, left=S[0], right=S[1]))

    # interior lines
    # there are two lines between each abscissa
    for li in range(1, len(S) - 2):
        m1 = (fS[li] - fS[li - 1]) / (S[li] - S[li - 1])
        b1 = fS[li] - m1 * S[li]

        m2 = (fS[li + 2] - fS[li + 1]) / (S[li + 2] - S[li + 1])
        b2 = fS[li + 1] - m2 * S[li + 1]

        if isinf(m1) and isinf(m2):
            raise ValueError("both hull slopes are infinite")

        dx1 = S[li] - S[li - 1]
        df1 = fS[li] - fS[li - 1]
        dx2 = S[li + 2] - S[li + 1]
        df2 = fS[li + 2] - fS[li + 1]

        f1 = fS[li]
        f2 = fS[li + 1]
        x1 = S[li]
        x2 = S[li + 1]

        # more numerically stable than solving m1*x + b1 = m2*x + b2 directly
        ix = ((f1 * dx1 - df1 * x1) * dx2 -
              (f2 * dx2 - df2 * x2) * dx1) / (df2 * dx1 - df1 * dx2)

        if isinf(m1) or abs(m1 - m2) < 10.0 ** 8 * eps(m1):
            ix = S[li]
            pr1 = float("-inf")
            pr2 = compute_segment_log_prob(ix, S[li + 1], m2, b2)
        elif isinf(m2):
            ix = S[li + 1]
            pr1 = compute_segment_log_prob(S[li], ix, m1, b1)
            pr2 = float("-inf")
        else:
            if isinf(ix):
                raise ValueError("Non-finite intersection")

            if abs(ix - S[li]) < 10.0 ** 12 * eps(S[li]):
                ix = S[li]
            elif abs(ix - S[li + 1]) < 10.0 ** 12 * eps(S[li + 1]):
                ix = S[li + 1]

            if ix < S[li] or ix > S[li + 1]:
                raise ValueError(
                    "Intersection out of bounds -- logpdf is not concave")

            pr1 = compute_segment_log_prob(S[li], ix, m1, b1)
            pr2 = compute_segment_log_prob(ix, S[li + 1], m2, b2)

        upper_hull.append(HullNode(m=m1, b=b1, pr=pr1, left=S[li], right=ix))
        upper_hull.append(
            HullNode(m=m2, b=b2, pr=pr2, left=ix, right=S[li + 1]))

    # second-to-last line
    m = (fS[-2] - fS[-3]) / float(S[-2] - S[-3])
    b = fS[-2] - m * S[-2]
    pr = compute_segment_log_prob(S[-2], S[-1], m, b)
    upper_hull.append(HullNode(m=m, b=b, pr=pr, left=S[-2], right=S[-1]))

    if isinf(domain[1]):
        # last line (to infinity)
        m = (fS[-1] - fS[-2]) / (S[-1] - S[-2])
        b = fS[-1] - m * S[-1]
        pr = compute_segment_log_prob(S[-1], float("inf"), m, b)
        upper_hull.append(
            HullNode(m=m, b=b, pr=pr, left=S[-1], right=float("inf")))

    # normalize probabilities
    normalized_probabilities = exp_normalize(
        asarray([node.pr for node in upper_hull]))

    for node, probability in zip(upper_hull, normalized_probabilities):
        node.pr = probability

    assert len(lower_hull) == len(S) - 1
    assert len(upper_hull) == n_upper_segments

    return lower_hull, upper_hull
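# Minimal usage sketch (assumes the arspy.hull helpers the function relies on
# are in scope): hulls for the concave standard normal logpdf from three
# abscissae on an unbounded domain.
import numpy as np

S_demo = np.asarray([-1.0, 0.0, 1.0])
fS_demo = -0.5 * S_demo ** 2  # standard normal logpdf, up to a constant
lower, upper = compute_hulls(S_demo, fS_demo,
                             domain=(float("-inf"), float("inf")))
print(len(lower), len(upper))  # 2 lower segments, 4 upper segments
for node in upper:
    print(node.left, node.right, node.pr)  # segment bounds, normalized mass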
import numpy as np


def elu(param, x):
    # Exponential Linear Unit: identity for x >= 0,
    # param * (exp(x) - 1) for x < 0
    return param * (np.exp(x) - 1.) if x < 0 else x
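# Quick sanity check on scalars (param = 1.0). For array inputs, np.where
# gives the vectorized equivalent, since the scalar ternary above does not
# broadcast.
import numpy as np

for x in (-2.0, -0.5, 0.0, 3.0):
    print(x, elu(1.0, x))  # elu(1.0, -2.0) == exp(-2) - 1 ~= -0.8647

xs = np.array([-2.0, -0.5, 0.0, 3.0])
print(np.where(xs < 0, 1.0 * (np.exp(xs) - 1.), xs))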