def sample(self):
    if self.downloading:
        self.set_hdr_paths()
    while not len(self.hdr_paths):
        self.set_hdr_paths()
        if timegap(5, 'waiting for download ".hdr" file'):
            print('Waiting for download first ".hdr" file....')
        sleep(0.1)
    return random.choice(self.hdr_paths)
def softmaxFgBgSub(fg, bg, t=1):
    diff = bg - fg
    toExp = t * diff
    if (toExp > 80).sum() and timegap(1, 'toExp'):
        from boxx import prettyFrameLocation, pred
        print(prettyFrameLocation())
        pred - "toExp.max() is %.2f > 80, diff.max() is %.2f" % (
            toExp.max(), diff.max())
    prob = 1 / (1 + th.exp(toExp))
    return prob
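# Hedged standalone sketch, not part of the original file (values are made up):
# softmaxFgBgSub evaluates 1 / (1 + exp(t * (bg - fg))), which is exactly
# sigmoid(t * (fg - bg)). float32 exp overflows to inf around x ~ 88, which is
# what the "> 80" warning above guards against; torch.sigmoid computes the
# same probability without materializing the large exponential.
import torch as th

fg = th.tensor([0.0, 3.0, -2.0])
bg = th.tensor([1.0, 0.0, 500.0])
t = 1.0

manual = 1 / (1 + th.exp(t * (bg - fg)))   # exp(502) -> inf for the last pair
builtin = th.sigmoid(t * (fg - bg))        # same probabilities, computed stably
print(manual)   # tensor([0.2689, 0.9526, 0.0000])
print(builtin)  # tensor([0.2689, 0.9526, 0.0000])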
def sample(self):
    if self.downloading:
        self.set_hdr_paths()
    while not len(self.hdr_paths):
        assert (
            self.downloading
        ), f'No hdri file in "{self.hdri_dir}", make sure HdriManager(download=True)'
        self.set_hdr_paths()
        if timegap(5, 'waiting for download ".hdr" file'):
            print('Waiting for download first ".hdr" file....')
        sleep(0.1)
    return random.choice(self.hdr_paths)
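# Minimal standalone sketch (not from this file) of the boxx.timegap throttling
# pattern used in both sample() variants above. As I understand its contract,
# timegap(gap, tag) returns True at most once every `gap` seconds per tag, so a
# tight polling loop only logs occasionally instead of on every iteration.
from time import sleep
from boxx import timegap

for i in range(50):
    if timegap(1, "poll-demo"):      # True roughly once per second
        print("still waiting...", i)
    sleep(0.1)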
def forward(self, feats, xyens):
    if self.layerNorma:
        feats = layerNormaFun(feats)
    losses = [
        spatialSoftmax(feats, xyens) * w
        for w, spatialSoftmax in zip(self.weights, self.spatialSoftmaxs)
    ]
    if self.pointMaxW:
        losses += [self.pointMaxW * self.pointMax(feats, xyens)]
    if timegap(self.log_freq, 'losses'):
        print(
            Markdown([
                dict(
                    zip(self.cyc_rs + ['point'],
                        [strnum(float(loss.cpu())) for loss in losses]))
            ]))
    return sum(losses) / len(losses)
def forward(self, feats, xyens):
    shape = feats.shape
    device = feats.device
    # one (batch, channel) index pair per ground-truth point
    dim1, dim2 = np.mgrid[:shape[-4], :shape[-3]]
    dim1, dim2 = torch.tensor(dim1.ravel(), device=device), torch.tensor(
        dim2.ravel(), device=device)
    xyens = xyens.view(-1, 3)
    typee = feats.type()
    x, y, existMask = xyens[..., 0].type(
        th.long).to(device), xyens[..., 1].type(
            th.long).to(device), xyens[..., -1].type(typee).to(device)
    # tree([dim1.ravel(), dim2.ravel(), y, x], deep=1)
    validXMask = (x >= 0) & (x < shape[-1])
    validYMask = (y >= 0) & (y < shape[-2])
    # x = torch.clamp(x, 0, shape[-1])
    # y = torch.clamp(y, 0, shape[-2])
    x[~validXMask] = 0
    y[~validYMask] = 0
    # only points that exist and lie inside the feature map contribute
    indMask = ((existMask > 0) & validXMask & validYMask).type(typee)
    # gather the predicted logit at each ground-truth (y, x) location
    point_feats = feats[dim1, dim2, y, x]
    siged = th.sigmoid(point_feats)
    loss = -th.log(siged + eps)
    loss = (loss * indMask).sum() / (indMask.sum() + eps)
    loss *= self.w
    if self.suppressionBg or cf.get('debugPoinMax'):
        backloss = self._suppressionBg(feats, loss)
        s = f"pointmax: {loss} + "
        loss += backloss
        s += f"backloss: {backloss}"
        s = f"loss: {loss} = " + s
        if timegap(cf.debugPoinMax, 'debugPoinMax'):
            pred - s
            # g()
            # 1/0
    return loss
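# Standalone illustration (made-up shapes and names, not from the repo) of the
# advanced-indexing gather feats[dim1, dim2, y, x] used above: for a
# (batch, keypoint, H, W) heatmap, four equal-length index vectors pick one
# scalar logit per (batch, keypoint) pair.
import numpy as np
import torch

batch, n_kpt, H, W = 2, 3, 8, 8
feats = torch.randn(batch, n_kpt, H, W)

dim1, dim2 = np.mgrid[:batch, :n_kpt]      # batch / keypoint indices
dim1 = torch.tensor(dim1.ravel())
dim2 = torch.tensor(dim2.ravel())
y = torch.randint(0, H, (batch * n_kpt,))  # made-up ground-truth rows
x = torch.randint(0, W, (batch * n_kpt,))  # made-up ground-truth columns

point_feats = feats[dim1, dim2, y, x]      # one logit per (batch, keypoint)
assert point_feats.shape == (batch * n_kpt,)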
if key == ord('w'):
    cycR += 1
if key == ord('s'):
    cycR -= 1
if key == ord('e'):
    showeye = not showeye
tDiff = time.time() - lastTime
lastTime = time.time()
timeLong = lastTime - begin
lastDu += tDiff * duSpeed
lastDu %= 360
if timegap(genGap, 'gen_points'):
    for ind in range(lineNumber):
        point = FarAwayPoint(lastDu + ind * (360 / lineNumber),
                             color=colors[ind],
                             r=cycR)
        points.append(point)
img = bg.copy()
points = points[-300:]
for point in points:
    point(img)
if showeye:
    from process_img import png
    pngHw = Vector(png.shape[:2])
def forward(self, feats, xyens):
    logName = 'cyc_r: %s, out_cyc_r: %s' % (self.cyc_r, self.out_cyc_r or 'N')
    logTag = timegap(self.log_freq, logName)
    if logTag:
        logDic = dicto()
    if self.layerNorma:
        feats = layerNormaFun(feats)
    tensorType = feats.type()
    shape = self.shape = feats.shape
    feats = feats.view(-1, *shape[-2:])
    xyens = xyens.view(-1, 3)
    pgts = xyens[..., [1, 0]].cpu().numpy()
    existMask = xyens[..., -1].type(tensorType)
    # with timeit(logName):
    masks = map2(getMaskOfPgt, [
        dict(pgt=pgt,
             cycle_tmpl=self.cycle_tmpl,
             shape=shape,
             out_cyc_r=self.out_cyc_r) for pgt in pgts
    ])
    # masks = mapmp(self.getMask, pgts, pool=4)
    masks = np.array(masks)
    # masks[..., 0, :, :] is the background region, masks[..., 1, :, :] the foreground circle
    masks = th.from_numpy(np.uint8(masks)).type(tensorType).cuda()
    loss = 0

    # (lambda a,b,t=1:e**(t*a)/(e**(t*a)+e**(t*b)))(2,1,5)
    def softmaxFgBgOld(fg, bg, t=1):
        fge = th.exp(fg * t)
        bge = th.exp(bg * t)
        prob = fge / (fge + bge + eps)
        return prob

    def softmaxFgBgSubMax(fg, bg, t=1):
        fg = fg * t
        bg = bg * t
        maxx = max(float(fg.max()), float(bg.max()))
        fge = th.exp(fg - maxx)
        bge = th.exp(bg - maxx)
        prob = fge / (fge + bge + eps)
        return prob

    def softmaxFgBgSub(fg, bg, t=1):
        diff = bg - fg
        toExp = t * diff
        if (toExp > 80).sum() and timegap(1, 'toExp'):
            from boxx import prettyFrameLocation, pred
            print(prettyFrameLocation())
            pred - "toExp.max() is %.2f > 80, diff.max() is %.2f" % (
                toExp.max(), diff.max())
        prob = 1 / (1 + th.exp(toExp))
        return prob

    softmaxFgBg = softmaxFgBgSubMax

    def CE(fg, bg):
        prob = softmaxFgBg(fg, bg)
        avgLosses = -th.log(prob + eps)
        return avgLosses

    if 'avg' in self.poolings:
        # average-pool the features inside each region, then contrast fg vs bg
        bgAvgPool = (feats * masks[..., 0, :, :]).sum(-1).sum(-1) / (
            masks[..., 0, :, :].sum(-1).sum(-1) + eps)
        fgAvgPool = (feats * masks[..., 1, :, :]).sum(-1).sum(-1) / (
            masks[..., 1, :, :].sum(-1).sum(-1) + eps)
        avgProbs = softmaxFgBg(fgAvgPool, bgAvgPool, self.temper)
        avgLosses = -th.log(avgProbs + eps)
        indexMask = existMask * (avgProbs < self.probMargin).type(
            tensorType) if self.probMargin else existMask
        avgLoss = (avgLosses * indexMask).sum() / (indexMask.sum() + eps)
        loss += avgLoss
        if logTag:
            logDic.avgLoss = float(avgLoss)
            logDic.avgProb = float(avgProbs.mean())
    if 'max' in self.poolings:
        # max-pool the features inside each region, then contrast fg vs bg
        bgMaxPool = (feats * masks[..., 0, :, :]).max(-1)[0].max(-1)[0]
        fgMaxPool = (feats * masks[..., 1, :, :]).max(-1)[0].max(-1)[0]
        maxProbs = softmaxFgBg(fgMaxPool, bgMaxPool, self.temper)
        maxLosses = -th.log(maxProbs + eps)
        indexMask = existMask * (maxProbs < self.probMargin).type(
            tensorType) if self.probMargin else existMask
        maxLoss = (maxLosses * indexMask).sum() / (indexMask.sum() + eps)
        loss += maxLoss
        if logTag:
            logDic.maxLoss = float(maxLoss)
            logDic.maxProb = float(maxProbs.mean())
    if logTag:
        print("%s | %s" % (logName, ', '.join(
            map(lambda kv: "%s: %.3f" % kv, logDic.items()))))
        # print(Markdown([{k:strnum(v) for k,v in logDic.items()}]))
        # g()
    return loss
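# Hedged standalone check, not from the repo (logit values are made up), of why
# softmaxFgBgSubMax is the variant bound to softmaxFgBg above: subtracting the
# global max before exp keeps the intermediates finite, while the naive form
# (softmaxFgBgOld) overflows to inf and can yield inf/inf = nan.
import torch as th
eps = 1e-8

fg = th.tensor([0.0, 200.0])
bg = th.tensor([200.0, 150.0])

naive = th.exp(fg) / (th.exp(fg) + th.exp(bg) + eps)    # exp(200) -> inf in float32
maxx = max(float(fg.max()), float(bg.max()))
shifted = th.exp(fg - maxx) / (th.exp(fg - maxx) + th.exp(bg - maxx) + eps)
print(naive)    # ~tensor([0., nan])
print(shifted)  # ~tensor([0., 1.])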