def EntropyLoss1(y_pred, y_true, train_pos_ratio):
    scale = 10
    train_pos_ratio = array(
        train_pos_ratio, ctx=y_pred.context, dtype=np.float32) * scale
    train_neg_ratio = scale - train_pos_ratio
    L = -y_true * nd.log2(y_pred) * train_neg_ratio - (
        1 - y_true) * nd.log2(1 - y_pred) * train_pos_ratio
    return nd.mean(L)

def EntropyLoss1(y_pred, y_true):
    train_pos_ratio = array([
        0.09584448, 0.00999555, 0.05294822, 0.00299553, 0.04936361, 0.00880486
    ], ctx=y_pred.context, dtype=np.float32) * 10
    # note: train_pos_ratio has already been multiplied by 10 here, so this
    # evaluates to 10 - 100 * p rather than 10 * (1 - p) as in the variant above
    train_neg_ratio = (1.0 - train_pos_ratio) * 10
    L = -y_true * nd.log2(y_pred) * train_neg_ratio - (
        1 - y_true) * nd.log2(1 - y_pred) * train_pos_ratio
    return nd.mean(L)

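# A minimal smoke test for the class-weighted EntropyLoss1 above -- illustrative
# only, assuming `from mxnet import nd`, `import numpy as np`, and the `array`
# alias (mxnet.nd.array) used by the definition are in scope. The toy batch of
# shape (4, 6) matches the six hard-coded class ratios; predictions are clipped
# away from 0 and 1 so nd.log2 stays finite.
from mxnet import nd

y_pred = nd.clip(nd.random.uniform(shape=(4, 6)), 1e-6, 1 - 1e-6)
y_true = nd.round(nd.random.uniform(shape=(4, 6)))  # 0/1 multi-label targets
print(EntropyLoss1(y_pred, y_true).asscalar())
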
def test_exponent_logarithm_operators():
    a = 2 * nd.ones(shape=LARGE_X)
    # exponent
    result = nd.exp(a)
    assert result[-1] == 7.389056
    assert result.shape == a.shape
    # exponent minus 1
    result = nd.expm1(a)
    assert result[-1] == 6.389056
    assert result.shape == a.shape
    # log2
    result = nd.log2(a)
    assert result[-1] == 1
    assert result.shape == a.shape
    # log10
    result = nd.log10(a)
    assert result[-1] == 0.30103
    assert result.shape == a.shape
    # log1p
    result = nd.log1p(a)
    assert result[-1] == 1.0986123
    assert result.shape == a.shape
    # log
    result = nd.log(a)
    assert result[-1] == 0.6931472
    assert result.shape == a.shape

def quantize_to(x, bits=8):
    max_v = nd.max(nd.abs(x))
    if max_v == 0:
        return x.astype(np.int8), 8
    int_len = nd.ceil(nd.log2(max_v)).asscalar()
    sb = bits - int_len
    f = 2**sb
    y = nd.floor(x * f)
    y = nd.clip(y, a_min=-2**(bits - 1), a_max=2**(bits - 1) - 1)
    return y, sb

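# Quick round trip for quantize_to -- an illustrative sketch, assuming
# `from mxnet import nd` and `import numpy as np` are in scope. `y` holds the
# clipped integer codes and `sb` the fractional shift, so dividing by 2**sb
# recovers an approximation of x (the largest magnitude can saturate at the
# clip bound, since the scale does not reserve a sign bit).
from mxnet import nd

x = nd.array([0.12, -0.03, 0.031, 0.26])
y, sb = quantize_to(x, bits=8)
x_hat = y / (2 ** sb)
print(y.asnumpy(), sb)
print(x_hat.asnumpy())
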
def quantize(self, x):
    max_v = nd.max(nd.abs(x))
    if max_v != 0:
        int_len = nd.ceil(nd.log2(max_v)).astype('float32')
        num_bit = self.num_bit.as_in_context(x.context)
        frac_len = num_bit - int_len
        f = (2**frac_len).astype('float32')
        y = (x * f).floor() * (1 / f)
        return y
    return x

def int_inference(self, x):
    max_v = nd.max(nd.abs(x))
    if max_v != 0:
        int_len = nd.ceil(nd.log2(max_v)).astype('float32')
        num_bit = self.num_bit.as_in_context(x.context)
        frac_len = num_bit - int_len
        f = (2**frac_len).astype('float32')
        y = (x * f).round()
        return y.astype('int8'), frac_len
    return x.astype('int8'), 0

def int_quantize(self, x):
    max_v = nd.max(nd.abs(x))
    if max_v != 0:
        int_len = nd.ceil(nd.log2(max_v)).astype('float32')
        num_bit = self.num_bit.as_in_context(x.context)
        frac_len = num_bit - int_len
        f = (2**frac_len).astype('float32')
        y = (x * f).floor()
        y = nd.clip(y, a_min=-128, a_max=127)
        return y, frac_len
    return x, 0

def forward(self, is_train, req, in_data, out_data, aux):
    x = in_data[0]
    y = out_data[0]
    in_max = nd.max(nd.abs(x))
    if in_max != 0:
        int_len = nd.ceil(nd.log2(in_max)).astype('float32')
        num_bit = self.num_bit.as_in_context(x.context)
        frac_len = num_bit - int_len
        f = (2**frac_len).astype('float32')
        y = (x * f).round() * (1 / f)
    # note: this assignment overrides the quantized value computed above, so
    # the forward pass is effectively an identity (quantization disabled)
    y = x
    self.assign(out_data[0], req[0], mx.nd.array(y))

def int_quantize_double(self, x, w):
    max1 = nd.max(nd.abs(x))
    max2 = nd.max(nd.abs(w))
    if max1 > max2:
        max_v = max1
    else:
        max_v = max2
    if max_v != 0:
        int_len = nd.ceil(nd.log2(max_v)).astype('float32')
        num_bit = self.num_bit.as_in_context(x.context)
        frac_len = num_bit - int_len
        f = (2**frac_len).astype('float32')
        int_x = (x * f).floor()
        int_w = (w * f).floor()
        return int_x, int_w, frac_len
    return x, w, 0

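# Why x and w share one frac_len above: their integer product can then be
# rescaled by a single factor 2**(2 * frac_len). Below is a standalone sketch
# of the same idea (not the class method itself), with num_bit as a plain
# argument instead of the self.num_bit attribute; assumes `from mxnet import nd`.
from mxnet import nd

def int_quantize_double_sketch(x, w, num_bit=8.0):
    max_v = nd.maximum(nd.max(nd.abs(x)), nd.max(nd.abs(w)))
    if max_v == 0:
        return x, w, 0
    int_len = float(nd.ceil(nd.log2(max_v)).asscalar())
    frac_len = num_bit - int_len
    f = 2.0 ** frac_len
    return (x * f).floor(), (w * f).floor(), frac_len

x = nd.array([0.3, -0.7])
w = nd.array([0.1, 0.4])
int_x, int_w, frac_len = int_quantize_double_sketch(x, w)
# the integer dot product, shifted back by 2**(2 * frac_len), approximates x . w
print((nd.dot(int_x, int_w) / (2 ** (2 * frac_len))).asscalar(),
      nd.dot(x, w).asscalar())
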
def quantize_vector(x, bits=8):
    """Quantize a vector with precision 'bits'.

    Parameters
    ----------
    x : NDArray
        shape is (1, n)
    bits : int
        number of bits of precision to preserve after quantization

    Returns
    -------
    y, sb :
        the quantized vector and the shift amount; shifting y back by 'sb'
        bits (dividing by 2**sb) recovers an approximation of the original
        value.
    """
    max_v = nd.max(nd.abs(x))
    if max_v == 0:
        return x.astype(np.int8), 8
    int_len = nd.ceil(nd.log2(max_v)).asscalar()
    sb = bits - int_len
    f = 2**sb
    y = nd.floor(x * f)
    y = nd.clip(y, a_min=-2**(bits - 1), a_max=2**(bits - 1) - 1)
    return y, sb

def accuracy(output, label):
    # despite the name, this returns the mean binary cross-entropy (base 2),
    # not a classification accuracy
    L = -label * nd.log2(output) - (1 - label) * nd.log2(1 - output)
    return nd.mean(L).asscalar()

def log2(x):
    return nd.log2(x)

def forward(self, features, proposals):
    """
    :param features: OrderedDict, each features: (B, C, H, W)
    :param proposals:
    :return:
    """
    device = features[self.feature_map_names[0]].context
    batch_ids = [
        nd.full(len(ps), i, ctx=device) for i, ps in enumerate(proposals)
    ]
    B = len(batch_ids)
    batch_ids = nd.concat(*batch_ids, dim=0)
    batch_proposals = nd.concat(*proposals, dim=0)
    if self.use_fpn:
        proposals = nd.concat(batch_ids.reshape(-1, 1), batch_proposals, dim=1)
        ws = batch_proposals[:, 2] - batch_proposals[:, 0]
        hs = batch_proposals[:, 3] - batch_proposals[:, 1]
        areas = ws * hs
        ks = nd.floor(4 + nd.log2(nd.sqrt(areas) / 224))
        ks = nd.clip(ks, self.levels_min, self.levels_max)
        ks = ks.asnumpy()
        batch_indices = np.arange(len(batch_ids))
        _batch_ids = []
        _roi_features = []
        for level, name in self.levels_map.items():
            level_indices = batch_indices[ks == level]
            if len(level_indices) == 0:
                continue
            level_batch_ids = batch_ids[level_indices]
            roi_features = contrib.ndarray.ROIAlign(
                features[name], proposals[level_indices], (7, 7), 0.5**level)
            _batch_ids.append(level_batch_ids)
            _roi_features.append(roi_features)
        batch_ids = nd.concat(*_batch_ids, dim=0)
        batch_ids = batch_ids.asnumpy()
        roi_features = nd.concat(*_roi_features, dim=0)
        features_split = []
        for i in range(B):
            i_mask = batch_ids == i
            i_indices = batch_indices[i_mask]
            features_split.append(roi_features[i_indices])
        return features_split
    else:
        features = features[self.feature_map_names[0]]
        features = contrib.ndarray.ROIAlign(
            features,
            nd.concat(batch_ids.reshape(-1, 1), batch_proposals, dim=1),
            (7, 7), 0.5**4)
        features_split = []
        idx = 0
        for num_proposals in [len(ps) for ps in proposals]:
            features_split.append(features[idx:idx + num_proposals])
            idx = idx + num_proposals
        return features_split

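# For reference, ks = nd.floor(4 + nd.log2(nd.sqrt(areas) / 224)) above is the
# standard FPN level-assignment heuristic k = k0 + log2(sqrt(w*h) / 224) with
# k0 = 4: a 224x224 proposal maps to level 4, and each doubling or halving of
# the box side moves it one level up or down. An illustrative check, assuming
# `from mxnet import nd`; the clip bounds 2 and 5 stand in for the module's
# levels_min / levels_max.
from mxnet import nd

sides = nd.array([32.0, 112.0, 224.0, 448.0])   # square proposals, side in pixels
areas = sides * sides
ks = nd.floor(4 + nd.log2(nd.sqrt(areas) / 224))
ks = nd.clip(ks, 2, 5)
print(ks.asnumpy())  # [2. 3. 4. 5.]
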
def EntropyLoss(y_pred, y_true):
    L = -y_true * nd.log2(y_pred) - (1 - y_true) * nd.log2(1 - y_pred)
    return nd.mean(L)

def EntropyLoss(y_pred, y_true, train_pos_ratio=None):
    L = -y_true * (1 - y_pred)**2 * nd.log2(y_pred) - (
        1 - y_true) * nd.log2(1 - y_pred) * y_pred**2
    return nd.mean(L)

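# The variant directly above adds focal-style modulating factors: positive
# examples are scaled by (1 - y_pred)**2 and negatives by y_pred**2, so
# confident (easy) predictions contribute far less than uncertain ones,
# unlike the plain EntropyLoss earlier. An illustrative comparison, assuming
# `from mxnet import nd` and that this last definition is the one in scope:
from mxnet import nd

y_true = nd.array([1.0, 1.0])
easy = nd.array([0.95, 0.95])   # confidently correct positives
hard = nd.array([0.55, 0.55])   # uncertain positives
print(EntropyLoss(easy, y_true).asscalar())  # small: easy positives are down-weighted
print(EntropyLoss(hard, y_true).asscalar())  # much larger: hard positives dominate
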