def divide(self, tensor_in_1, tensor_in_2):
    """
    Element-wise division of the input arrays with broadcasting.

    Args:
        tensor_in_1 (Tensor): Tensor object
        tensor_in_2 (Tensor): Tensor object

    Returns:
        MXNet NDArray: Element-wise division of the input arrays.
    """
    tensor_in_1 = self.astensor(tensor_in_1)
    tensor_in_2 = self.astensor(tensor_in_2)
    return nd.divide(tensor_in_1, tensor_in_2)
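Since the docstring promises broadcasting, here is a minimal sketch of what that buys you; the array values are made up for illustration:

from mxnet import nd

a = nd.array([[1.0, 2.0], [3.0, 4.0]])  # shape (2, 2)
b = nd.array([2.0, 4.0])                # shape (2,), broadcast across rows
print(nd.divide(a, b))                  # [[0.5, 0.5], [1.5, 1.0]]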
def Rational_MXNET_C_F(x, weight_numerator, weight_denominator, training):
    # P(X) / Q(X) = (a_0 + a_1 * X + ... + a_n * X^n) /
    #               (eps + |b_0 + b_1 * X + ... + b_m * X^m|), with eps = 0.1
    # (the denominator loop indexes xps[j] from the constant term upward)
    z = nd.reshape(x, shape=(-1,))
    xps = get_xps(weight_denominator, weight_numerator, z)
    numerator = nd.array([0], dtype='float32')
    for i, w_n in enumerate(weight_numerator):
        numerator = numerator + nd.multiply(w_n, xps[i])
    denominator = nd.array([0], dtype='float32')
    for j, w_d in enumerate(weight_denominator):
        denominator = denominator + nd.multiply(w_d, xps[j])
    return nd.divide(numerator, (0.1 + nd.abs(denominator))).reshape(x.shape)
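The rational functions in this section call a get_xps helper that is not shown. Below is a minimal sketch reconstructed from how the loops index it (xps[i] must equal X**i so the numerator picks up the constant term a_0); treat it as an assumption, not the library's verbatim code:

from mxnet import nd

def get_xps(weight_denominator, weight_numerator, z):
    # Build [1, z, z**2, ...]; the list length follows the longer weight
    # vector, which assumes the numerator has one more coefficient than
    # the denominator (the usual rational-activation setup).
    xps = [z]
    for _ in range(max(len(weight_numerator), len(weight_denominator)) - 2):
        xps.append(xps[-1] * z)
    xps.insert(0, nd.ones_like(z))
    return xps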
def Rational_MXNET_A_F(x, weight_numerator, weight_denominator, training):
    # P(X) / Q(X) = (a_0 + a_1 * X + ... + a_n * X^n) /
    #               (1 + |b_0 * X| + |b_1 * X^2| + ... + |b_{m-1} * X^m|)
    z = nd.reshape(x, shape=(-1,))
    xps = get_xps(weight_denominator, weight_numerator, z)
    numerator = nd.array([0], dtype='float32')
    for i, w_n in enumerate(weight_numerator):
        numerator = numerator + nd.multiply(w_n, xps[i])
    denominator = nd.array([1.0], dtype='float32')
    for j, w_d in enumerate(weight_denominator):
        denominator = denominator + nd.abs(nd.multiply(w_d, xps[j + 1]))
    return nd.divide(numerator, denominator).reshape(x.shape)
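A quick smoke test of the A version under the get_xps sketch above, with hypothetical hand-picked weights rather than fitted coefficients: P(X) = X and Q(X) = 1, so the activation should return its input unchanged.

x = nd.array([[-1.0, 0.0, 1.0]])
w_num = nd.array([0.0, 1.0])  # P(X) = 0 + 1*X
w_den = nd.array([0.0])       # Q(X) = 1 + |0*X| = 1
print(Rational_MXNET_A_F(x, w_num, w_den, training=False))  # ≈ [[-1. 0. 1.]]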
def Rational_MXNET_D_F(x, weight_numerator, weight_denominator, training,
                       random_deviation=0.1):
    # P(X) / Q(X) = (noised(a_0) + noised(a_1)*X + ... + noised(a_n)*X^n) /
    #               (1 + |noised(b_0)*X + noised(b_1)*X^2 + ... + noised(b_{n-1})*X^n|)
    # Each noised parameter is scaled by uniform noise so that it lies in
    # [(1 - random_deviation) * parameter, (1 + random_deviation) * parameter].
    if not training:
        # Inference: skip the noise and fall back to the deterministic B version.
        return Rational_MXNET_B_F(x, weight_numerator, weight_denominator,
                                  training)
    z = nd.reshape(x, shape=(-1,))
    lower_bound = nd.array([1 - random_deviation])
    upper_bound = nd.array([1 + random_deviation])
    xps = get_xps(weight_denominator, weight_numerator, z)
    numerator = nd.array([0], dtype='float32')
    for i, w_n in enumerate(weight_numerator):
        w_n_noised = nd.multiply(
            w_n,
            nd.sample_uniform(low=lower_bound,
                              high=upper_bound,
                              shape=z.shape,
                              dtype='float32'))
        numerator = numerator + nd.multiply(w_n_noised, xps[i])
    denominator = nd.array([0], dtype='float32')
    for j, w_d in enumerate(weight_denominator):
        w_d_noised = nd.multiply(
            w_d,
            nd.sample_uniform(low=lower_bound,
                              high=upper_bound,
                              shape=z.shape,
                              dtype='float32'))
        denominator = denominator + nd.multiply(w_d_noised, xps[j + 1])
    return nd.divide(numerator, (1 + nd.abs(denominator))).reshape(x.shape)
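The no-noise branch above delegates to Rational_MXNET_B_F, which is not shown in this section. A plausible sketch obtained by stripping the noise factors out of the D variant (an inference from the "noised" comment above, not the library's verbatim code):

def Rational_MXNET_B_F(x, weight_numerator, weight_denominator, training):
    # P(X) / Q(X) = (a_0 + a_1*X + ... + a_n*X^n) /
    #               (1 + |b_0*X + b_1*X^2 + ... + b_{n-1}*X^n|)
    z = nd.reshape(x, shape=(-1,))
    xps = get_xps(weight_denominator, weight_numerator, z)
    numerator = nd.array([0], dtype='float32')
    for i, w_n in enumerate(weight_numerator):
        numerator = numerator + nd.multiply(w_n, xps[i])
    denominator = nd.array([0], dtype='float32')
    for j, w_d in enumerate(weight_denominator):
        denominator = denominator + nd.multiply(w_d, xps[j + 1])
    return nd.divide(numerator, (1 + nd.abs(denominator))).reshape(x.shape)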
def getUniqueMatch(iou, min_threshold=1e-12):
    # Greedy one-to-one matching: walk (row, col) pairs in descending IoU
    # order and keep a pair only if neither its row nor its column is taken.
    N, M = iou.shape
    iouf = iou.reshape((-1,))
    argmax = nd.argsort(iouf, is_ascend=False)
    argrow = nd.floor(nd.divide(argmax, M))
    argcol = nd.modulo(argmax, M)
    uniquel = set()
    uniquer = set()
    match = nd.ones((N,)) * -1
    i = 0
    while True:
        # Convert the float indices to Python ints before indexing.
        r = int(argrow[i].asscalar())
        c = int(argcol[i].asscalar())
        if c not in uniquel and r not in uniquer:
            uniquel.add(c)
            uniquer.add(r)
            if iou[r, c].asscalar() > min_threshold:
                match[r] = c
            if len(uniquel) == M or len(uniquer) == N:
                break
        i += 1
    return match.reshape((1, -1))
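A hypothetical 3-anchor, 2-ground-truth IoU matrix shows the greedy behavior: the globally best pair (0, 0) is taken first, then (1, 1); row 2 stays unmatched at -1.

iou = nd.array([[0.9, 0.1],
                [0.8, 0.7],
                [0.2, 0.6]])
print(getUniqueMatch(iou))  # [[ 0.  1. -1.]]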
def preDataset2(SNR, data, batch_size, shuffle=True, fixed=None, debug=True):
    # Assumed external dependencies (not shown in this snippet): numpy as np,
    # mxnet's nd/gluon with gdata = mxnet.gluon.data, scipy.signal's tukey,
    # a loguru-style logger, and the project helpers Gen_noise, nd_std,
    # power_vec, getchiM and getMratio.
    dataset, _, RP, keys, fs, T, C, margin, _ = data
    # Window function
    dwindow = tukey(fs * T, alpha=1. / 8)
    data_block, label_block, chiMkeys_block, Mratiokeys_block, datasets, iterator = \
        {}, {}, {}, {}, {}, {}
    for pre in ['train', 'test']:
        data_block[pre] = RP(dataset[pre])  # (nsample, C, T*fs)
        # data_block[pre] = nd.concat(RP(dataset[pre]), RP(dataset[pre]), dim=0)  # 3150x1x4096 cpu nd.array
        if margin != 0.5:  # global
            # Check that every peak sits near the middle of the window
            assert nd.sum(nd.abs(data_block[pre].argmax(-1) - fs // 2)
                          > fs / 10).asscalar() == 0
        nsample = data_block[pre].shape[0]
        noise, noise_m_gps = Gen_noise(fs, T, C, fixed=fixed)  # (4096, C, fs*T) cpu ndarray
        sigma = data_block[pre].max(axis=-1) / SNR / nd_std(noise[:nsample], axis=-1)
        signal = nd.divide(data_block[pre],
                           sigma[:, 0].reshape((nsample, 1, 1)))  # taking H1 as leading
        data_block[pre] = signal + noise[:nsample]  # (nsample, C, T*fs)
        if fixed:
            noise_m, noise_p_gps = noise, noise_m_gps
        else:
            noise_m, noise_p_gps = Gen_noise(fs, T, C, fixed=fixed)  # (4096, C, fs*T) cpu ndarray
        data_block[pre] = nd.concat(data_block[pre], noise_m[:nsample],
                                    dim=0)  # (2*nsample, C, T*fs) cpu nd.array
        # Note: use the mixed (signal + pure-noise) data to generate the PSD
        spsd_block_channel = []
        for c in range(C):
            spsd_block = np.concatenate([
                np.real(np.fft.ifft(
                    1 / np.sqrt(power_vec(i[c].asnumpy(), fs)))).reshape(1, -1)
                for i in data_block[pre]
            ])  # (2*nsample, T*fs) np.array
            spsd_block_channel.append(
                nd.array(spsd_block).expand_dims(1).expand_dims(1))  # (2*nsample, 1, 1, T*fs) nd.array cpu
        spsd_block = nd.concatenate(spsd_block_channel, axis=1)  # (2*nsample, C, 1, T*fs)
        if debug:
            logger.debug('spsd_block for {}: {}', pre, spsd_block.shape)
        # data * dwindow
        data_block[pre] = (data_block[pre] * nd.array(dwindow)).expand_dims(2)  # (2*nsample, C, 1, T*fs) nd.array cpu
        if debug:
            logger.debug('data_block for {}: {}', pre, data_block[pre].shape)
        data_block[pre] = nd.concat(data_block[pre].expand_dims(1),
                                    spsd_block.expand_dims(1), dim=1)
        if debug:
            logger.debug('data_block(psd,nd) for {}: {}', pre,
                         data_block[pre].shape)  # (2*nsample, 2, C, 1, T*fs) cpu nd.array
        label_block[pre] = nd.array([1] * nsample + [0] * nsample)
        chiMkeys_block[pre] = nd.array(getchiM(keys[pre]).tolist() + [0] * nsample)
        Mratiokeys_block[pre] = nd.array(getMratio(keys[pre]).tolist() + [0] * nsample)
        datasets[pre] = gluon.data.ArrayDataset(data_block[pre], label_block[pre],
                                                chiMkeys_block[pre],
                                                Mratiokeys_block[pre])
        iterator[pre] = gdata.DataLoader(datasets[pre], batch_size, shuffle=shuffle,
                                         last_batch='keep', num_workers=0)
    if debug:
        logger.debug('\nNoise from: {} | {}', noise_m_gps[0], noise_p_gps[0])
    return dataset, iterator, (noise_m_gps[0] + noise_p_gps[0],
                               np.concatenate((noise_m_gps[1][:nsample],
                                               noise_p_gps[1][:nsample]), axis=0))
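The nd.divide call in the middle of preDataset2 is the interesting broadcast: each (C, T*fs) sample is rescaled by a single per-sample sigma taken from channel 0 (H1 as the leading detector). A standalone sketch of that step with made-up shapes:

from mxnet import nd

nsample, C, L = 4, 2, 8  # made-up shapes for illustration
signal_block = nd.random.uniform(shape=(nsample, C, L))
sigma = nd.random.uniform(low=0.5, high=1.5, shape=(nsample, C))

# Reshape the H1 column to (nsample, 1, 1) so it broadcasts over both
# the channel and the time axes.
scaled = nd.divide(signal_block, sigma[:, 0].reshape((nsample, 1, 1)))
print(scaled.shape)  # (4, 2, 8)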
def softmax(x):
    """Compute softmax values for each set of scores in x."""
    e_x = nd.exp(x)
    result = nd.divide(nd.transpose(e_x), nd.sum(e_x, axis=1))
    return nd.transpose(result)
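A quick check that each row of the output sums to one. Note that this formulation exponentiates the raw scores, so for large inputs the standard trick of subtracting the per-row maximum before nd.exp would be safer; the demo keeps the scores small.

x = nd.array([[1.0, 2.0, 3.0],
              [1.0, 1.0, 1.0]])
p = softmax(x)
print(p.sum(axis=1))  # [1. 1.]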