def test_spmm(idtype, g, shp, msg, reducer):
    """Check gspmm against UDF-based message passing, forward and backward.

    Rows of the output belonging to zero-in-degree nodes are dropped before
    comparison, since the two code paths may fill them differently.
    """
    g = g.astype(idtype).to(F.ctx())
    if dgl.backend.backend_name == 'tensorflow' and (reducer in ['min', 'max']):
        # tensorflow dlpack has problem writing into int32 arrays on GPU.
        pytest.skip()
    print(g)
    print(g.idtype)
    # Strictly positive random features for source nodes and edges.
    feat_u = F.tensor(
        np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1)
    feat_e = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1)
    print('u shape: {}, e shape: {}'.format(F.shape(feat_u), F.shape(feat_e)))
    g.srcdata['x'] = F.attach_grad(F.clone(feat_u))
    g.edata['w'] = F.attach_grad(F.clone(feat_e))
    print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer))

    # --- fused kernel path ---
    u = F.attach_grad(F.clone(feat_u))
    e = F.attach_grad(F.clone(feat_e))
    with F.record_grad():
        v = gspmm(g, msg, reducer, u, e)
        # Keep only destinations that actually receive messages.
        non_degree_indices = F.tensor(
            np.nonzero(F.asnumpy(g.in_degrees()) != 0)[0])
        v = F.gather_row(v, non_degree_indices)
        if g.number_of_edges() > 0:
            F.backward(F.reduce_sum(v))
            if msg != 'copy_rhs':
                grad_u = F.grad(u)
            if msg != 'copy_lhs':
                grad_e = F.grad(e)

    # --- reference UDF path ---
    with F.record_grad():
        g.update_all(udf_msg[msg], udf_reduce[reducer])
        if g.number_of_edges() > 0:
            v1 = F.gather_row(g.dstdata['v'], non_degree_indices)
            assert F.allclose(v, v1)
            print('forward passed')
            F.backward(F.reduce_sum(v1))
            if msg != 'copy_rhs':
                if reducer in ['min', 'max']:
                    # min/max gradients may carry small numerical errors;
                    # accept a 1% relative deviation.
                    rate = F.reduce_sum(
                        F.abs(F.grad(g.srcdata['x']) - grad_u)) /\
                        F.reduce_sum(F.abs(grad_u))
                    assert F.as_scalar(rate) < 1e-2, rate
                else:
                    assert F.allclose(F.grad(g.srcdata['x']), grad_u)
            if msg != 'copy_lhs':
                if reducer in ['min', 'max']:
                    rate = F.reduce_sum(
                        F.abs(F.grad(g.edata['w']) - grad_e)) /\
                        F.reduce_sum(F.abs(grad_e))
                    assert F.as_scalar(rate) < 1e-2, rate
                else:
                    assert F.allclose(F.grad(g.edata['w']), grad_e)
            print('backward passed')

    # Clean the graph so later tests see no leftover features.
    g.srcdata.pop('x')
    g.edata.pop('w')
    if 'v' in g.dstdata:
        g.dstdata.pop('v')
def test_spmm(idtype, g, shp, msg, reducer):
    """Check gspmm against UDF-based message passing, forward and backward.

    For min/max reducers, +/-inf entries produced for isolated destinations
    are zeroed before comparison.
    """
    g = g.astype(idtype).to(F.ctx())
    print(g)
    print(g.idtype)
    # Strictly positive random features for source nodes and edges.
    feat_u = F.tensor(
        np.random.rand(*((g.number_of_src_nodes(),) + shp[0])) + 1)
    feat_e = F.tensor(np.random.rand(*((g.number_of_edges(),) + shp[1])) + 1)
    print('u shape: {}, e shape: {}'.format(F.shape(feat_u), F.shape(feat_e)))
    g.srcdata['x'] = F.attach_grad(F.clone(feat_u))
    g.edata['w'] = F.attach_grad(F.clone(feat_e))
    print('SpMM(message func: {}, reduce func: {})'.format(msg, reducer))

    # --- fused kernel path ---
    u = F.attach_grad(F.clone(feat_u))
    e = F.attach_grad(F.clone(feat_e))
    with F.record_grad():
        v = gspmm(g, msg, reducer, u, e)
        if reducer in ['max', 'min']:
            # Isolated destinations come out as +/-inf under min/max.
            v = F.replace_inf_with_zero(v)
        if g.number_of_edges() > 0:
            F.backward(F.reduce_sum(v))
            if msg != 'copy_rhs':
                grad_u = F.grad(u)
            if msg != 'copy_lhs':
                grad_e = F.grad(e)

    # --- reference UDF path ---
    with F.record_grad():
        g.update_all(udf_msg[msg], udf_reduce[reducer])
        if g.number_of_edges() > 0:
            v1 = g.dstdata['v']
            assert F.allclose(v, v1)
            print('forward passed')
            F.backward(F.reduce_sum(v1))
            if msg != 'copy_rhs':
                if reducer in ['min', 'max']:
                    # min/max gradients may carry small numerical errors;
                    # accept a 1% relative deviation.
                    rate = F.reduce_sum(
                        F.abs(F.grad(g.srcdata['x']) - grad_u)) /\
                        F.reduce_sum(F.abs(grad_u))
                    assert F.as_scalar(rate) < 1e-2, rate
                else:
                    assert F.allclose(F.grad(g.srcdata['x']), grad_u)
            if msg != 'copy_lhs':
                if reducer in ['min', 'max']:
                    rate = F.reduce_sum(
                        F.abs(F.grad(g.edata['w']) - grad_e)) /\
                        F.reduce_sum(F.abs(grad_e))
                    assert F.as_scalar(rate) < 1e-2, rate
                else:
                    assert F.allclose(F.grad(g.edata['w']), grad_e)
            print('backward passed')

    # Clean the graph so later tests see no leftover features.
    g.srcdata.pop('x')
    g.edata.pop('w')
    if 'v' in g.dstdata:
        g.dstdata.pop('v')
def call(self, x, **kwargs):
    """Build the sinc band-pass filter bank and convolve it with the input.

    Parameters
    ----------
    x : tensor
        Input signal; assumed shape [batch, in_width, 1] (NWC) per
        K.conv1d's default data format — TODO confirm with callers.

    Returns
    -------
    tensor
        1-D convolution of ``x`` with ``N_filt`` band-pass filters of
        length ``Filt_dim``.
    """
    debug_print("call")
    # BUG FIX: the filter band edges, window and time axis must be derived
    # from the *current* trainable weights on every forward pass.  The
    # original code computed them locally, discarded them, and used the
    # stale self.filt_beg_freq / self.filt_end_freq / self.window /
    # self.t_right captured once in build(), so the filters never tracked
    # training updates.
    min_freq = 50.0
    min_band = 50.0
    filt_beg_freq = K.abs(self.filt_b1) + min_freq / self.freq_scale
    filt_end_freq = filt_beg_freq + (K.abs(self.filt_band) +
                                     min_band / self.freq_scale)

    # Hamming window over the filter support.  Use K.cast instead of
    # K.variable: creating new variables inside call() would add one
    # per forward pass.
    n = np.linspace(0, self.Filt_dim, self.Filt_dim)
    window = 0.54 - 0.46 * K.cos(2 * math.pi * n / self.Filt_dim)
    window = K.cast(window, "float32")

    # Right half of the symmetric sinc time axis, in seconds.
    t_right_linspace = np.linspace(1, (self.Filt_dim - 1) / 2,
                                   int((self.Filt_dim - 1) / 2))
    t_right = K.cast(t_right_linspace / self.fs, "float32")

    # One band-pass filter per output channel: difference of two
    # low-pass sincs, peak-normalized, then windowed.
    output_list = []
    for i in range(self.N_filt):
        low_pass1 = (
            2 * filt_beg_freq[i] *
            sinc(filt_beg_freq[i] * self.freq_scale, t_right))
        low_pass2 = (
            2 * filt_end_freq[i] *
            sinc(filt_end_freq[i] * self.freq_scale, t_right))
        band_pass = low_pass2 - low_pass1
        band_pass = band_pass / K.max(band_pass)
        output_list.append(band_pass * window)
    filters = K.stack(output_list)  # (N_filt, Filt_dim)
    filters = K.transpose(filters)  # (Filt_dim, N_filt)
    # TF conv1d expects kernels shaped
    # (filter_width, in_channels, out_channels).
    filters = K.reshape(filters, (self.Filt_dim, 1, self.N_filt))

    # Do the convolution.
    debug_print("call")
    debug_print(" x", x)
    debug_print(" filters", filters)
    out = K.conv1d(x, kernel=filters)
    debug_print(" out", out)
    return out
def build(self, input_shape):
    """Create trainable filter parameters and fixed helper tensors."""
    # Trainable per-filter parameters: normalized lower band edge
    # and band width.
    self.filt_b1 = self.add_weight(
        name="filt_b1",
        shape=(self.N_filt,),
        initializer="he_uniform",
        trainable=True,
    )
    self.filt_band = self.add_weight(
        name="filt_band",
        shape=(self.N_filt,),
        initializer="he_uniform",
        trainable=True,
    )

    # Mel-scale initialization of the filterbanks.
    low_freq_mel = 80
    high_freq_mel = 2595 * np.log10(
        1 + (self.fs / 2) / 700)  # Convert Hz to Mel
    mel_points = np.linspace(
        low_freq_mel, high_freq_mel, self.N_filt)  # Equally spaced in Mel
    f_cos = 700 * (10 ** (mel_points / 2595) - 1)  # Convert Mel to Hz
    b1 = np.roll(f_cos, 1)
    b2 = np.roll(f_cos, -1)
    b1[0] = 30
    b2[-1] = (self.fs / 2) - 100
    self.freq_scale = self.fs * 1.0
    # Assign initial values (normalized by fs) to the two weight tensors.
    self.set_weights([b1 / self.freq_scale, (b2 - b1) / self.freq_scale])

    # Beginning and end frequencies of the filters, derived from the
    # (initial) weights.
    min_freq = 50.0
    min_band = 50.0
    self.filt_beg_freq = K.abs(self.filt_b1) + min_freq / self.freq_scale
    self.filt_end_freq = self.filt_beg_freq + (
        K.abs(self.filt_band) + min_band / self.freq_scale)

    # Filter window (hamming).
    n = np.linspace(0, self.Filt_dim, self.Filt_dim)
    hamming = 0.54 - 0.46 * K.cos(2 * math.pi * n / self.Filt_dim)
    self.window = K.variable(K.cast(hamming, "float32"))
    debug_print(" window", self.window.shape)

    # Right half of the symmetric time axis, in seconds.
    # TODO what is this?
    t_right_linspace = np.linspace(1, (self.Filt_dim - 1) / 2,
                                   int((self.Filt_dim - 1) / 2))
    self.t_right = K.variable(t_right_linspace / self.fs)
    debug_print(" t_right", self.t_right)

    # Be sure to call this at the end.
    super(MusicSinc1D, self).build(input_shape)
def test_edge_coarsening(idtype, g, weight, relabel):
    """neighbor_matching must yield a valid matching-based coarsening."""
    num_nodes = g.num_nodes()
    g = dgl.to_bidirected(g)
    g = g.astype(idtype).to(F.ctx())
    edge_weight = None
    if weight:
        # Positive random weights when weighted matching is requested.
        edge_weight = F.abs(F.randn((g.num_edges(),))).to(F.ctx())

    node_labels = neighbor_matching(g, edge_weight, relabel_idx=relabel)
    unique_ids, counts = th.unique(node_labels, return_counts=True)
    num_result_ids = unique_ids.size(0)

    # shape correct
    assert node_labels.shape == (g.num_nodes(),)
    # all nodes marked
    assert F.reduce_sum(node_labels < 0).item() == 0
    # number of unique node ids correct.
    assert num_nodes // 2 <= num_result_ids <= num_nodes
    # each unique id has <= 2 nodes
    assert F.reduce_sum(counts > 2).item() == 0
    # if two nodes share an id, they must be neighbors
    all_idx = F.arange(0, num_nodes, idtype)
    for label in unique_ids:
        members = all_idx[node_labels == label.item()]
        if members.size(0) == 2:
            u, v = members[0].item(), members[1].item()
            assert g.has_edges_between(u, v)
def graph1():
    """Build a small fixed directed graph on CPU with random node
    features ('h') and positive scalar edge weights ('scalar_w')."""
    src = [0, 0, 0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 6, 6, 7, 8, 9]
    dst = [4, 5, 1, 2, 4, 7, 9, 8, 6, 4, 1, 0, 1, 0, 2, 3, 5]
    g = dgl.graph((src, dst), device=F.cpu())
    g.ndata['h'] = F.copy_to(F.randn((g.number_of_nodes(), 2)), F.cpu())
    g.edata['scalar_w'] = F.copy_to(
        F.abs(F.randn((g.number_of_edges(),))), F.cpu())
    return g
def test_HinverseG(backendopt):
    """conjugate_gradient must solve A x = b for an SPD matrix A on
    every requested backend."""
    for datatype in backendopt:
        T.set_backend(datatype)
        size = 10
        T.seed(1224)

        # Build a symmetric positive-definite matrix: A^T A + I.
        mat = T.random([size, size])
        mat = T.transpose(mat) @ mat
        mat = mat + T.identity(size)
        rhs = T.random([size])

        def hess_fn(x):
            # Hessian-vector product: A @ x.
            return [T.einsum("ab,b->a", mat, x[0])]

        error_tol = 1e-9
        x, = conjugate_gradient(hess_fn, [rhs], error_tol)
        # Residual must be tiny.
        assert T.norm(T.abs(T.einsum("ab,b->a", mat, x) - rhs)) <= 1e-4
def get_reg(self, params):
    """Return the combined L1 + L2 penalty summed over all tensors in
    *params*, scaled by self._l1_ and self._l2_ respectively."""
    l1_term = K.sum([K.sum(K.abs(param)) for param in params])
    l2_term = K.sum([K.sum(K.sqr(param)) for param in params])
    return self._l1_ * l1_term + self._l2_ * l2_term
def norm_lp(y_pred, y_gt, norm):
    """Batch mean of the Lp distance (raised to the p-th power, i.e.
    without the final root) between prediction and ground truth along
    the last axis."""
    abs_diff = K.abs(y_pred - y_gt)
    per_sample = K.sum(K.power(abs_diff, norm), axis=-1)
    return K.mean(per_sample)
def get_reg(self, params):
    """L1-only penalty: scaled sum of absolute values over *params*."""
    total = K.sum([K.sum(K.abs(param)) for param in params])
    return self._l1_ * total
def get_reg(self, params):
    """Combined L1 + L2 regularization over every tensor in *params*."""
    abs_sum = K.sum([K.sum(K.abs(p)) for p in params])
    sq_sum = K.sum([K.sum(K.sqr(p)) for p in params])
    reg = self._l1_ * abs_sum + self._l2_ * sq_sum
    return reg
def get_mask(input):
    """Mask marking positions whose last-axis L1 norm is nonzero
    (i.e. rows that are not entirely zero-padded)."""
    magnitude = K.sum(K.abs(input), axis=-1)
    return K.neq(magnitude, 0.)
def get_reg(self, params):
    """Scaled L1 penalty accumulated over all parameter tensors."""
    per_param = [K.sum(K.abs(p)) for p in params]
    return self._l1_ * K.sum(per_param)
def __call__(self, loss):
    """Add halved L1 and L2 weight penalties on self.p to *loss*."""
    l1_penalty = K.sum(K.abs(self.p)) * self.l1 / 2.
    l2_penalty = K.sum(K.square(self.p)) * self.l2 / 2.
    return loss + l1_penalty + l2_penalty
def __call__(self, loss):
    """Add activity penalties on the wrapped layer's (training-mode)
    output to *loss*: L1/L2 of the batch-mean activations."""
    output = self.layer.get_output(True)
    mean_abs = K.mean(K.abs(output), axis=0)
    mean_sq = K.mean(K.square(output), axis=0)
    return loss + self.l1 * K.sum(mean_abs) + self.l2 * K.sum(mean_sq)
def __call__(self, loss):
    """Accumulate halved L1/L2 penalties of self.p onto *loss*."""
    total = loss
    total = total + K.sum(K.abs(self.p)) * self.l1 / 2.
    total = total + K.sum(K.square(self.p)) * self.l2 / 2.
    return total
def __call__(self, loss):
    """Activity regularization: penalize the batch-mean magnitude and
    square of the wrapped layer's training-mode output."""
    activations = self.layer.get_output(True)
    l1_term = self.l1 * K.sum(K.mean(K.abs(activations), axis=0))
    l2_term = self.l2 * K.sum(K.mean(K.square(activations), axis=0))
    return loss + l1_term + l2_term