def test_topk_nodes():
    # test#1: basic
    g0 = dgl.DGLGraph(nx.path_graph(14))
    feat0 = F.randn((g0.number_of_nodes(), 10))
    g0.ndata['x'] = feat0
    # to test the case where k > number of nodes.
    dgl.topk_nodes(g0, 'x', 20, idx=-1)
    # test correctness
    val, indices = dgl.topk_nodes(g0, 'x', 5, idx=-1)
    ground_truth = F.reshape(
        F.argsort(F.slice_axis(feat0, -1, 9, 10), 0, True)[:5], (5,))
    assert F.allclose(ground_truth, indices)
    g0.ndata.pop('x')

    # test#2: batched graph
    g1 = dgl.DGLGraph(nx.path_graph(12))
    feat1 = F.randn((g1.number_of_nodes(), 10))
    bg = dgl.batch([g0, g1])
    bg.ndata['x'] = F.cat([feat0, feat1], 0)
    # to test the case where k > number of nodes.
    dgl.topk_nodes(bg, 'x', 16, idx=1)
    # test correctness
    val, indices = dgl.topk_nodes(bg, 'x', 6, descending=False, idx=0)
    ground_truth_0 = F.reshape(
        F.argsort(F.slice_axis(feat0, -1, 0, 1), 0, False)[:6], (6,))
    ground_truth_1 = F.reshape(
        F.argsort(F.slice_axis(feat1, -1, 0, 1), 0, False)[:6], (6,))
    ground_truth = F.stack([ground_truth_0, ground_truth_1], 0)
    assert F.allclose(ground_truth, indices)

    # test idx=None
    val, indices = dgl.topk_nodes(bg, 'x', 6, descending=True)
    assert F.allclose(
        val, F.stack([F.topk(feat0, 6, 0), F.topk(feat1, 6, 0)], 0))
def test_send_twice_different_msg():
    g = DGLGraph()
    g.set_n_initializer(dgl.init.zero_initializer)
    g.add_nodes(3)
    g.add_edge(0, 1)
    g.add_edge(2, 1)

    def _message_a(edges):
        return {'a': edges.src['a']}

    def _message_b(edges):
        return {'a': edges.src['a'] * 3}

    def _reduce(nodes):
        return {'a': F.max(nodes.mailbox['a'], 1)}

    old_repr = F.randn((3, 5))

    # Sending twice along the same edge: the later message overwrites the
    # earlier one, so node 1 only receives _message_b.
    g.ndata['a'] = old_repr
    g.send((0, 1), _message_a)
    g.send((0, 1), _message_b)
    g.recv(1, _reduce)
    new_repr = g.ndata['a']
    assert F.allclose(new_repr[1], old_repr[0] * 3)

    # Sending along two different edges: both messages are kept and reduced
    # together at the destination.
    g.ndata['a'] = old_repr
    g.send((0, 1), _message_a)
    g.send((2, 1), _message_b)
    g.recv(1, _reduce)
    new_repr = g.ndata['a']
    assert F.allclose(new_repr[1],
                      F.max(F.stack([old_repr[0], old_repr[2] * 3], 0), 0))
def test_simple_readout():
    g1 = dgl.DGLGraph()
    g1.add_nodes(3)
    g2 = dgl.DGLGraph()
    g2.add_nodes(4)  # no edges
    g1.add_edges([0, 1, 2], [2, 0, 1])

    n1 = F.randn((3, 5))
    n2 = F.randn((4, 5))
    e1 = F.randn((3, 5))
    s1 = F.sum(n1, 0)    # node sums
    s2 = F.sum(n2, 0)
    se1 = F.sum(e1, 0)   # edge sums
    m1 = F.mean(n1, 0)   # node means
    m2 = F.mean(n2, 0)
    me1 = F.mean(e1, 0)  # edge means
    w1 = F.randn((3,))
    w2 = F.randn((4,))
    max1 = F.max(n1, 0)
    max2 = F.max(n2, 0)
    maxe1 = F.max(e1, 0)
    ws1 = F.sum(n1 * F.unsqueeze(w1, 1), 0)
    ws2 = F.sum(n2 * F.unsqueeze(w2, 1), 0)
    wm1 = F.sum(n1 * F.unsqueeze(w1, 1), 0) / F.sum(F.unsqueeze(w1, 1), 0)
    wm2 = F.sum(n2 * F.unsqueeze(w2, 1), 0) / F.sum(F.unsqueeze(w2, 1), 0)

    g1.ndata['x'] = n1
    g2.ndata['x'] = n2
    g1.ndata['w'] = w1
    g2.ndata['w'] = w2
    g1.edata['x'] = e1

    assert F.allclose(dgl.sum_nodes(g1, 'x'), s1)
    assert F.allclose(dgl.sum_nodes(g1, 'x', 'w'), ws1)
    assert F.allclose(dgl.sum_edges(g1, 'x'), se1)
    assert F.allclose(dgl.mean_nodes(g1, 'x'), m1)
    assert F.allclose(dgl.mean_nodes(g1, 'x', 'w'), wm1)
    assert F.allclose(dgl.mean_edges(g1, 'x'), me1)
    assert F.allclose(dgl.max_nodes(g1, 'x'), max1)
    assert F.allclose(dgl.max_edges(g1, 'x'), maxe1)

    g = dgl.batch([g1, g2])
    s = dgl.sum_nodes(g, 'x')
    m = dgl.mean_nodes(g, 'x')
    max_bg = dgl.max_nodes(g, 'x')
    assert F.allclose(s, F.stack([s1, s2], 0))
    assert F.allclose(m, F.stack([m1, m2], 0))
    assert F.allclose(max_bg, F.stack([max1, max2], 0))
    ws = dgl.sum_nodes(g, 'x', 'w')
    wm = dgl.mean_nodes(g, 'x', 'w')
    assert F.allclose(ws, F.stack([ws1, ws2], 0))
    assert F.allclose(wm, F.stack([wm1, wm2], 0))
    s = dgl.sum_edges(g, 'x')
    m = dgl.mean_edges(g, 'x')
    max_bg_e = dgl.max_edges(g, 'x')
    assert F.allclose(s, F.stack([se1, F.zeros(5)], 0))
    assert F.allclose(m, F.stack([me1, F.zeros(5)], 0))
    assert F.allclose(max_bg_e, F.stack([maxe1, F.zeros(5)], 0))
def call(self, x, **kwargs):
    debug_print("call")
    # filters = K.zeros(shape=(N_filt, Filt_dim))

    min_freq = 50.0
    min_band = 50.0
    filt_beg_freq = K.abs(self.filt_b1) + min_freq / self.freq_scale
    filt_end_freq = filt_beg_freq + (K.abs(self.filt_band) +
                                     min_band / self.freq_scale)

    n = np.linspace(0, self.Filt_dim, self.Filt_dim)
    window = 0.54 - 0.46 * K.cos(2 * math.pi * n / self.Filt_dim)
    window = K.cast(window, "float32")
    window = K.variable(window)

    t_right_linspace = np.linspace(1, (self.Filt_dim - 1) / 2,
                                   int((self.Filt_dim - 1) / 2))
    t_right = K.variable(t_right_linspace / self.fs)

    # Compute the filters. Use the local tensors computed above
    # (filt_beg_freq, filt_end_freq, t_right, window); they are not
    # stored as attributes on self.
    output_list = []
    for i in range(self.N_filt):
        low_pass1 = (
            2 * filt_beg_freq[i] *
            sinc(filt_beg_freq[i] * self.freq_scale, t_right))
        low_pass2 = (
            2 * filt_end_freq[i] *
            sinc(filt_end_freq[i] * self.freq_scale, t_right))
        band_pass = low_pass2 - low_pass1
        band_pass = band_pass / K.max(band_pass)
        output_list.append(band_pass * window)
    filters = K.stack(output_list)   # (80, 251)
    filters = K.transpose(filters)   # (251, 80)
    filters = K.reshape(
        filters, (self.Filt_dim, 1, self.N_filt)
    )  # (251, 1, 80); TF expects (filter_width, in_channels, out_channels),
       # whereas PyTorch uses (out_channels, in_channels, filter_width).

    """Given an input tensor of shape [batch, in_width, in_channels] if
    data_format is "NWC", or [batch, in_channels, in_width] if data_format
    is "NCW", and a filter / kernel tensor of shape
    [filter_width, in_channels, out_channels], this op reshapes the
    arguments to pass them to conv2d to perform the equivalent convolution
    operation. Internally, this op reshapes the input tensors and invokes
    tf.nn.conv2d. For example, if data_format does not start with "NC", a
    tensor of shape [batch, in_width, in_channels] is reshaped to
    [batch, 1, in_width, in_channels], and the filter is reshaped to
    [1, filter_width, in_channels, out_channels]. The result is then
    reshaped back to [batch, out_width, out_channels] (where out_width is
    a function of the stride and padding as in conv2d) and returned to the
    caller.
    """

    # Do the convolution.
    debug_print("call")
    debug_print("  x", x)
    debug_print("  filters", filters)
    out = K.conv1d(x, kernel=filters)
    debug_print("  out", out)

    return out
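# A minimal, self-contained shape check (separate from the layer above)
# illustrating the conv1d convention quoted in the docstring: an input of
# shape [batch, in_width, in_channels] convolved with a kernel of shape
# [filter_width, in_channels, out_channels] yields
# [batch, out_width, out_channels]. The function name, the import path for
# the Keras backend, and the sizes (batch=2, in_width=16000, Filt_dim=251,
# N_filt=80) are illustrative assumptions, not part of the layer.
def _conv1d_shape_demo():
    import numpy as np
    from tensorflow.keras import backend as K
    x = K.constant(np.random.randn(2, 16000, 1).astype("float32"))
    kernel = K.constant(np.random.randn(251, 1, 80).astype("float32"))
    out = K.conv1d(x, kernel=kernel)
    # With the default 'valid' padding, out_width = 16000 - 251 + 1 = 15750.
    print(K.int_shape(out))  # (2, 15750, 80)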
def test_broadcast_edges():
    # test#1: basic
    g0 = dgl.DGLGraph(nx.path_graph(10))
    feat0 = F.randn((40,))
    ground_truth = F.stack([feat0] * g0.number_of_edges(), 0)
    assert F.allclose(dgl.broadcast_edges(g0, feat0), ground_truth)

    # test#2: batched graph
    g1 = dgl.DGLGraph(nx.path_graph(3))
    g2 = dgl.DGLGraph()
    g3 = dgl.DGLGraph(nx.path_graph(12))
    bg = dgl.batch([g0, g1, g2, g3])
    feat1 = F.randn((40,))
    feat2 = F.randn((40,))
    feat3 = F.randn((40,))
    ground_truth = F.stack(
        [feat0] * g0.number_of_edges() +
        [feat1] * g1.number_of_edges() +
        [feat2] * g2.number_of_edges() +
        [feat3] * g3.number_of_edges(), 0
    )
    assert F.allclose(
        dgl.broadcast_edges(bg, F.stack([feat0, feat1, feat2, feat3], 0)),
        ground_truth)
def answer(*args):
    return F.max(F.stack(args, 0), 0)
def test_level2():
    #edges = {
    #    'follows': ([0, 1], [1, 2]),
    #    'plays': ([0, 1, 2, 1], [0, 0, 1, 1]),
    #    'wishes': ([0, 2], [1, 0]),
    #    'develops': ([0, 1], [0, 1]),
    #}
    g = create_test_heterograph()

    def rfunc(nodes):
        return {'y': F.sum(nodes.mailbox['m'], 1)}

    def rfunc2(nodes):
        return {'y': F.max(nodes.mailbox['m'], 1)}

    def mfunc(edges):
        return {'m': edges.src['h']}

    def afunc(nodes):
        return {'y': nodes.data['y'] + 1}

    #############################################################
    # send_and_recv
    #############################################################

    g.nodes['user'].data['h'] = F.ones((3, 2))
    g.send_and_recv([2, 3], mfunc, rfunc, etype='plays')
    y = g.nodes['game'].data['y']
    assert F.array_equal(y, F.tensor([[0., 0.], [2., 2.]]))

    # only one type
    g['plays'].send_and_recv([2, 3], mfunc, rfunc)
    y = g.nodes['game'].data['y']
    assert F.array_equal(y, F.tensor([[0., 0.], [2., 2.]]))

    # test fail case
    # fail due to multiple types
    fail = False
    try:
        g.send_and_recv([2, 3], mfunc, rfunc)
    except dgl.DGLError:
        fail = True
    assert fail

    # test multi
    g.multi_send_and_recv(
        {'plays': (g.edges(etype='plays'), mfunc, rfunc),
         ('user', 'wishes', 'game'): (g.edges(etype='wishes'), mfunc, rfunc2)},
        'sum')
    assert F.array_equal(g.nodes['game'].data['y'],
                         F.tensor([[3., 3.], [3., 3.]]))

    # test multi
    g.multi_send_and_recv(
        {'plays': (g.edges(etype='plays'), mfunc, rfunc, afunc),
         ('user', 'wishes', 'game'): (g.edges(etype='wishes'), mfunc, rfunc2)},
        'sum', afunc)
    assert F.array_equal(g.nodes['game'].data['y'],
                         F.tensor([[5., 5.], [5., 5.]]))

    # test cross reducer
    g.nodes['user'].data['h'] = F.randn((3, 2))
    for cred in ['sum', 'max', 'min', 'mean']:
        g.multi_send_and_recv(
            {'plays': (g.edges(etype='plays'), mfunc, rfunc, afunc),
             'wishes': (g.edges(etype='wishes'), mfunc, rfunc2)},
            cred, afunc)
        y = g.nodes['game'].data['y']
        g['plays'].send_and_recv(g.edges(etype='plays'), mfunc, rfunc, afunc)
        y1 = g.nodes['game'].data['y']
        g['wishes'].send_and_recv(g.edges(etype='wishes'), mfunc, rfunc2)
        y2 = g.nodes['game'].data['y']
        yy = get_redfn(cred)(F.stack([y1, y2], 0), 0)
        yy = yy + 1  # final afunc
        assert F.array_equal(y, yy)

    # test fail case
    # fail because cannot infer ntype
    fail = False
    try:
        g.multi_send_and_recv(
            {'plays': (g.edges(etype='plays'), mfunc, rfunc),
             'follows': (g.edges(etype='follows'), mfunc, rfunc2)},
            'sum')
    except dgl.DGLError:
        fail = True
    assert fail

    g.nodes['game'].data.clear()

    #############################################################
    # pull
    #############################################################

    g.nodes['user'].data['h'] = F.ones((3, 2))
    g.pull(1, mfunc, rfunc, etype='plays')
    y = g.nodes['game'].data['y']
    assert F.array_equal(y, F.tensor([[0., 0.], [2., 2.]]))

    # only one type
    g['plays'].pull(1, mfunc, rfunc)
    y = g.nodes['game'].data['y']
    assert F.array_equal(y, F.tensor([[0., 0.], [2., 2.]]))

    # test fail case
    fail = False
    try:
        g.pull(1, mfunc, rfunc)
    except dgl.DGLError:
        fail = True
    assert fail

    # test multi
    g.multi_pull(
        1,
        {'plays': (mfunc, rfunc),
         ('user', 'wishes', 'game'): (mfunc, rfunc2)},
        'sum')
    assert F.array_equal(g.nodes['game'].data['y'],
                         F.tensor([[0., 0.], [3., 3.]]))

    # test multi
    g.multi_pull(
        1,
        {'plays': (mfunc, rfunc, afunc),
         ('user', 'wishes', 'game'): (mfunc, rfunc2)},
        'sum', afunc)
    assert F.array_equal(g.nodes['game'].data['y'],
                         F.tensor([[0., 0.], [5., 5.]]))

    # test cross reducer
    g.nodes['user'].data['h'] = F.randn((3, 2))
    for cred in ['sum', 'max', 'min', 'mean']:
        g.multi_pull(
            1,
            {'plays': (mfunc, rfunc, afunc),
             'wishes': (mfunc, rfunc2)},
            cred, afunc)
        y = g.nodes['game'].data['y']
        g['plays'].pull(1, mfunc, rfunc, afunc)
        y1 = g.nodes['game'].data['y']
        g['wishes'].pull(1, mfunc, rfunc2)
        y2 = g.nodes['game'].data['y']
        g.nodes['game'].data['y'] = get_redfn(cred)(F.stack([y1, y2], 0), 0)
        g.apply_nodes(afunc, 1, ntype='game')
        yy = g.nodes['game'].data['y']
        assert F.array_equal(y, yy)

    # test fail case
    # fail because cannot infer ntype
    fail = False
    try:
        g.multi_pull(
            1,
            {'plays': (mfunc, rfunc),
             'follows': (mfunc, rfunc2)},
            'sum')
    except dgl.DGLError:
        fail = True
    assert fail

    g.nodes['game'].data.clear()

    #############################################################
    # update_all
    #############################################################

    g.nodes['user'].data['h'] = F.ones((3, 2))
    g.update_all(mfunc, rfunc, etype='plays')
    y = g.nodes['game'].data['y']
    assert F.array_equal(y, F.tensor([[2., 2.], [2., 2.]]))

    # only one type
    g['plays'].update_all(mfunc, rfunc)
    y = g.nodes['game'].data['y']
    assert F.array_equal(y, F.tensor([[2., 2.], [2., 2.]]))

    # test fail case
    # fail due to multiple types
    fail = False
    try:
        g.update_all(mfunc, rfunc)
    except dgl.DGLError:
        fail = True
    assert fail

    # test multi
    g.multi_update_all(
        {'plays': (mfunc, rfunc),
         ('user', 'wishes', 'game'): (mfunc, rfunc2)},
        'sum')
    assert F.array_equal(g.nodes['game'].data['y'],
                         F.tensor([[3., 3.], [3., 3.]]))

    # test multi
    g.multi_update_all(
        {'plays': (mfunc, rfunc, afunc),
         ('user', 'wishes', 'game'): (mfunc, rfunc2)},
        'sum', afunc)
    assert F.array_equal(g.nodes['game'].data['y'],
                         F.tensor([[5., 5.], [5., 5.]]))

    # test cross reducer
    g.nodes['user'].data['h'] = F.randn((3, 2))
    for cred in ['sum', 'max', 'min', 'mean', 'stack']:
        g.multi_update_all(
            {'plays': (mfunc, rfunc, afunc),
             'wishes': (mfunc, rfunc2)},
            cred, afunc)
        y = g.nodes['game'].data['y']
        g['plays'].update_all(mfunc, rfunc, afunc)
        y1 = g.nodes['game'].data['y']
        g['wishes'].update_all(mfunc, rfunc2)
        y2 = g.nodes['game'].data['y']
        if cred == 'stack':
            # stack has two equally correct outcomes (either stacking order)
            yy1 = F.stack([F.unsqueeze(y1, 1), F.unsqueeze(y2, 1)], 1)
            yy1 = yy1 + 1  # final afunc
            yy2 = F.stack([F.unsqueeze(y2, 1), F.unsqueeze(y1, 1)], 1)
            yy2 = yy2 + 1  # final afunc
            assert F.array_equal(y, yy1) or F.array_equal(y, yy2)
        else:
            yy = get_redfn(cred)(F.stack([y1, y2], 0), 0)
            yy = yy + 1  # final afunc
            assert F.array_equal(y, yy)

    # test fail case
    # fail because cannot infer ntype
    fail = False
    try:
        g.update_all(
            {'plays': (mfunc, rfunc),
             'follows': (mfunc, rfunc2)},
            'sum')
    except dgl.DGLError:
        fail = True
    assert fail

    g.nodes['game'].data.clear()
def test_level1():
    #edges = {
    #    'follows': ([0, 1], [1, 2]),
    #    'plays': ([0, 1, 2, 1], [0, 0, 1, 1]),
    #    'wishes': ([0, 2], [1, 0]),
    #    'develops': ([0, 1], [0, 1]),
    #}
    g = create_test_heterograph()

    def rfunc(nodes):
        return {'y': F.sum(nodes.mailbox['m'], 1)}

    def rfunc2(nodes):
        return {'y': F.max(nodes.mailbox['m'], 1)}

    def mfunc(edges):
        return {'m': edges.src['h']}

    def afunc(nodes):
        return {'y': nodes.data['y'] + 1}

    g.nodes['user'].data['h'] = F.ones((3, 2))
    g.send([2, 3], mfunc, etype='plays')
    g.recv([0, 1], rfunc, etype='plays')
    y = g.nodes['game'].data['y']
    assert F.array_equal(y, F.tensor([[0., 0.], [2., 2.]]))
    g.nodes['game'].data.pop('y')

    # only one type
    play_g = g['plays']
    play_g.send([2, 3], mfunc)
    play_g.recv([0, 1], rfunc)
    y = g.nodes['game'].data['y']
    assert F.array_equal(y, F.tensor([[0., 0.], [2., 2.]]))

    # TODO(minjie): the following code will fail because messages are
    # not shared with the base graph. However, since send and recv
    # are rarely used, there is no fix at the moment.
    # g['plays'].send([2, 3], mfunc)
    # g['plays'].recv([0, 1], mfunc)

    # test fail case
    # fail due to multiple types
    fail = False
    try:
        g.send([2, 3], mfunc)
    except dgl.DGLError:
        fail = True
    assert fail

    fail = False
    try:
        g.recv([0, 1], rfunc)
    except dgl.DGLError:
        fail = True
    assert fail

    # test multi recv
    g.send(g.edges(etype='plays'), mfunc, etype='plays')
    g.send(g.edges(etype='wishes'), mfunc, etype='wishes')
    g.multi_recv([0, 1],
                 {'plays': rfunc,
                  ('user', 'wishes', 'game'): rfunc2},
                 'sum')
    assert F.array_equal(g.nodes['game'].data['y'],
                         F.tensor([[3., 3.], [3., 3.]]))

    # test multi recv with apply function
    g.send(g.edges(etype='plays'), mfunc, etype='plays')
    g.send(g.edges(etype='wishes'), mfunc, etype='wishes')
    g.multi_recv([0, 1],
                 {'plays': (rfunc, afunc),
                  ('user', 'wishes', 'game'): rfunc2},
                 'sum', afunc)
    assert F.array_equal(g.nodes['game'].data['y'],
                         F.tensor([[5., 5.], [5., 5.]]))

    # test cross reducer
    g.nodes['user'].data['h'] = F.randn((3, 2))
    for cred in ['sum', 'max', 'min', 'mean']:
        g.send(g.edges(etype='plays'), mfunc, etype='plays')
        g.send(g.edges(etype='wishes'), mfunc, etype='wishes')
        g.multi_recv([0, 1],
                     {'plays': (rfunc, afunc), 'wishes': rfunc2},
                     cred, afunc)
        y = g.nodes['game'].data['y']
        g1 = g['plays']
        g2 = g['wishes']
        g1.send(g1.edges(), mfunc)
        g1.recv(g1.nodes('game'), rfunc, afunc)
        y1 = g.nodes['game'].data['y']
        g2.send(g2.edges(), mfunc)
        g2.recv(g2.nodes('game'), rfunc2)
        y2 = g.nodes['game'].data['y']
        yy = get_redfn(cred)(F.stack([y1, y2], 0), 0)
        yy = yy + 1  # final afunc
        assert F.array_equal(y, yy)

    # test fail case
    # fail because cannot infer ntype
    fail = False
    try:
        g.multi_recv([0, 1], {'plays': rfunc, 'follows': rfunc2}, 'sum')
    except dgl.DGLError:
        fail = True
    assert fail