def test_graph_basic(self):
    np.random.seed(0)
    v1 = Vector(10)
    v2 = Linear(10)
    self.assertEqual((v1 >> v2).left, v1.chain(v2).left)
    self.assertEqual((v1 >> v2).right, v1.chain(v2).right)
def test_graph_parameters(self):
    np.random.seed(0)
    net1 = Vector(10) >> Linear(10)
    net2 = net1 >> Linear(10)
    self.assertEqual(net1.get_state(as_list=True),
                     net2.left.get_state(as_list=True))
def test_graph_parameters(self):
    np.random.seed(0)
    net1 = Vector(10) >> Full(10)
    net2 = net1 >> Full(10)
    self.assertEqual(net1.get_state(), net2.left.get_state())
def test_graph_basic(self):
    np.random.seed(0)
    net1 = Vector(10) >> Full(10)
    np.random.seed(0)
    net2 = Vector(10).chain(Full(10))
    self.assertEqual(net1, net2)
def test_freeze_parameters2(self):
    np.random.seed(0)
    gan = (Vector(10) >> Full(20)) >> (Full(10) >> Full(2))
    self.assertEqual(gan.left.freeze().get_state(), gan.left.get_state())
    self.assertEqual(gan.right.freeze().get_state(), gan.right.get_state())
    self.assertEqual(gan.right.freeze().get_parameters(), [])
    self.assertNotEqual(gan.right.get_parameters(), [])
    self.assertEqual((Vector(20) >> gan.right).freeze().get_parameters(), [])
    self.assertNotEqual((Vector(20) >> gan.right).get_parameters(), [])
    self.assertEqual((Vector(20) >> gan.right).freeze().get_state(),
                     (Vector(20) >> gan.right).get_state())
    self.assertEqual(gan.left.freeze().get_parameters(), [])
    self.assertNotEqual(gan.left.get_parameters(), [])
def test_freeze_parameters(self):
    np.random.seed(0)
    net1 = Vector(10) >> Linear(10)
    net1.initialize()
    self.assertEqual(net1.freeze().get_state(as_list=True),
                     net1.get_state(as_list=True))
def test_freeze_parameters2(self):
    np.random.seed(0)
    gan = (Vector(10) >> Linear(20)) >> (Linear(10) >> Linear(2))
    gan.initialize()
    self.assertEqual(gan.left.freeze().get_state(as_list=True),
                     gan.left.get_state(as_list=True))
    self.assertEqual(gan.right.freeze().get_state(as_list=True),
                     gan.right.get_state(as_list=True))
    self.assertEqual(gan.right.freeze().get_graph_parameters(), [])
    self.assertNotEqual(gan.right.get_graph_parameters(), [])
    self.assertEqual(
        (Vector(20) >> gan.right).freeze().get_graph_parameters(), [])
    self.assertNotEqual((Vector(20) >> gan.right).get_graph_parameters(), [])
    self.assertEqual(
        (Vector(20) >> gan.right).freeze().get_state(as_list=True),
        (Vector(20) >> gan.right).get_state(as_list=True))
    self.assertEqual(gan.left.freeze().get_graph_parameters(), [])
    self.assertNotEqual(gan.left.get_graph_parameters(), [])
def test_reshape2(self):
    input = Vector(30)
    layer = Reshape((10, 3))
    net_func = self.create_function(input >> layer)
    np.random.seed(0)
    X = np.random.normal(size=(100, 30))
    result = np.reshape(X, (100, 10, 3))
    result2 = net_func(X)
    self.assertEqual(result.shape, result2.shape)
    np.testing.assert_almost_equal(result, result2)
def run_lambda(self, func, np_func, shape_func=lambda x: x):
    np.random.seed(0)
    in_size = 10
    batch_size = 100
    layer = Lambda(func, shape_func=shape_func)
    input = Vector(in_size)
    net_func = self.create_function(input >> layer)
    B = batch_size
    X = np.random.normal(size=(B, in_size))
    result = net_func(X)
    result2 = np_func(X)
    np.testing.assert_almost_equal(result, result2, decimal=5)
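# Hypothetical usage sketch, not part of the original suite: it drives
# `run_lambda` with an elementwise doubling, so the symbolic function passed
# to `Lambda` and the NumPy reference are literally the same lambda and no
# extra backend ops are assumed. The default identity `shape_func` applies,
# since doubling preserves the output shape.
def test_lambda_double_example(self):
    self.run_lambda(lambda x: x * 2, lambda x: x * 2)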
def run_simple_elementwise(self, type, activation,
                           in_size=2, out_size=3, batch_size=5, **kwargs):
    np.random.seed(0)
    layer = type(**kwargs)
    input = Vector(in_size)
    net_func = self.create_function(input >> layer)
    B = batch_size
    X = np.random.normal(size=(B, in_size))
    result = net_func(X)
    result2 = activation(X)
    np.testing.assert_almost_equal(result, result2, decimal=5)
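# Hypothetical usage sketch, not part of the original suite: it assumes the
# library provides an argument-free elementwise `Relu` layer (not shown in
# the snippets above) and checks it against the NumPy rectifier.
def test_elementwise_relu_example(self):
    self.run_simple_elementwise(Relu, lambda x: np.maximum(x, 0))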
def test_stateful_lstm2(self):
    weights = np.ones((self.lstm.get_shape_in(), self.lstm.get_shape_out()))
    for _ in range(10):
        X = np.random.normal(size=(10, 1, 1))
        for s in range(1, 3):
            state = np.zeros((1, self.lstm.get_shape_out()))
            out = np.zeros((1, self.lstm.get_shape_out()))
            for i in range(s):
                out, state = self.lstm_forward(X[i], out, state, weights)
            lstm = Sequence(Vector(1, 1), s) >> LSTM(1, 1, stateful=True)
            self.set_weights(lstm.right, 1)
            lstm_out = lstm.predict(X[:s])[-1]
            lstm_hidden = T.get_value(lstm.right.states[0])
            lstm_state = T.get_value(lstm.right.states[1])
            np.testing.assert_almost_equal(lstm_hidden, lstm_out, 5)
            np.testing.assert_almost_equal(lstm_out, out, 5)
            np.testing.assert_almost_equal(out, lstm_hidden, 5)
            np.testing.assert_almost_equal(lstm_state, state, 5)
def run_simple_full(self, type, activation,
                    in_size=2, out_size=3, batch_size=5, **kwargs):
    np.random.seed(0)
    layer = type(in_size, out_size, **kwargs)
    input = Vector(in_size)
    net_func = self.create_function(input >> layer)
    B = batch_size
    X = np.random.normal(size=(B, in_size))
    result = net_func(X)
    state = layer.get_state()
    W, b = np.array(state['W']), np.array(state['b'])
    result2 = activation(np.dot(X, W) + b)
    np.testing.assert_almost_equal(result, result2, decimal=5)
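# Hypothetical usage sketch, not part of the original suite: it assumes
# `Tanh(in_size, out_size)` builds a dense layer with a tanh nonlinearity
# (elsewhere in this code Tanh is only constructed with a single size),
# matching the y = tanh(XW + b) reference that `run_simple_full` computes.
def test_full_tanh_example(self):
    self.run_simple_full(Tanh, np.tanh)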
def log_likelihood(self, batch_z, batch):
    x = Vector(self.input_size, placeholder=batch, is_input=False)
    mu, sigma = (x >> self.q_network).get_graph_outputs()
    sigma = T.sqrt(T.exp(sigma))
    return T.mean(log_normal(batch_z, mu, sigma, self.embedding_size, dim=2))
def setUp(self):
    self.lstm = Sequence(Vector(1)) >> LSTM(1, 1, use_forget_gate=False)
    self.lstm_forget = Sequence(Vector(1)) >> LSTM(1, 1, use_forget_gate=True)
def test_freeze_parameters(self):
    np.random.seed(0)
    net1 = Vector(10) >> Full(10)
    self.assertEqual(net1.freeze().get_state(), net1.get_state())
def setUp(self):
    self.lstm = Sequence(Vector(1, 1), 1) >> LSTM(1, 1, stateful=True)
def log_likelihood(self, batch, batch_z):
    z = Vector(self.input_size, placeholder=batch_z, is_input=False)
    p = (z >> self.p_network).get_graph_outputs()[0]
    return T.mean(batch * p + (1 - batch) * T.log(1 - p + 1e-10))
def test_freeze(self):
    net1 = Vector(10) >> Full(10)
    self.assertEqual(Freeze(net1).get_state(), net1.get_state())
    self.assertEqual(Freeze(net1).get_parameters(), [])
def test_freeze(self):
    net1 = Vector(10) >> Linear(10)
    self.assertEqual(Freeze(net1).get_graph_parameters(), [])
def create_harmonic(M):
    # Cumulative harmonic numbers H_1..H_M, gathered below via `harmonic`.
    return np.cumsum(1.0 / np.arange(1, M + 1)).astype(np.float32)

T.set_default_device('/cpu:0')

c = T.scalar(name='c')
segments = T.matrix(dtype='int32', name='segments')
a_idx = segments[:, 0]
b_idx = segments[:, 1]
leaf_segment = segments[:, 2]
m = segments[:, 3]
log_fac = segments[:, 4]

x = T.matrix(name='x')
e = T.matrix(name='e')

q_network = (Vector(X.shape[1], placeholder=x, is_input=False)
             >> Repeat(Tanh(200), 2))
q_mu_network = q_network >> Linear(D)
q_mu = q_mu_network.get_outputs()[0].get_placeholder()
q_sigma_network = q_network >> Linear(D)
q_sigma = tf.sqrt(tf.exp(q_sigma_network.get_outputs()[0].get_placeholder()))

z = q_mu + e * q_sigma

values, times = T.variable(values), T.variable(times)
values = tf.concat(0, [z, values])
harmonic = T.variable(create_harmonic(M))

a_batch_values = T.gather(values, a_idx)
a_batch_times = T.gather(times, a_idx)
b_batch_values = T.gather(values, b_idx)
b_batch_times = T.gather(times, b_idx)
harmonic_m = T.gather(harmonic, m - 1)
def setUp(self):
    self.lstm = Sequence(Vector(1, batch_size=1), 1) >> LSTM(
        1, 1, stateful=True)
    self.lstm.initialize()
def sample_z(self, batch, batch_noise, feed_dict={}):
    x = Vector(self.input_size, placeholder=batch, is_input=False)
    mu, sigma = (x >> self.q_network).get_graph_outputs()
    sigma = T.sqrt(T.exp(sigma))
    return mu + sigma * batch_noise
    argparser.add_argument('team')
    argparser.add_argument('model')
    argparser.add_argument('converter')
    argparser.add_argument('--browser', default='firefox')
    return argparser.parse_args()


if __name__ == "__main__":
    args = parse_args()

    with open(args.team) as fp:
        team_text = fp.read()
    with open(args.converter) as fp:
        converter = pickle.load(fp)

    net = (Vector(converter.get_input_dimension())
           >> Repeat(Tanh(1000), 2)
           >> Softmax(converter.get_output_dimension()))
    with open(args.model) as fp:
        net.set_state(pickle.load(fp))

    client = ShowdownClient(NeuralNetworkAgent(net, converter),
                            browser=args.browser)
    client.start()
    client.choose_name('asdf141231232', 'onmabd')
    client.mute()
    client.teambuilder()
    client.create_team(team_text, 'lopunny')
    client.home()
    client.select_battle_format('ou')
    client.play(10)
def encode(self, batch):
    x = Vector(self.input_size, placeholder=batch, is_input=False)
    mu, sigma = (x >> self.q_network).get_graph_outputs()
    return mu