def __init__(self, model, batch_size=1, input_size=1):
    self.model = model
    # self.bins = np.linspace(-1, 1, self.model.num_classes)
    _, self.bins = mu_law_bins(self.model.num_classes)

    inputs = tf.placeholder(tf.float32, [batch_size, input_size], name='inputs')

    print('Make Generator.')

    count = 0
    h = inputs

    init_ops = []
    push_ops = []
    for b in range(self.model.num_blocks):
        for i in range(self.model.num_layers):
            rate = 2 ** i
            name = 'b{}-l{}'.format(b, i)

            # The first layer reads the raw scalar input; all later layers
            # read hidden activations of width num_hidden.
            if count == 0:
                state_size = 1
            else:
                state_size = self.model.num_hidden

            # One FIFO queue per dilated layer caches the activation from
            # `rate` steps ago, so each new sample is produced with O(1) work
            # instead of re-running the whole receptive field.
            q = tf.FIFOQueue(rate,
                             dtypes=tf.float32,
                             shapes=(batch_size, state_size))
            init = q.enqueue_many(tf.zeros((rate, batch_size, state_size)))

            state_ = q.dequeue()
            push = q.enqueue([h])
            init_ops.append(init)
            push_ops.append(push)

            h = _causal_linear(h, state_, name=name, activation=tf.nn.relu)
            count += 1

    outputs = _output_linear(h)

    out_ops = [tf.nn.softmax(outputs)]
    out_ops.extend(push_ops)

    self.inputs = inputs
    self.init_ops = init_ops
    self.out_ops = out_ops

    # Initialize queues.
    self.model.sess.run(self.init_ops)
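
# For reference, a minimal sketch (not part of the original code) of how a
# generator built by the __init__ above could be driven one sample at a time.
# It assumes the class exposes `inputs`, `out_ops`, `bins`, and `model` as set
# above; the `generate` helper itself and its arguments are hypothetical.
def generate(gen, num_samples, seed=0.0):
    # out_ops[0] is the softmax over quantization bins; the remaining ops push
    # the new activations into the per-layer queues as a side effect.
    sample = np.full((1, 1), seed, dtype=np.float32)  # batch_size=1, input_size=1
    generated = []
    for _ in range(num_samples):
        results = gen.model.sess.run(gen.out_ops, feed_dict={gen.inputs: sample})
        p = results[0][0].astype(np.float64)
        p /= p.sum()                         # guard against float32 round-off
        idx = np.random.choice(len(p), p=p)  # sample a bin from the predicted distribution
        value = gen.bins[idx]                # assumes bins map bin index -> amplitude
        generated.append(value)
        sample = np.array([[value]], dtype=np.float32)  # feed the sample back in
    return np.array(generated)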
def __init__(self, model, batch_size=1, input_size=1):
    self.model = model
    # 256 hard-coded quantization bins, uniformly spaced in [-1, 1].
    self.bins = np.linspace(-1, 1, 256)

    inputs = tf.placeholder(tf.float32, [batch_size, input_size], name='inputs')

    print('Make Generator.')

    count = 0
    h = inputs

    push_ops = []
    for b in range(2):            # num_blocks hard-coded to 2
        for i in range(14):       # num_layers hard-coded to 14
            rate = 2 ** i
            name = 'b{}-l{}'.format(b, i)
            if count == 0:
                state_size = 1
            else:
                state_size = 128  # num_hidden hard-coded to 128

            # Variable-backed ring buffer holding this layer's activation
            # from `rate` steps ago.
            q = Queue(batch_size=batch_size,
                      state_size=state_size,
                      buffer_size=rate,
                      name=name)

            state_ = q.pop()
            push = q.push(h)
            push_ops.append(push)

            h = _causal_linear(h, state_, name=name, activation=tf.nn.relu)
            count += 1

    outputs = _output_linear(h)

    # Greedy decoding: take the most likely bin at each step.
    out_ops = [tf.argmax(tf.nn.softmax(outputs), 1)]
    out_ops.extend(push_ops)

    # Initialize only the variables created by the queues above.
    new_vars = [var for var in tf.trainable_variables()
                if 'pointer' in var.name or 'state_buffer' in var.name]
    self.model.sess.run(tf.variables_initializer(new_vars))

    self.inputs = inputs
    self.out_ops = out_ops
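
# The Queue class used above is defined elsewhere in the project; the sketch
# below is an assumption about its shape, not the actual implementation, and is
# renamed _QueueSketch to avoid clashing with the real class. It only shows a
# variable-backed ring buffer consistent with the 'pointer' / 'state_buffer'
# names filtered by the initializer above. Note the ordering of the read (pop)
# and the overwrite (push) within one session.run is not enforced here.
class _QueueSketch(object):
    def __init__(self, batch_size, state_size, buffer_size, name):
        self.buffer_size = buffer_size
        # Position of the oldest cached activation. Trainable by default, so
        # the filter over tf.trainable_variables() above would find it.
        self.pointer = tf.Variable(0, dtype=tf.int32, name=name + '_pointer')
        # Circular buffer of the last `buffer_size` activations.
        self.state_buffer = tf.Variable(
            tf.zeros([buffer_size, batch_size, state_size]),
            name=name + '_state_buffer')

    def pop(self):
        # Read the activation from `buffer_size` steps ago.
        return tf.gather(self.state_buffer, self.pointer)

    def push(self, item):
        # Overwrite the slot that was just read, then advance the pointer.
        update = tf.scatter_update(self.state_buffer,
                                   tf.expand_dims(self.pointer, 0),
                                   tf.expand_dims(item, 0))
        with tf.control_dependencies([update]):
            return tf.assign(self.pointer,
                             tf.mod(self.pointer + 1, self.buffer_size))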