def get_data_from_line(self, batch_left):
    utt_id = self.utt_ids[self.align_ind]
    align = self.alignments[self.align_ind]
    ll = self.likelihoods[utt_id]
    # + 1 since add </s>, need this to match the batches from CharStream
    N = min(len(align) + 1, batch_left)
    data = empty((self.feat_dim, N))
    for k in xrange(0, N):
        if len(align) > 0:
            a = align[max(self.char_ind - 1, 0)]
            llk = ll[:, a:a + SOURCE_CONTEXT]
        else:
            llk = blank_loglikes(1)
        if llk.shape[1] < SOURCE_CONTEXT:
            #llk = gnp.concatenate((llk, uniform_loglikes(SOURCE_CONTEXT - llk.shape[1])), axis=1)
            #llk = np.hstack((llk, uniform_loglikes(SOURCE_CONTEXT - llk.shape[1])))
            llk = np.hstack((llk, blank_loglikes(SOURCE_CONTEXT - llk.shape[1])))
        data[:, k] = llk.ravel()
        self.char_ind += 1
    return data
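# Standalone sketch of the padding pattern used in get_data_from_line above:
# when the log-likelihood slice is narrower than SOURCE_CONTEXT, it is
# right-padded with "blank" columns so every raveled feature vector has the
# same fixed length. Assumptions (not from this repo): plain numpy arrays,
# a hypothetical NUM_CLASSES, and a uniform stand-in for blank_loglikes().
import numpy as np

SOURCE_CONTEXT = 5
NUM_CLASSES = 3

def blank_loglikes_sketch(width):
    # Uniform log-likelihoods, standing in for the real blank_loglikes()
    return np.full((NUM_CLASSES, width), np.log(1.0 / NUM_CLASSES))

llk = np.random.rand(NUM_CLASSES, 2)  # only 2 frames left in the utterance
if llk.shape[1] < SOURCE_CONTEXT:
    llk = np.hstack((llk, blank_loglikes_sketch(SOURCE_CONTEXT - llk.shape[1])))
assert llk.ravel().shape == (NUM_CLASSES * SOURCE_CONTEXT,)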
def bprop(self):
    logger.debug("%s backprop" % str(self))
    # FIXME Assuming just 1 successor for now
    assert len(self.succ) == 1
    succ_grad = self.succ[0].full_grad
    # FIXME Hack to avoid large sparse matrix multiply
    if type(self.succ[0]) is SumNode:
        # Which leads to softmax...
        if self.full_grad is None:
            self.full_grad = empty((self.W.shape[1], succ_grad.shape[1]))
            self.W.grad = empty(self.W.shape)
        for k in range(self.full_grad.shape[0]):
            # TODO self.full_grad[k, :] = ?
            # TODO self.W.grad[k, :] = ?
            pass
    else:
        self.full_grad = mult(self.W.out.T, succ_grad)
        # FIXME Multiplication below is wrong
        self.W.grad = mult(succ_grad, self.succ[0].out.T)
    # TODO Check this
    self.b.grad = succ_grad
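# Sketch (plain numpy, hypothetical sizes) of the identity behind the
# "avoid large sparse matrix multiply" comment in bprop above: multiplying by
# a one-hot selection matrix is the same as gathering columns by index, so a
# dense mult against a mostly-zero matrix can in principle be replaced by
# indexed reads/writes. This is an illustration, not code from this repo.
import numpy as np

W = np.random.randn(4, 6)
idx = np.array([2, 0, 5])                 # selected columns
S = np.zeros((6, len(idx)))               # one-hot selection matrix
S[idx, np.arange(len(idx))] = 1.0
assert np.allclose(W.dot(S), W[:, idx])   # dense multiply == column gather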
def alloc_params(self):
    hps = self.hps
    self.params['Wih'] = vp_init((hps.hidden_size, hps.input_size))
    self.params['Wsh'] = vp_init((hps.hidden_size, hps.source_size))
    self.params['bih'] = zeros((hps.hidden_size, 1))
    for k in xrange(hps.hidden_layers - 1):
        self.params['W%d' % (k + 1)] = vp_init((hps.hidden_size, hps.hidden_size))
        self.params['b%d' % (k + 1)] = zeros((hps.hidden_size, 1))
    self.params['Who'] = vp_init((hps.output_size, hps.hidden_size))
    self.params['bho'] = zeros((hps.output_size, 1))
    self.count_params()
    # Allocate grads as well
    self.grads = {}
    for k in self.params:
        self.grads[k] = empty(self.params[k].shape)
    logger.info('Allocated gradients')
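# Self-contained sketch of the parameter layout alloc_params builds:
# input->hidden, source->hidden, (hidden_layers - 1) stacked hidden weights,
# then hidden->output. Assumptions (not from this repo): a hypothetical Hps
# namedtuple with illustrative sizes and numpy stand-ins for vp_init/zeros.
import numpy as np
from collections import namedtuple

Hps = namedtuple('Hps', ['input_size', 'source_size', 'hidden_size',
                         'output_size', 'hidden_layers'])
hps = Hps(input_size=39, source_size=20, hidden_size=128,
          output_size=34, hidden_layers=3)

def vp_init_sketch(shape):
    # Small random init, standing in for vp_init
    return np.random.randn(*shape) * 0.01

params = {'Wih': vp_init_sketch((hps.hidden_size, hps.input_size)),
          'Wsh': vp_init_sketch((hps.hidden_size, hps.source_size)),
          'bih': np.zeros((hps.hidden_size, 1))}
for k in xrange(hps.hidden_layers - 1):
    params['W%d' % (k + 1)] = vp_init_sketch((hps.hidden_size, hps.hidden_size))
    params['b%d' % (k + 1)] = np.zeros((hps.hidden_size, 1))
params['Who'] = vp_init_sketch((hps.output_size, hps.hidden_size))
params['bho'] = np.zeros((hps.output_size, 1))
print '%d parameters total' % sum(p.size for p in params.values())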
def __init__(self, name, data_inp, shape, init_fn=None):
    super(IndexedParamNode, self).__init__(name, shape, init_fn=init_fn)
    self.data_inp = data_inp
    self.params_batch = empty((data_inp.feat_dim * self.params.shape[0],
                               data_inp.batch_size))
def cost_and_grad(self, data, labels, back=True, prev_h0=None):
    hps = self.hps
    T = data.shape[1]
    bsize = data.shape[2]

    # FIXME gnumpy reallocates if try and use same parameters?
    #us = self.us[:, 0:T, 0:bsize]
    #dus = self.dus[:, 0:T, 0:bsize]
    #hs = self.hs[:, 0:T, 0:bsize]
    #dhs = self.dhs[:, 0:T, 0:bsize]
    #probs = self.probs[:, 0:T, 0:bsize]
    #dprobs = self.dprobs[:, 0:T, 0:bsize]
    #costs = self.costs[0:T, 0:bsize]
    us = list()
    dus = list()
    hs = list()
    dhs = list()
    h0 = list()
    for k in xrange(hps.hidden_layers):
        us.append(list())
        dus.append(list())
        hs.append(list())
        dhs.append(list())
        h0.append(empty((hps.hidden_size, bsize)))
        for t in xrange(T):
            us[k].append(zeros((hps.hidden_size, bsize)))
            dus[k].append(zeros((hps.hidden_size, bsize)))
            hs[k].append(zeros((hps.hidden_size, bsize)))
            dhs[k].append(zeros((hps.hidden_size, bsize)))
    probs = list()
    for t in xrange(T):
        probs.append(zeros((hps.output_size, bsize)))
    costs = np.zeros((T, bsize))

    if prev_h0 is not None:
        h0 = prev_h0
    else:
        for k in xrange(hps.hidden_layers):
            h0[k] = tile(self.params['h0'][:, k].reshape(-1, 1), bsize)

    bih = self.params['bih']
    Wih = self.params['Wih']
    Whh = self.params['Whh']
    bhh = self.params['bhh']
    Who = self.params['Who']
    bho = self.params['bho']

    # Forward prop
    for t in xrange(T):
        for k in xrange(hps.hidden_layers):
            if t == 0:
                hprev = h0[k]
            else:
                hprev = hs[k][t - 1]

            if k == 0:
                us[k][t] = mult(Wih, data[:, t, :]) + bih
            else:
                us[k][t] = mult(self.params['Wh%d' % k], hs[k - 1][t])

            if k == hps.recurrent_layer - 1:
                us[k][t] += mult(Whh, hprev) + bhh
                # Clip maximum activation
                mask = us[k][t] < hps.max_act
                us[k][t] = us[k][t] * mask + hps.max_act * (1 - mask)
            elif k != 0:
                us[k][t] += self.params['bh%d' % k]

            hs[k][t] = self.nl(us[k][t])

        probs[t] = softmax(mult(Who, hs[-1][t]) + bho)

    self.last_h = list()
    for k in xrange(hps.hidden_layers):
        self.last_h.append(hs[k][-1])

    if labels is None:
        return None, probs

    probs_neg_log = list()
    dprobs = list()
    for t in xrange(T):
        probs_neg_log.append(as_np(-1 * log(probs[t])))
        dprobs.append(as_np(probs[t].copy()))
    for k in xrange(bsize):
        for t in xrange(len(labels[k])):
            costs[t, k] = probs_neg_log[t][labels[k][t], k]
            dprobs[t][labels[k][t], k] -= 1
    for t in xrange(T):
        dprobs[t] = array(dprobs[t])

    # NOTE Summing costs over time
    # NOTE FIXME Dividing by T to get better sense if objective
    # is decreasing, remove for grad checking
    cost = costs.sum() / bsize / float(T)

    if not back:
        return cost, probs

    # Backprop
    for k in self.grads:
        self.grads[k][:] = 0

    for t in reversed(xrange(T)):
        self.grads['bho'] += dprobs[t][:, :].sum(axis=-1).reshape((-1, 1)) / bsize
        self.grads['Who'] += mult(dprobs[t], hs[-1][t].T) / bsize
        for k in reversed(xrange(hps.hidden_layers)):
            if k == hps.hidden_layers - 1:
                dhs[k][t] += mult(Who.T, dprobs[t])
            else:
                dhs[k][t] += mult(self.params['Wh%d' % (k + 1)].T, dhs[k + 1][t])
            dus[k][t] += get_nl_grad(self.hps.nl, us[k][t]) * dhs[k][t]
            if k > 0:
                self.grads['Wh%d' % k] += mult(dus[k][t], hs[k - 1][t].T) / bsize
                self.grads['bh%d' % k] += dus[k][t].sum(axis=-1).reshape((-1, 1)) / bsize
            if k == hps.recurrent_layer - 1:
                if t == 0:
                    hprev = h0[k]
                    self.grads['h0'][:, k] = mult(Whh.T, dus[k][t]).sum(axis=-1) / bsize
                else:
                    hprev = hs[k][t - 1]
                    dhs[k][t - 1] = mult(Whh.T, dus[k][t])
                self.grads['Whh'] += mult(dus[k][t], hprev.T) / bsize
                self.grads['bhh'] += dus[k][t].sum(axis=-1).reshape((-1, 1)) / bsize
        self.grads['Wih'] += mult(dus[0][t], data[:, t, :].T) / bsize
        self.grads['bih'] += dus[0][t].sum(axis=-1).reshape((-1, 1)) / bsize

    return cost, self.grads
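# Hedged sketch of a central-difference gradient check for cost_and_grad (the
# NOTE above says to drop the 1/T scaling on the cost before checking, since
# the analytic grads are only divided by bsize). Assumptions: `model` exposes
# the params dict and cost_and_grad used above, and the params behave like
# numpy arrays under item assignment. Standard technique, not code from this repo.
import numpy as np

def grad_check_sketch(model, data, labels, eps=1e-4, n_checks=5):
    cost, grads = model.cost_and_grad(data, labels)
    for name in model.params:
        W = model.params[name]
        for _ in xrange(n_checks):
            idx = tuple(np.random.randint(d) for d in W.shape)
            orig = W[idx]
            W[idx] = orig + eps
            cost_plus, _ = model.cost_and_grad(data, labels, back=False)
            W[idx] = orig - eps
            cost_minus, _ = model.cost_and_grad(data, labels, back=False)
            W[idx] = orig
            num_grad = (cost_plus - cost_minus) / (2 * eps)
            print '%s %s analytic %g numerical %g' % (name, idx,
                                                      grads[name][idx], num_grad)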
def alloc_grads(self):
    # Call after allocating parameters
    self.grads = {}
    for k in self.params:
        self.grads[k] = empty(self.params[k].shape)
    logger.info('Allocated gradients')
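# Minimal sketch of how the params/grads dicts allocated above are typically
# consumed after cost_and_grad has filled the gradients: an in-place SGD step.
# Assumptions: numpy-like arrays and a hypothetical learning rate `lr`.
def sgd_step_sketch(params, grads, lr=1e-2):
    for name in params:
        params[name] -= lr * grads[name]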