def test_mult():
    # A transposition composed with itself gives the identity.
    print(mult([to_perm([1, 2]), to_perm([1, 2])]))
    print(Permutation([]))
    print(mult([to_perm([1, 2]), to_perm([1, 2])]) == Permutation([]))
    # Composing distinct transpositions is order-dependent.
    print(mult([to_perm([1, 2]), to_perm([1, 3])]))
    print(mult([to_perm([1, 3]), to_perm([1, 2])]))
    # The identity is a unit for mult.
    print(mult([to_perm([]), to_perm([1, 2])]))
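# The primitives used throughout these permutation snippets (Permutation,
# to_perm, mult, apply, inverse) are defined elsewhere in the codebase. The
# minimal sketch below only captures the semantics the tests above assume --
# to_perm builds a single cycle, mult composes a list of permutations with the
# leftmost acting last, Permutation([]) is the identity -- and is an
# assumption, not the original implementation.
class Permutation:
    def __init__(self, mapping):
        # mapping[i] is the image of point i + 1; points past the end are fixed.
        self.mapping = list(mapping)
        self.n = len(self.mapping)

    def __call__(self, x):
        return self.mapping[x - 1] if x <= self.n else x

    def __eq__(self, other):
        m = max(self.n, other.n)
        return all(self(x) == other(x) for x in range(1, m + 1))

    def __hash__(self):
        # Hash a canonical form (trailing fixed points dropped) so that
        # equal permutations hash alike.
        m = self.n
        while m > 0 and self.mapping[m - 1] == m:
            m -= 1
        return hash(tuple(self.mapping[:m]))


def to_perm(cycle):
    # Build a permutation from one cycle, e.g. to_perm([1, 2]) swaps 1 and 2.
    if not cycle:
        return Permutation([])
    mapping = list(range(1, max(cycle) + 1))
    for a, b in zip(cycle, cycle[1:] + [cycle[0]]):
        mapping[a - 1] = b
    return Permutation(mapping)


def mult(perms):
    # Compose a list of permutations; the leftmost acts last (assumption).
    n = max((p.n for p in perms), default=0)
    images = []
    for x in range(1, n + 1):
        y = x
        for p in reversed(perms):
            y = p(y)
        images.append(y)
    return Permutation(images)


def apply(p, xs):
    # Image of each point in xs under p.
    return [p(x) for x in xs]


def inverse(p):
    inv = [0] * p.n
    for x in range(1, p.n + 1):
        inv[p(x) - 1] = x
    return Permutation(inv)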
def check_belongs(chain: FullStabilizerChain, val: Permutation, step=0):
    # Sift val through the stabilizer chain: at each level, peel off tree-edge
    # permutations until the residue fixes the base point, then recurse.
    # Returns (is_member, certificate), where the certificate collects the
    # inverses of the edges used.
    if step == len(chain.trees):
        return (val == Permutation([]), [])
    b = chain.base[step]
    u = apply(val, [b])[0]
    if u not in chain.trees[step].orbit:
        return (False, [])
    node = chain.trees[step].node_dict[u]
    new_val = val
    cert = []
    while u != b:
        new_val = mult([node.perm, new_val])
        u = apply(node.perm, [u])[0]
        cert.append(inverse(node.perm))
        node = node.parent
    res = check_belongs(chain, new_val, step + 1)
    cert.extend(res[1])
    return (res[0], cert)
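# Hedged usage sketch: build_stabilizer_chain is hypothetical (the chain
# constructor is not among these snippets), but given a FullStabilizerChain
# for S_3, membership should hold for any permutation of {1, 2, 3}:
# chain = build_stabilizer_chain([to_perm([1, 2]), to_perm([1, 2, 3])])
# ok, cert = check_belongs(chain, to_perm([1, 3]))
# assert ok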
def bprop(self):
    logger.debug('%s backprop' % str(self))
    # FIXME Assuming just 1 successor for now
    assert len(self.succ) == 1
    succ_grad = self.succ[0].full_grad
    logger.debug('W shape: %s' % str(self.W.shape))
    self.full_grad = self.W.out.T * succ_grad  # TODO Check this
    self.W.grad = mult(succ_grad, self.succ[0].out.T)
def bprop(self): logger.debug("%s backprop" % str(self)) # FIXME Assuming just 1 successor for now assert len(self.succ) == 1 succ_grad = self.succ[0].full_grad print self.W.shape self.full_grad = self.W.out.T * succ_grad # TODO Check this self.W.grad = mult(succ_grad, self.succ[0].out.T)
def get_h_u(self, u):
    # Walk the Schreier tree from u back to the root, accumulating edge
    # permutations; the product maps u to the root, so its inverse is the
    # coset representative sending the root to u.
    assert u in self.orbit
    h = Permutation([])
    node = self.node_dict[u]
    while node.val != self.root.val:
        h = mult([node.perm, h])
        node = node.parent
    return inverse(h)
def make_gens(tree: Tree, S: Iterable[Permutation]):
    # Schreier's lemma: for each generator s and orbit point u, the product
    # h_su^{-1} * s * h_u fixes the root, and these elements together
    # generate the point stabilizer.
    newS = set()
    for s in S:
        for u in tree.orbit:
            hu = tree.get_h_u(u)
            hsu = tree.get_h_u(apply(s, [u])[0])
            newS.add(mult([inverse(hsu), s, hu]))
    # print(len(newS))
    return newS
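# Hedged usage sketch: assuming a Schreier tree rooted at 1 has been built
# for S = [to_perm([1, 2]), to_perm([1, 2, 3])] (the tree constructor lives
# elsewhere in this codebase), every returned generator fixes the root:
# stab_gens = make_gens(tree, S)
# assert all(apply(g, [1])[0] == 1 for g in stab_gens)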
def bprop(self):
    logger.debug('%s backprop' % str(self))
    # FIXME Assuming just 1 successor for now
    assert len(self.succ) == 1
    succ_grad = self.succ[0].full_grad
    # FIXME Hack to avoid large sparse matrix multiply
    if type(self.succ[0]) is SumNode:  # Which leads to softmax...
        if self.full_grad is None:
            self.full_grad = empty((self.W.shape[1], succ_grad.shape[1]))
            self.W.grad = empty(self.W.shape)
        for k in range(self.full_grad.shape[0]):
            # TODO self.full_grad[k, :] = ?
            # TODO self.W.grad[k, :] = ?
            pass
    else:
        self.full_grad = mult(self.W.out.T, succ_grad)
        # FIXME Multiplication below is wrong
        self.W.grad = mult(succ_grad, self.succ[0].out.T)  # TODO Check this
        self.b.grad = succ_grad
def bprop(self): logger.debug("%s backprop" % str(self)) # FIXME Assuming just 1 successor for now assert len(self.succ) == 1 succ_grad = self.succ[0].full_grad # FIXME Hack to avoid large sparse matrix multiply if type(self.succ[0]) is SumNode: # Which leads to softmax... if self.full_grad is None: self.full_grad = empty((self.W.shape[1], succ_grad.shape[1])) self.W.grad = empty(self.W.shape) for k in range(self.full_grad.shape[0]): # TODO self.full_grad[k, :] = ? # TODO self.W.grad[k, :] = ? pass else: self.full_grad = mult(self.W.out.T, succ_grad) # FIXME Multiplication below is wrong self.W.grad = mult(succ_grad, self.succ[0].out.T) # TODO Check this self.b.grad = succ_grad
def normalize(S: Iterable[Permutation]):
    # Reduce a generating set by sifting: for each generator, find its first
    # moved point x; keep at most one element per (x, image) pair and cancel
    # the rest against the stored representative.
    newS = set()
    n = max(S, key=lambda x: x.n).n
    base = [{} for _ in range(n)]
    for s in S:
        for x in range(1, n + 1):
            u = apply(s, [x])[0]
            if u != x:
                if u in base[x - 1]:
                    # Cancel against the representative and keep sifting.
                    s = mult([inverse(s), base[x - 1][u]])
                else:
                    base[x - 1][u] = s
                    newS.add(s)
                    break
    # print(len(newS))
    return newS
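# Hedged usage note: the table keeps at most one representative per
# (first moved point, image) pair, and since the image always exceeds the
# first moved point, the returned set has at most n * (n - 1) / 2 elements
# while generating the same group. For example:
# reduced = normalize({to_perm([1, 2]), to_perm([1, 3]), to_perm([2, 3])})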
def test_mult(): print("a testar a multiplicação") a = float(random.random() * 100) b = float(random.random() * 100) if a < b: temp = a a = b b = temp assert (mult(a, a) == a * a and mult(a, a) > 0) assert (mult(a, b) == a * b and mult(a, b) > 0) assert (mult(a, -a) == -a * a and mult(a, -a) < 0) assert (mult(a, -b) == -b * a and mult(a, -b) < 0) assert (mult(b, a) == b * a and mult(b, a) > 0) assert (mult(b, b) == b * b and mult(b, b) > 0) assert (mult(b, -a) == b * -a and mult(b, -a) < 0) assert (mult(b, -b) == -b * b and mult(b, -b) < 0) assert (mult(-a, a) == -a * a and mult(-a, a) < 0) assert (mult(-a, b) == -a * b and mult(-a, b) < 0) assert (mult(-a, -a) == a * a and mult(-a, -a) > 0) assert (mult(-a, -b) == a * b and mult(-a, -b) > 0) assert (mult(-b, a) == -b * a and mult(-b, a) < 0) assert (mult(-b, b) == -b * b and mult(-b, b) < 0) assert (mult(-b, -a) == a * b and mult(-b, -a) > 0) assert (mult(-b, -b) == b * b and mult(-b, -b) > 0) assert mult(a, 0) == 0 assert mult(b, 0) == 0 assert mult(-a, 0) == 0 assert mult(-b, 0) == 0 assert mult(0, a) == 0 assert mult(0, b) == 0 assert mult(0, -a) == 0 assert mult(0, -b) == 0 assert mult(a, 1) == a assert mult(b, 1) == b assert mult(-a, 1) == -a assert mult(-b, 1) == -b assert mult(1, a) == a assert mult(1, b) == b assert mult(1, -a) == -a assert mult(1, -b) == -b
def fprop(self):
    logger.debug('%s prop: %s x %s + %s' % (str(self), str(self.W.shape),
                                            str(self.x.out.shape),
                                            str(self.b.out.shape)))
    self.out = mult(self.W.out, self.x.out) + self.b.out
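# Shape note (an assumption, read off the debug line above): with W of shape
# (m, n), x.out of shape (n, bsize), and b.out of shape (m, 1) broadcasting
# across the batch, out comes out as (m, bsize).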
def fprop(self):
    self.out = mult(self.W.out, self.x.out)
def cost_and_grad(self, data, labels, back=True, prev_h0=None):
    hps = self.hps
    T = data.shape[1]
    bsize = data.shape[2]

    # FIXME gnumpy reallocates if try and use same parameters?
    #us = self.us[:, 0:T, 0:bsize]
    #dus = self.dus[:, 0:T, 0:bsize]
    #hs = self.hs[:, 0:T, 0:bsize]
    #dhs = self.dhs[:, 0:T, 0:bsize]
    #probs = self.probs[:, 0:T, 0:bsize]
    #dprobs = self.dprobs[:, 0:T, 0:bsize]
    #costs = self.costs[0:T, 0:bsize]

    us = list()
    dus = list()
    hs = list()
    dhs = list()
    h0 = list()
    for k in xrange(hps.hidden_layers):
        us.append(list())
        dus.append(list())
        hs.append(list())
        dhs.append(list())
        h0.append(empty((hps.hidden_size, bsize)))
        for t in xrange(T):
            us[k].append(zeros((hps.hidden_size, bsize)))
            dus[k].append(zeros((hps.hidden_size, bsize)))
            hs[k].append(zeros((hps.hidden_size, bsize)))
            dhs[k].append(zeros((hps.hidden_size, bsize)))
    probs = list()
    for t in xrange(T):
        probs.append(zeros((hps.output_size, bsize)))
    costs = np.zeros((T, bsize))

    if prev_h0 is not None:
        h0 = prev_h0
    else:
        for k in xrange(hps.hidden_layers):
            h0[k] = tile(self.params['h0'][:, k].reshape(-1, 1), bsize)

    bih = self.params['bih']
    Wih = self.params['Wih']
    Whh = self.params['Whh']
    bhh = self.params['bhh']
    Who = self.params['Who']
    bho = self.params['bho']

    # Forward prop
    for t in xrange(T):
        for k in xrange(hps.hidden_layers):
            if t == 0:
                hprev = h0[k]
            else:
                hprev = hs[k][t - 1]

            if k == 0:
                us[k][t] = mult(Wih, data[:, t, :]) + bih
            else:
                us[k][t] = mult(self.params['Wh%d' % k], hs[k - 1][t])

            if k == hps.recurrent_layer - 1:
                us[k][t] += mult(Whh, hprev) + bhh
                # Clip maximum activation
                mask = us[k][t] < hps.max_act
                us[k][t] = us[k][t] * mask + hps.max_act * (1 - mask)
            elif k != 0:
                us[k][t] += self.params['bh%d' % k]

            hs[k][t] = self.nl(us[k][t])
        probs[t] = softmax(mult(Who, hs[-1][t]) + bho)

    self.last_h = list()
    for k in xrange(hps.hidden_layers):
        self.last_h.append(hs[k][-1])

    if labels is None:
        return None, probs

    probs_neg_log = list()
    dprobs = list()
    for t in xrange(T):
        probs_neg_log.append(as_np(-1 * log(probs[t])))
        dprobs.append(as_np(probs[t].copy()))
    for k in xrange(bsize):
        for t in xrange(len(labels[k])):
            costs[t, k] = probs_neg_log[t][labels[k][t], k]
            dprobs[t][labels[k][t], k] -= 1
    for t in xrange(T):
        dprobs[t] = array(dprobs[t])

    # NOTE Summing costs over time
    # NOTE FIXME Dividing by T to get better sense if objective
    # is decreasing, remove for grad checking
    cost = costs.sum() / bsize / float(T)

    if not back:
        return cost, probs

    # Backprop
    for k in self.grads:
        self.grads[k][:] = 0

    for t in reversed(xrange(T)):
        self.grads['bho'] += dprobs[t][:, :].sum(axis=-1).reshape((-1, 1)) / bsize
        self.grads['Who'] += mult(dprobs[t], hs[-1][t].T) / bsize
        for k in reversed(xrange(hps.hidden_layers)):
            if k == hps.hidden_layers - 1:
                dhs[k][t] += mult(Who.T, dprobs[t])
            else:
                dhs[k][t] += mult(self.params['Wh%d' % (k + 1)].T, dhs[k + 1][t])
            dus[k][t] += get_nl_grad(self.hps.nl, us[k][t]) * dhs[k][t]
            if k > 0:
                self.grads['Wh%d' % k] += mult(dus[k][t], hs[k - 1][t].T) / bsize
                self.grads['bh%d' % k] += dus[k][t].sum(axis=-1).reshape((-1, 1)) / bsize
            if k == hps.recurrent_layer - 1:
                if t == 0:
                    hprev = h0[k]
                    self.grads['h0'][:, k] = mult(Whh.T, dus[k][t]).sum(axis=-1) / bsize
                else:
                    hprev = hs[k][t - 1]
                    dhs[k][t - 1] = mult(Whh.T, dus[k][t])
                self.grads['Whh'] += mult(dus[k][t], hprev.T) / bsize
                self.grads['bhh'] += dus[k][t].sum(axis=-1).reshape((-1, 1)) / bsize
        self.grads['Wih'] += mult(dus[0][t], data[:, t, :].T) / bsize
        self.grads['bih'] += dus[0][t].sum(axis=-1).reshape((-1, 1)) / bsize

    return cost, self.grads
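# The "clip maximum activation" step above is branch-free: a boolean mask
# selects entrywise between the raw pre-activation and the cap. A minimal
# standalone sketch (numpy assumed; the max_act value is arbitrary):
import numpy as np

def clip_activation(u, max_act):
    # Entries below max_act pass through; the rest are replaced by max_act.
    mask = u < max_act
    return u * mask + max_act * (1 - mask)

print(clip_activation(np.array([1.0, 50.0]), 10.0))  # -> [ 1. 10.]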
def cost_and_grad(self, data, labels, back=True):
    hps = self.hps
    grads = self.grads

    # May not be full batch size if at end of dataset
    bsize = data.shape[-1]

    p = ParamStruct(**self.params)

    # Forward prop
    acts = list()
    acts.append(self.nl(mult(p.Wih, data) + p.bih))
    for k in xrange(hps.hidden_layers - 1):
        W = self.params['W%d' % (k + 1)]
        b = self.params['b%d' % (k + 1)]
        acts.append(self.nl(mult(W, acts[-1]) + b))
    y = mult(p.Who, acts[-1]) + p.bho
    probs = softmax(y)

    if labels is None:
        return None, probs

    # NOTE For more precision if necessary convert to nparray early
    cost_array = np.empty(bsize, dtype=np.float64)
    # Speed things up by doing assignments off gpu
    neg_log_prob = -1 * np.log(as_np(probs))
    for k in xrange(bsize):
        cost_array[k] = neg_log_prob[labels[k], k]
    cost = cost_array.sum() / bsize

    if not back:
        return cost, probs

    # Backprop
    for k in self.grads:
        self.grads[k][:] = 0

    # Do assignments off GPU to speed things up
    dLdy = as_np(probs)  # NOTE This changes probs
    for k in xrange(bsize):
        dLdy[labels[k], k] -= 1
    dLdy = array(dLdy)

    grads['bho'] = dLdy.sum(axis=1).reshape((-1, 1))
    grads['Who'] = mult(dLdy, acts[-1].T)

    Ws = [p.Wih] + [self.params['W%d' % (k + 1)]
                    for k in xrange(hps.hidden_layers - 1)] + [p.Who]
    deltas = [dLdy]
    for k in reversed(xrange(hps.hidden_layers - 1)):
        delta = get_nl_grad(self.hps.nl, acts[k + 1]) * mult(Ws[k + 2].T, deltas[-1])
        deltas.append(delta)
        grads['b%d' % (k + 1)] = delta.sum(axis=1).reshape((-1, 1))
        grads['W%d' % (k + 1)] = mult(delta, acts[k].T)
    delta = get_nl_grad(self.hps.nl, acts[0]) * mult(Ws[1].T, deltas[-1])
    grads['bih'] = delta.sum(axis=1).reshape((-1, 1))
    grads['Wih'] = mult(delta, data.T)

    # Normalize
    for k in self.grads:
        self.grads[k] /= bsize

    return cost, self.grads
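# A hedged finite-difference check for cost_and_grad above. net, data, and
# labels are placeholders, and it assumes net.params holds numpy arrays that
# can be perturbed in place; eps and the number of spot-checked entries are
# arbitrary choices, not part of the original code. (Recall the NOTE in the
# recurrent version: drop the division by T before grad checking.)
import numpy as np

def grad_check(net, data, labels, eps=1e-6):
    cost, grads = net.cost_and_grad(data, labels)
    for name, W in net.params.items():
        flat = W.reshape(-1)  # view into W, so writes perturb the parameter
        for i in xrange(min(flat.size, 10)):  # spot-check a few entries
            orig = flat[i]
            flat[i] = orig + eps
            c_plus, _ = net.cost_and_grad(data, labels, back=False)
            flat[i] = orig - eps
            c_minus, _ = net.cost_and_grad(data, labels, back=False)
            flat[i] = orig
            numeric = (c_plus - c_minus) / (2 * eps)
            analytic = np.asarray(grads[name]).reshape(-1)[i]
            print('%s[%d]: numeric %g vs analytic %g' % (name, i, numeric, analytic))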