def compute_Lx_batches(v, g, h, xw_mat, xv_mat, xa, xb, xc, bs, cbs):
    xw = xw_mat.flatten()
    xv = xv_mat.flatten()
    tv = v.reshape((bs // cbs, cbs, v.shape[1]))
    tg = g.reshape((bs // cbs, cbs, g.shape[1]))
    th = h.reshape((bs // cbs, cbs, h.shape[1]))

    final_w1 = T.unbroadcast(T.shape_padleft(T.zeros_like(xw_mat)), 0)
    final_v1 = T.unbroadcast(T.shape_padleft(T.zeros_like(xv_mat)), 0)
    final_a1 = T.unbroadcast(T.shape_padleft(T.zeros_like(xa)), 0)
    final_b1 = T.unbroadcast(T.shape_padleft(T.zeros_like(xb)), 0)
    final_c1 = T.unbroadcast(T.shape_padleft(T.zeros_like(xc)), 0)

    def comp_step(lv, lg, lh, acc_w1, acc_v1, acc_a1, acc_b1, acc_c1):
        terms1 = compute_Lx_term1(lv, lg, lh, xw, xv, xa, xb, xc)
        accs1 = [acc_w1, acc_v1, acc_a1, acc_b1, acc_c1]
        rval = []
        for (term1, acc) in zip(terms1, accs1):
            rval += [acc + term1]
        return rval

    rvals, _ = theano.sandbox.scan.scan(
        comp_step,
        sequences=[tv, tg, th],
        states=[final_w1, final_v1, final_a1, final_b1, final_c1],
        n_steps=bs // cbs,
        profile=0,
        mode=theano.Mode(linker='cvm_nogc'),
        flags=['no_optimization'])
    accs1 = [x[0] / numpy.float32(bs // cbs) for x in rvals]
    accs2 = compute_Lx_term2(v, g, h, xw, xv, xa, xb, xc)
    return [x - y for x, y in zip(accs1, accs2)]
def get_debug(self, train=False):
    input_dict = self.get_input(train)
    X = input_dict[self.dec_input_name]
    prev_state = input_dict[self.enc_name]
    padded_mask = self.get_padded_shuffled_mask(train, X, pad=1)
    X = X.dimshuffle((1, 0, 2))

    xi = T.dot(X, self.W_i) + self.b_i + T.dot(prev_state, self.We_i)
    xf = T.dot(X, self.W_f) + self.b_f + T.dot(prev_state, self.We_f)
    xc = T.dot(X, self.W_c) + self.b_c + T.dot(prev_state, self.We_c)
    xo = T.dot(X, self.W_o) + self.b_o + T.dot(prev_state, self.We_o)

    if train:
        STEP = self._step
    else:
        STEP = self._step_test

    [outputs, hiddens, memories], updates = theano.scan(
        STEP,
        sequences=[xi, xf, xo, xc, padded_mask],
        outputs_info=[
            T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1),
            T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.hidden_dim), 1),
            T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.hidden_dim), 1)
        ],
        non_sequences=[self.U_i, self.U_f, self.U_o, self.U_c, prev_state],
        truncate_gradient=self.truncate_gradient,
        go_backwards=self.go_backwards)

    return outputs, hiddens, memories, prev_state
def generic_compute_Lx_batches(samples, weights, biases, bs, cbs):
    tsamples = [x.reshape((bs // cbs, cbs, x.shape[1])) for x in samples]
    final_ws = [T.unbroadcast(T.shape_padleft(T.zeros_like(x)), 0) for x in weights]
    final_bs = [T.unbroadcast(T.shape_padleft(T.zeros_like(x)), 0) for x in biases]
    n_samples = len(samples)
    n_weights = len(weights)
    n_biases = len(biases)

    def comp_step(*args):
        lsamples = args[:n_samples]
        terms1 = generic_compute_Lx_term1(lsamples, weights, biases)
        rval = []
        for (term1, acc) in zip(terms1, args[n_samples:]):
            rval += [acc + term1]
        return rval

    rvals, _ = theano.sandbox.scan.scan(
        comp_step,
        sequences=tsamples,
        states=final_ws + final_bs,
        n_steps=bs // cbs,
        profile=0,
        mode=theano.Mode(linker='cvm_nogc'),
        flags=['no_optimization'])
    accs1 = [x[0] / numpy.float32(bs // cbs) for x in rvals]
    accs2 = generic_compute_Lx_term2(samples, weights, biases)
    return [x - y for x, y in zip(accs1, accs2)]
def __init__(self, pad_x=0, pad_y=0, d_row=-1, **kwargs):
    super(OneDToTwoDFixedSizeLayer, self).__init__(1, **kwargs)
    assert len(self.sources) == 1
    X = self.sources[0].output
    assert X.ndim == 3
    assert X.dtype == "float32"

    if d_row > 0:
        X = X.reshape((X.shape[0], X.shape[1], d_row, X.shape[2] / d_row))
        Y = T.unbroadcast(X.dimshuffle(2, 0, 1, 3), 3)
        n_out = self.sources[0].attrs['n_out'] / d_row
    else:
        Y = X.dimshuffle(2, 0, 1, 'x')
        n_out = 1

    if pad_x + pad_y > 0:
        tmp = T.zeros((Y.shape[1] + 2 * pad_x, Y.shape[2]), 'int8')
        self.index = T.set_subtensor(tmp[pad_x: pad_x + Y.shape[1]], self.sources[0].index)
        tmp = T.zeros((Y.shape[0] + 2 * pad_y, Y.shape[1] + 2 * pad_x, Y.shape[2], Y.shape[3]), 'float32')
        Y = T.set_subtensor(tmp[pad_y:pad_y + Y.shape[0], pad_x:pad_x + Y.shape[1]], Y)
        Y = T.unbroadcast(Y, 3)

    height = Y.shape[0]  # if n_out <= 0 else n_out
    width = T.maximum(T.sum(self.index, axis=0), T.ones_like(self.index[0]))
    batch = Y.shape[2]
    sizes = T.zeros((batch, 2), dtype="float32")
    sizes = T.set_subtensor(sizes[:, 0], height)
    sizes = T.set_subtensor(sizes[:, 1], width)
    self.output = Y
    self.output_sizes = sizes
    self.set_attr('n_out', n_out)
def pos_phase(self, v, init_state, n_steps=1, eps=1e-3):
    """
    Mixed mean-field + sampling inference in positive phase.
    :param v: input being conditioned on
    :param init_state: dictionary of initial values
    :param n_steps: number of Gibbs updates to perform afterwards.
    """
    def pos_mf_iteration(g1, h1, v, pos_counter):
        h2 = self.h_hat(g1, v)
        s2_1 = self.s1_hat(g1, v)
        s2_0 = self.s0_hat(g1, v)
        g2 = self.g_hat(h2, s2_1, s2_0)
        # stopping criterion
        dl_dghat = T.max(abs(self.dlbound_dg(g2, h2, s2_1, s2_0, v)))
        dl_dhhat = T.max(abs(self.dlbound_dh(g2, h2, s2_1, s2_0, v)))
        stop = T.maximum(dl_dghat, dl_dhhat)
        return [g2, h2, s2_1, s2_0, v, pos_counter + 1], theano.scan_module.until(stop < eps)

    states = [
        T.unbroadcast(T.shape_padleft(init_state['g'])),
        T.unbroadcast(T.shape_padleft(init_state['h'])),
        {'steps': 1},
        {'steps': 1},
        T.unbroadcast(T.shape_padleft(v)),
        T.unbroadcast(T.shape_padleft(0.))]

    rvals, updates = scan(
        pos_mf_iteration,
        states=states,
        n_steps=n_steps)

    return [rval[0] for rval in rvals]
def generate_with_concat(self, start_token, concat, length, temperature):
    start_token = start_token[:, np.newaxis].T
    concat = concat[:, np.newaxis].T
    N = 1
    H = self.lstm.n_hidden
    L = self.lstm.n_layers

    def step(input, previous_hidden, previous_state, temperature, concat):
        lstm_hidden, state = self.lstm.forward(T.concatenate([input, concat], axis=1),
                                               previous_hidden, previous_state)
        final_output = self.output.forward(lstm_hidden[:, -1, :], temperature)
        sample = self.rng.multinomial(n=1, size=(1,), pvals=final_output,
                                      dtype=theano.config.floatX)
        return sample, lstm_hidden, state

    hidden = T.unbroadcast(T.alloc(np.array(0).astype(theano.config.floatX), N, L, H), 1)
    state = T.unbroadcast(T.alloc(np.array(0).astype(theano.config.floatX), N, L, H), 1)

    (softmax_output, _, _), updates = theano.scan(
        step,
        outputs_info=[
            start_token,
            hidden,
            state,
        ],
        non_sequences=[temperature, concat],
        n_steps=length)
    return softmax_output[:, 0, :], updates
def fprop(self, state_below):
    """
    :description:

    :type state_below: theano matrix
    :param state_below: a two dimensional matrix where the first dim represents time
        and the second dim represents features: shape = (time, features)
    """
    # init_output = T.alloc(np.cast[theano.config.floatX](0), state_below.shape[0], self.n_hid)
    init_output = T.alloc(np.cast[theano.config.floatX](0), self.n_hid)
    Wxh, bxh, Whh, bhh, Who, bho = self.Wxh, self.bxh, self.Whh, self.bhh, self.Who, self.bho
    state_below = T.dot(state_below, Wxh) + bxh

    if state_below.shape[0] == 1:
        init_output = T.unbroadcast(init_output, 0)
    if self.n_hid == 1:
        init_output = T.unbroadcast(init_output, 1)

    def fprop_step(state_below_timestep, state_before_timestep, Whh, bhh):
        return self.nonlinearity(state_below_timestep
                                 + T.dot(state_before_timestep, Whh)
                                 + bhh)

    outputs, updates = scan(
        fn=fprop_step,
        sequences=[state_below],
        outputs_info=[init_output],
        non_sequences=[Whh, bhh]
    )

    # reconstruct input
    # outputs = T.dot(outputs, Who) + bho

    if self.return_indices is not None:
        if len(self.return_indices) > 1:
            return [outputs[idx] for idx in self.return_indices]
        else:
            return outputs[self.return_indices[0]]
    else:
        return outputs
def get_output(self, train=False):
    X = self.get_input(train)
    padded_mask = self.get_padded_shuffled_mask(train, X, pad=self.depth)
    X = X.dimshuffle((1, 0, 2))
    x = T.dot(X, self.W) + self.b

    if self.depth == 1:
        initial = T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1)
    else:
        initial = T.unbroadcast(T.unbroadcast(
            alloc_zeros_matrix(self.depth, X.shape[1], self.output_dim), 0), 2)

    outputs, updates = theano.scan(
        self._step,
        sequences=[x, dict(
            input=padded_mask,
            taps=[(-i) for i in range(self.depth)]
        )],
        outputs_info=[dict(
            initial=initial,
            taps=[(-i - 1) for i in range(self.depth)]
        )],
        non_sequences=self.Us,
        truncate_gradient=self.truncate_gradient
    )

    if self.return_sequences:
        return outputs.dimshuffle((1, 0, 2))
    return outputs[-1]
def fprop(self, data):
    if self.use_ground_truth:
        self.input_space.validate(data)
        features, phones = data

        init_h = T.alloc(numpy.cast[theano.config.floatX](0), self.nhid)
        init_out = T.alloc(numpy.cast[theano.config.floatX](0), 1)
        init_out = T.unbroadcast(init_out, 0)

        fn = lambda f, p, h, o: self.fprop_step(f, p, h, o)

        ((h, out), updates) = theano.scan(fn=fn,
                                          sequences=[features, phones],
                                          outputs_info=[dict(initial=init_h, taps=[-1]),
                                                        init_out])
        return out
    else:
        self.input_space.validate(data)
        features, phones = data

        init_in = features[0]
        init_h = T.alloc(numpy.cast[theano.config.floatX](0), self.nhid)
        init_out = T.alloc(numpy.cast[theano.config.floatX](0), 1)
        init_out = T.unbroadcast(init_out, 0)

        fn = lambda t, p, f, h, o: self.fprop_step_prime(t, p, f, h, o)

        ((f, h, out), updates) = theano.scan(fn=fn,
                                             sequences=[features, phones],
                                             outputs_info=[init_in,
                                                           dict(initial=init_h, taps=[-1]),
                                                           init_out])
        return out
def _ct(self, other):
    ''' Helper function to make tensors dimensions compatible'''
    if (other.var_set == self.var_set):
        return (self.pt_tensor, other.pt_tensor)
    union_var_set = other.scope.union(self.scope)
    vidx1 = frozenset(self.var_indices)
    vidx2 = frozenset(other.var_indices)
    union_indices = vidx1.union(vidx2)

    shape1 = []
    shape2 = []
    b1 = []
    b2 = []
    u1 = []
    u2 = []
    for i, vidx in enumerate(sorted(union_indices)):
        if (vidx in vidx1):
            shape1.append(self.discrete_pgm.cardinalities[vidx])
            u1.append(i)
        else:
            shape1.append(1)
            b1.append(i)
        if (vidx in vidx2):
            shape2.append(self.discrete_pgm.cardinalities[vidx])
            u2.append(i)
        else:
            shape2.append(1)
            b2.append(i)

    t1 = T.addbroadcast(T.unbroadcast(self.pt_tensor.reshape(shape1, len(shape1)), *u1), *b1)
    t2 = T.addbroadcast(T.unbroadcast(other.pt_tensor.reshape(shape2, len(shape2)), *u2), *b2)
    return (t1, t2)
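# Hedged sketch (illustrative names, not from the original code): the same
# alignment idea on two concrete factors. f1 ranges over variables (A, B) and
# f2 over (B, C); each is reshaped onto the union scope (A, B, C) with size-1
# axes for its missing variables, and addbroadcast marks those axes so an
# elementwise product broadcasts over the full joint table.
import numpy as np
import theano
import theano.tensor as T

f1 = T.matrix('f1')      # shape (|A|, |B|)
f2 = T.matrix('f2')      # shape (|B|, |C|)

t1 = T.addbroadcast(f1.reshape((f1.shape[0], f1.shape[1], 1)), 2)   # (|A|, |B|, 1)
t2 = T.addbroadcast(f2.reshape((1, f2.shape[0], f2.shape[1])), 0)   # (1, |B|, |C|)
product = t1 * t2                                                   # (|A|, |B|, |C|)

fn = theano.function([f1, f2], product)
a = np.ones((2, 3), dtype=theano.config.floatX)
b = np.ones((3, 4), dtype=theano.config.floatX)
print(fn(a, b).shape)    # (2, 3, 4)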
def get_output(self, train=False):
    X = self.get_input(train)
    padded_mask = self.get_padded_shuffled_mask(train, X, pad=1)
    X = X.dimshuffle((1, 0, 2))

    xsum = T.dot(X, self.W_sum) + self.b_sum  ### get gate's input
    xmax = T.dot(X, self.W_max) + self.b_max
    xmin = T.dot(X, self.W_min) + self.b_min
    xsubt = T.dot(X, self.W_subt) + self.b_subt
    xmul = T.dot(X, self.W_mul) + self.b_mul
    xres = T.dot(X, self.W_res) + self.b_res
    xone = T.dot(X, self.W_one) + self.b_one

    xi = T.dot(X, self.W_i) + self.b_i
    xf = T.dot(X, self.W_f) + self.b_f
    xc = T.dot(X, self.W_c) + self.b_c
    xo = T.dot(X, self.W_o) + self.b_o

    [outputs, memories], updates = theano.scan(
        self._step,
        sequences=[xsum, xmax, xmin, xsubt, xmul, xres, xone,
                   xi, xf, xo, xc, padded_mask],  ### update sequence input
        outputs_info=[
            T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1),
            T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1)
        ],
        non_sequences=[self.U_sum, self.U_max, self.U_min, self.U_subt, self.U_mul,
                       self.U_res, self.U_one,
                       self.U_i, self.U_f, self.U_o, self.U_c],  ### add gate's weight matrix
        truncate_gradient=self.truncate_gradient)

    if self.return_sequences:
        return outputs.dimshuffle((1, 0, 2))
    return outputs[-1]
def outputs_info(self, n_samples):
    # initialize hidden states: c, h
    shape = (n_samples,) + self.output_shape
    return [
        T.unbroadcast(T.alloc(numpy.asarray(0., dtype=theano.config.floatX), *shape), *range(len(shape))),  # c
        T.unbroadcast(T.alloc(numpy.asarray(0., dtype=theano.config.floatX), *shape), *range(len(shape)))   # h
    ]
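# Hedged usage sketch (not part of the original code): `step`, `output_shape`
# and `x` below are illustrative stand-ins. It shows why the initial states
# above are wrapped in T.unbroadcast -- theano.scan rejects an outputs_info
# entry whose broadcast pattern is more broadcastable than the corresponding
# step output, which happens when a dimension is allocated with a constant
# size of 1 (e.g. a batch of one sample).
import numpy
import theano
import theano.tensor as T

output_shape = (16,)                     # assumed per-sample state shape
n_samples = T.iscalar('n_samples')
shape = (n_samples,) + output_shape

def step(x_t, c_tm1, h_tm1):
    # toy recurrence standing in for the real cell
    c_t = c_tm1 + x_t
    h_t = T.tanh(c_t)
    return c_t, h_t

c0 = T.unbroadcast(T.alloc(numpy.asarray(0., dtype=theano.config.floatX), *shape), *range(len(shape)))
h0 = T.unbroadcast(T.alloc(numpy.asarray(0., dtype=theano.config.floatX), *shape), *range(len(shape)))

x = T.tensor3('x')                       # (time, batch, features) with features == output_shape[0]
(c_seq, h_seq), _ = theano.scan(step, sequences=x, outputs_info=[c0, h0])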
def __call__(self, X, mask=None, init_state=None):
    if mask is None:
        mask = T.ones((X.shape[0], X.shape[1]))

    mask = T.shape_padright(mask)  # (nb_samples, time, 1)
    mask = T.addbroadcast(mask, -1)  # (time, nb_samples, 1) matrix.
    mask = mask.dimshuffle(1, 0, 2)  # (time, nb_samples, 1)
    mask = mask.astype('int8')
    # mask, padded_mask = self.get_padded_shuffled_mask(mask, pad=1)
    X = X.dimshuffle((1, 0, 2))

    x_z = T.dot(X, self.W_z) + self.b_z
    x_r = T.dot(X, self.W_r) + self.b_r
    x_h = T.dot(X, self.W_h) + self.b_h

    if init_state:
        # (batch_size, output_dim)
        outputs_info = T.unbroadcast(init_state, 1)
    else:
        outputs_info = T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1)

    outputs, updates = theano.scan(
        self._step,
        # sequences=[x_z, x_r, x_h, padded_mask, mask],
        sequences=[x_z, x_r, x_h, mask],
        outputs_info=outputs_info,
        non_sequences=[self.U_z, self.U_r, self.U_h])

    if self.return_sequences:
        return outputs.dimshuffle((1, 0, 2))
    return outputs[-1]
def get_output(self, train=False):
    X = self.get_input(train)
    padded_mask = self.get_padded_shuffled_mask(train, X, pad=1)
    X = X.dimshuffle((1, 0, 2))

    # Create X_tm1 sequence through zero left-padding
    Z = T.zeros_like(X)
    X_tm1 = T.concatenate(([Z[0]], X), axis=0)

    x_f = T.dot(X, self.W_xf) + self.b_f
    x_z = T.dot(X, self.W_xz) + self.b_z
    x_o = T.dot(X, self.W_xo) + self.b_o

    h_info = T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1)
    c_info = T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1)

    [outputs, cells], updates = theano.scan(
        self._step,
        sequences=[x_f, x_z, x_o, padded_mask, X_tm1],
        outputs_info=[h_info, c_info],
        non_sequences=[self.U_hf, self.U_xz, self.U_xo],
        truncate_gradient=self.truncate_gradient,
        go_backwards=self.go_backwards)

    if self.return_sequences:
        return outputs.dimshuffle((1, 0, 2))
    return outputs[-1]
def get_output(self, train=False):
    X = self.get_input(train)
    padded_mask = self.get_padded_shuffled_mask(train, X, pad=1)
    X = X.dimshuffle((1, 0, 2))

    xi = T.dot(X, self.W_i) + self.b_i
    xf = T.dot(X, self.W_f) + self.b_f
    xc = T.dot(X, self.W_c) + self.b_c
    xo = T.dot(X, self.W_o) + self.b_o

    [outputs, memories], updates = theano.scan(
        self._step,
        sequences=[xi, xf, xo, xc, padded_mask],
        outputs_info=[
            T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1),
            T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1)
        ],
        non_sequences=[self.U_i, self.U_f, self.U_o, self.U_c],
        truncate_gradient=self.truncate_gradient,
        go_backwards=self.go_backwards)

    if self.return_sequences and self.go_backwards:
        return outputs[::-1].dimshuffle((1, 0, 2))
    elif self.return_sequences:
        return outputs.dimshuffle((1, 0, 2))
    return outputs[-1]
def scalar_armijo_search(phi, phi0, derphi0, c1=constant(1e-4),
                         n_iters=10, profile=0):
    alpha0 = one
    phi_a0 = phi(alpha0)
    alpha1 = -(derphi0) * alpha0 ** 2 / 2.0 /\
        (phi_a0 - phi0 - derphi0 * alpha0)
    phi_a1 = phi(alpha1)

    csol1 = phi_a0 <= phi0 + c1 * derphi0
    csol2 = phi_a1 <= phi0 + c1 * alpha1 * derphi0

    def armijo(alpha0, alpha1, phi_a0, phi_a1):
        factor = alpha0 ** 2 * alpha1 ** 2 * (alpha1 - alpha0)
        a = alpha0 ** 2 * (phi_a1 - phi0 - derphi0 * alpha1) - \
            alpha1 ** 2 * (phi_a0 - phi0 - derphi0 * alpha0)
        a = a / factor
        b = -alpha0 ** 3 * (phi_a1 - phi0 - derphi0 * alpha1) + \
            alpha1 ** 3 * (phi_a0 - phi0 - derphi0 * alpha0)
        b = b / factor

        alpha2 = (-b + TT.sqrt(abs(b ** 2 - 3 * a * derphi0))) / (3.0 * a)
        phi_a2 = phi(alpha2)

        end_condition = phi_a2 <= phi0 + c1 * alpha2 * derphi0
        end_condition = TT.bitwise_or(
            TT.isnan(alpha2), end_condition)
        end_condition = TT.bitwise_or(
            TT.isinf(alpha2), end_condition)
        alpha2 = TT.switch(
            TT.bitwise_or(alpha1 - alpha2 > alpha1 / constant(2.),
                          one - alpha2 / alpha1 < 0.96),
            alpha1 / constant(2.),
            alpha2)
        return [alpha1, alpha2, phi_a1, phi_a2], \
            theano.scan_module.until(end_condition)

    states = []
    states += [TT.unbroadcast(TT.shape_padleft(alpha0), 0)]
    states += [TT.unbroadcast(TT.shape_padleft(alpha1), 0)]
    states += [TT.unbroadcast(TT.shape_padleft(phi_a0), 0)]
    states += [TT.unbroadcast(TT.shape_padleft(phi_a1), 0)]
    # print 'armijo'
    rvals, _ = scan(
        armijo,
        states=states,
        n_steps=n_iters,
        name='armijo',
        mode=theano.Mode(linker='cvm'),
        profile=profile)

    sol_scan = rvals[1][0]
    a_opt = ifelse(csol1, one,
                   ifelse(csol2, alpha1, sol_scan))
    score = ifelse(csol1, phi_a0,
                   ifelse(csol2, phi_a1, rvals[2][0]))
    return a_opt, score
def _activate(self, x):
    """
    Compute the actual activation of the layer.

    :param x: theano.tensor
        Symbolic representation of the layer's input.

    :return: theano.tensor
        Symbolic representation of the layer's activation. If the flag
        'return_sequences' is set to True, the layer will return all the
        hidden states computed by scan.
    """
    mask = self.get_padded_shuffled_mask(x)
    # input to block is (batch, time, input)
    # we want it to be (time, batch, input)
    x = x.dimshuffle((1, 0, 2))

    xs = T.dot(x, self.W) + self.b
    xz, xi, xf, xo = self._slice(xs)

    if self.reset_initial_state:
        initial_state = T.unbroadcast(T.alloc(
            numpy.asarray(0., dtype=self.dtype),
            x.shape[1], self.n_out
        ))
    else:
        initial_state = self.initial_state

    initial_memory = T.unbroadcast(T.alloc(
        numpy.asarray(0., dtype=self.dtype),
        x.shape[1], self.n_out
    ))

    if self.use_peepholes:
        (state, memory), updates = theano.scan(
            self._step_peep,
            sequences=[xz, xi, xf, xo, mask],
            outputs_info=[initial_state, initial_memory],
            non_sequences=[self.R, self.P_i, self.P_f, self.P_o],
            n_steps=x.shape[0]  # keep track of number of steps to return all computations
        )
    else:
        (state, memory), updates = theano.scan(
            self._step,
            sequences=[xz, xi, xf, xo, mask],
            outputs_info=[initial_state, initial_memory],
            non_sequences=[self.R],
            n_steps=x.shape[0]  # keep track of number of steps to return all computations
        )

    if self.return_sequences:
        return state.dimshuffle((1, 0, 2))
    else:
        return state[-1]
def get_update(self, cost, learning_rate, momentum_coeff, max_norm=None):
    grad_cost = T.grad(cost, self.params)
    updates = {}

    if hasattr(self.params, "__iter__"):
        for (param, gparam, mparam) in zip(self.params, grad_cost, self.params_momentum):
            if param.name is not None:
                print param.name
                print mparam.name
            next_momentum = momentum_coeff*mparam - gparam
            next_param = param + learning_rate*next_momentum
            updates[mparam] = T.cast(next_momentum, floatX)
            updates[param] = T.unbroadcast(T.cast(next_param, floatX))

        next_momentum_norm = T.sqrt(reduce(lambda x, y: x + y,
                                           map(lambda x: (x**2).sum(),
                                               [updates[mparam] for mparam in self.params_momentum])))

        for (param, gparam, mparam) in zip(self.params, grad_cost, self.params_momentum):
            next_momentum = momentum_coeff*mparam - gparam
            if self.mode == "normalized":
                next_momentum = next_momentum/next_momentum_norm
            elif self.mode == "clipped":
                assert max_norm is not None
                next_momentum = T.switch((next_momentum_norm < max_norm),
                                         next_momentum,
                                         next_momentum*(max_norm/next_momentum_norm))
            next_param = param + learning_rate*next_momentum
            updates[mparam] = T.cast(next_momentum, floatX)
            updates[param] = T.unbroadcast(T.cast(next_param, floatX))
    else:
        next_momentum = momentum_coeff*self.params_momentum - learning_rate*grad_cost
        next_param = self.params + next_momentum
        next_momentum_norm = T.sqrt((next_momentum**2).sum())
        if self.mode == "normalized":
            assert max_norm is not None
            next_momentum = next_momentum*(max_norm/next_momentum_norm)
        elif self.mode == "clipped":
            assert max_norm is not None
            next_momentum = T.switch((next_momentum_norm < max_norm),
                                     next_momentum,
                                     next_momentum*(max_norm/next_momentum_norm))
        updates[self.params_momentum] = T.cast(next_momentum, floatX)
        updates[self.params] = T.cast(next_param, floatX)

    return (updates, next_momentum_norm)
def get_output_for(self, input, get_details=False, **kwargs):

    input = input.dimshuffle(1, 0, 2)

    def step(x_t, M_tm1, h_tm1, state_tm1, ww_tm1, wr_tm1, *params):
        # Update the memory (using w_tm1 of the writing heads & M_tm1)
        M_t = self.write_heads.write(h_tm1, ww_tm1, M_tm1)

        # Get the read vector (using w_tm1 of the reading heads & M_t)
        r_t = self.read_heads.read(wr_tm1, M_t)

        # Apply the controller (using x_t, r_t & the requirements for the controller)
        h_t, state_t = self.controller.step(x_t, r_t, h_tm1, state_tm1)

        # Update the weights (using h_t, M_t & w_tm1)
        ww_t = self.write_heads.get_weights(h_t, ww_tm1, M_t)
        wr_t = self.read_heads.get_weights(h_t, wr_tm1, M_t)

        return [M_t, h_t, state_t, ww_t, wr_t]

    memory_init = T.tile(self.memory.memory_init, (input.shape[1], 1, 1))
    memory_init = T.unbroadcast(memory_init, 0)

    write_weights_init = T.tile(self.write_heads.weights_init, (input.shape[1], 1, 1))
    write_weights_init = T.unbroadcast(write_weights_init, 0)

    read_weights_init = T.tile(self.read_heads.weights_init, (input.shape[1], 1, 1))
    read_weights_init = T.unbroadcast(read_weights_init, 0)

    non_seqs = self.controller.get_params() + self.memory.get_params() + \
        self.write_heads.get_params() + self.read_heads.get_params()

    hids, _ = theano.scan(
        fn=step,
        sequences=input,
        outputs_info=[memory_init] + self.controller.outputs_info(input.shape[1]) +
                     [write_weights_init, read_weights_init],
        non_sequences=non_seqs,
        strict=True)

    # dimshuffle back to (n_batch, n_time_steps, n_features)
    if get_details:
        hid_out = [
            hids[0].dimshuffle(1, 0, 2, 3),
            hids[1].dimshuffle(1, 0, 2),
            hids[2].dimshuffle(1, 0, 2),
            hids[3].dimshuffle(1, 0, 2, 3),
            hids[4].dimshuffle(1, 0, 2, 3)]
    else:
        if self.only_return_final:
            hid_out = hids[1][-1]
        else:
            hid_out = hids[1].dimshuffle(1, 0, 2)

    return hid_out
def build_recurrent_lstm_layer_from_params(params, input_variable, mask,
                                           random_state, one_step=False):
    [W, U, b] = params
    hidden_size = U.shape[0]

    n_steps = input_variable.shape[0]
    n_samples = input_variable.shape[1]
    n_features = input_variable.shape[2]

    def _slice(X, n, hidden_size):
        # Function is needed because tensor size changes across calls to step?
        if X.ndim == 3:
            return X[:, :, n * hidden_size:(n + 1) * hidden_size]
        return X[:, n * hidden_size:(n + 1) * hidden_size]

    def step(x_t, m, h_tm1, c_tm1):
        preactivation = T.dot(h_tm1, U)
        preactivation += x_t
        preactivation += b

        i_t = T.nnet.sigmoid(_slice(preactivation, 0, hidden_size))
        f_t = T.nnet.sigmoid(_slice(preactivation, 1, hidden_size))
        o_t = T.nnet.sigmoid(_slice(preactivation, 2, hidden_size))
        c_t = T.tanh(_slice(preactivation, 3, hidden_size))

        c_t = f_t * c_tm1 + i_t * c_t
        c_t = m[:, None] * c_t + (1. - m)[:, None] * c_tm1

        h_t = o_t * T.tanh(c_t)
        h_t = m[:, None] * h_t + (1. - m)[:, None] * h_tm1

        return h_t, c_t, i_t, f_t, o_t, preactivation

    # Scan cannot handle batch sizes of 1?
    # Unbroadcast can fix it... but still weird
    # https://github.com/Theano/Theano/issues/1772
    init_hidden = T.zeros((n_samples, hidden_size))
    init_cell = T.zeros((n_samples, hidden_size))
    init_hidden = T.unbroadcast(init_hidden, 0)
    init_cell = T.unbroadcast(init_cell, 0)

    x = T.dot(input_variable, W) + b

    if one_step:
        rval = step(x, mask, init_hidden, init_cell)
    else:
        rval, _ = theano.scan(step,
                              sequences=[x, mask],
                              outputs_info=[init_hidden, init_cell,
                                            None, None, None, None],
                              n_steps=n_steps)

    hidden = rval[0]
    return hidden, params
def applyBn(rollingAverageForBatchNormalizationOverThatManyBatches, inputTrain, inputVal, inputTest, inputShapeTrain):
    numberOfChannels = inputShapeTrain[1]

    gBn_values = np.ones((numberOfChannels), dtype='float32')
    gBn = theano.shared(value=gBn_values, borrow=True)
    bBn_values = np.zeros((numberOfChannels), dtype='float32')
    bBn = theano.shared(value=bBn_values, borrow=True)

    # for rolling average:
    muBnsArrayForRollingAverage = theano.shared(np.zeros((rollingAverageForBatchNormalizationOverThatManyBatches, numberOfChannels), dtype='float32'), borrow=True)
    varBnsArrayForRollingAverage = theano.shared(np.ones((rollingAverageForBatchNormalizationOverThatManyBatches, numberOfChannels), dtype='float32'), borrow=True)
    sharedNewMu_B = theano.shared(np.zeros((numberOfChannels), dtype='float32'), borrow=True)
    sharedNewVar_B = theano.shared(np.ones((numberOfChannels), dtype='float32'), borrow=True)

    e1 = np.finfo(np.float32).tiny

    # WARN, PROBLEM, THEANO BUG. The below was returning (True,) instead of a vector,
    # if I have only 1 FM. (Vector is (False,)). Think I corrected this bug.
    mu_B = inputTrain.mean(axis=[0, 2, 3, 4])  # average over all axis but the 2nd, which is the FM axis.
    # The above was returning a broadcastable (True,) tensor when FM-number=1. Here I make it a
    # broadcastable (False,), which is the "vector" type. This is the same type with the
    # sharedNewMu_B, which we are updating with this. They need to be of the same type.
    mu_B = T.unbroadcast(mu_B, (0))
    var_B = inputTrain.var(axis=[0, 2, 3, 4])
    var_B = T.unbroadcast(var_B, (0))
    var_B_plusE = var_B + e1

    # ---computing mu and var for inference from rolling average---
    mu_RollingAverage = muBnsArrayForRollingAverage.mean(axis=0)
    effectiveSize = inputShapeTrain[0]*inputShapeTrain[2]*inputShapeTrain[3]*inputShapeTrain[4]  # batchSize*voxels in a featureMap. See p5 of the paper.
    var_RollingAverage = (effectiveSize/(effectiveSize-1))*varBnsArrayForRollingAverage.mean(axis=0)
    var_RollingAverage_plusE = var_RollingAverage + e1

    # OUTPUT FOR TRAINING
    normXi_train = (inputTrain - mu_B.dimshuffle('x', 0, 'x', 'x', 'x')) / T.sqrt(var_B_plusE.dimshuffle('x', 0, 'x', 'x', 'x'))
    normYi_train = gBn.dimshuffle('x', 0, 'x', 'x', 'x') * normXi_train + bBn.dimshuffle('x', 0, 'x', 'x', 'x')  # dimshuffle makes b broadcastable.
    # OUTPUT FOR VALIDATION
    normXi_val = (inputVal - mu_RollingAverage.dimshuffle('x', 0, 'x', 'x', 'x')) / T.sqrt(var_RollingAverage_plusE.dimshuffle('x', 0, 'x', 'x', 'x'))
    normYi_val = gBn.dimshuffle('x', 0, 'x', 'x', 'x') * normXi_val + bBn.dimshuffle('x', 0, 'x', 'x', 'x')
    # OUTPUT FOR TESTING
    normXi_test = (inputTest - mu_RollingAverage.dimshuffle('x', 0, 'x', 'x', 'x')) / T.sqrt(var_RollingAverage_plusE.dimshuffle('x', 0, 'x', 'x', 'x'))
    normYi_test = gBn.dimshuffle('x', 0, 'x', 'x', 'x') * normXi_test + bBn.dimshuffle('x', 0, 'x', 'x', 'x')

    return (normYi_train,
            normYi_val,
            normYi_test,
            gBn,
            bBn,
            # For rolling average
            muBnsArrayForRollingAverage,
            varBnsArrayForRollingAverage,
            sharedNewMu_B,
            sharedNewVar_B,
            # mu_B / var_B are the current values calculated in this training iteration. They will be
            # saved in "sharedNewMu_B" / "sharedNewVar_B" (update), in order to be used for updating
            # the rolling average. Something could be simplified here.
            mu_B,
            var_B
            )
def output(self, dropout_active=False):
    x_dot_w = self._compute_x_dot_w(dropout_active)
    outputs_info = self._prepare_outputs_info(x_dot_w)
    cells, out = self._compute_seq(x_dot_w, dropout_active, outputs_info)
    cells, out = self._reverse_if_backward(cells, out)
    cells = ifelse(x_dot_w.shape[0] > 0, cells,
                   T.unbroadcast(outputs_info[0].dimshuffle('x', 0, 1), 0, 1))
    out = ifelse(x_dot_w.shape[0] > 0, out,
                 T.unbroadcast(outputs_info[1].dimshuffle('x', 0, 1), 0, 1))
    self.outputs = [cells, out]
    return self._prepare_result(cells, out, outputs_info)
def get_output(self, train=False):
    X = self.get_input(train)
    padded_mask = self.get_padded_shuffled_mask(train, X, pad=1)
    X = X.dimshuffle((1, 0, 2))

    x_z = T.dot(X, self.W_z) + self.b_z
    x_r = T.dot(X, self.W_r) + self.b_r
    x_h = T.dot(X, self.W_h) + self.b_h

    # outputs, updates = theano.scan(
    #     self._step,
    #     sequences=[x_z, x_r, x_h, padded_mask],
    #     outputs_info=T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1),
    #     non_sequences=[self.U_z, self.U_r, self.U_h],
    #     truncate_gradient=self.truncate_gradient,
    #     go_backwards=self.go_backwards)

    outputs = unroll_scan(
        self._step,
        sequences=[x_z, x_r, x_h, padded_mask],
        outputs_info=[T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1)],
        non_sequences=[self.U_z, self.U_r, self.U_h],
        n_steps=self.input_length,
        go_backwards=self.go_backwards)[-1]

    if self.return_sequences:
        return outputs.dimshuffle((1, 0, 2))
    return outputs[-1]
def create_vars(self):
    if self.base is None:
        self.base = self.layer.base
    self.n = self.add_state_var(T.zeros((self.layer.index.shape[1],), 'float32'), 'n')
    self.bound = self.add_input(T.cast(T.sum(self.layer.index, axis=0), 'float32'), 'bound')

    if self.attrs['distance'] == 'rnn':
        n_tmp = self.attrs['template']
        l = sqrt(6.) / sqrt(2 * n_tmp)
        values = numpy.asarray(self.layer.rng.uniform(low=-l, high=l, size=(n_tmp, n_tmp)), dtype=theano.config.floatX)
        self.A_re = self.add_param(self.layer.shared(value=values, borrow=True, name="A_re"))

    if self.attrs['distance'] == 'transpose':
        n_tmp = self.attrs['template']
        l = sqrt(6.) / sqrt(2 * n_tmp)
        values = numpy.asarray(self.layer.rng.uniform(low=-l, high=l, size=(n_tmp,)), dtype=theano.config.floatX)
        self.W_T = self.add_param(self.layer.shared(value=values, name="W_T"))

    if self.attrs['lm'] != "none":
        self.W_lm_in = self.add_var(self.layer.W_lm_in, name="W_lm_in")
        self.W_lm_out = self.add_var(self.layer.W_lm_out, name="W_lm_out")
        self.drop_mask = self.add_var(self.layer.lmmask, "drop_mask")
        y = self.layer.y_in[self.layer.attrs['target']].flatten()
        nil = T.unbroadcast(self.W_lm_out[0].dimshuffle('x', 'x', 0), 1).repeat(self.layer.index.shape[1], axis=1)
        if self.layer.attrs['direction'] == 1:
            y_t = self.W_lm_out[y].reshape((self.layer.index.shape[0], self.layer.index.shape[1], self.layer.unit.n_in))[:-1]  # (T-1)BD
            self.cls = T.concatenate([nil, y_t], axis=0)
        else:
            y_t = self.W_lm_out[y].reshape((self.layer.index.shape[0], self.layer.index.shape[1], self.layer.unit.n_in))[1:]  # (T-1)BD
            self.cls = T.concatenate([nil, y_t[::-1]], axis=0)
        self.add_input(self.cls, 'cls')
def build_initialize_function(self):

    def rho(s):
        return T.nnet.sigmoid(4. * s - 2.)  # sigmoid

    x_init = self.outside_world.x_data  # initialize by clamping x_data

    # h1_init = T.unbroadcast(T.constant(np.zeros((self.batch_size, self.n_hidden[0]), dtype=theano.config.floatX)), 0)  # initialize h=0 and y=0
    # h2_init = T.unbroadcast(T.constant(np.zeros((self.batch_size, self.n_hidden[1]), dtype=theano.config.floatX)), 0)  # initialize h=0 and y=0
    y_init = T.unbroadcast(T.constant(np.zeros((self.batch_size, self.n_output), dtype=theano.config.floatX)), 0)  # initialize h=0 and y=0

    # h1_init = self.theano_rng.uniform(size=self.h1.shape, low=0., high=.01, dtype=theano.config.floatX)  # initialize h1, h2 and y at random
    # h2_init = self.theano_rng.uniform(size=self.h2.shape, low=0., high=.01, dtype=theano.config.floatX)  # initialize h1, h2 and y at random
    # y_init = self.theano_rng.uniform(size=self.y.shape, low=0., high=.01, dtype=theano.config.floatX)  # initialize h1, h2 and y at random

    h1_init = 2. * (T.dot(rho(x_init), self.W1) + self.bh1)  # initialize h1, h2 and y by forward propagation
    h2_init = T.dot(rho(h1_init), self.W2) + self.bh2  # initialize h1, h2 and y by forward propagation
    # y_init = T.dot(rho(h2_init), self.W3) + self.by  # initialize h1, h2 and y by forward propagation

    updates_states = [(self.x, x_init), (self.h1, h1_init), (self.h2, h2_init), (self.y, y_init)]

    initialize = theano.function(
        inputs=[],
        outputs=[],
        updates=updates_states
    )

    return initialize
def _e_step(psamples, W_list, b_list, n_steps=100, eps=1e-5):
    """
    Performs 'n_steps' of mean-field inference (used to compute positive phase statistics).

    Parameters
    ----------
    psamples : array-like object of theano shared variables
        State of each layer of the DBM (during the inference process).
        psamples[0] points to the input.
    n_steps : integer
        Number of iterations of mean-field to perform.
    """
    depth = len(psamples)

    new_psamples = [T.unbroadcast(T.shape_padleft(psample)) for psample in psamples]

    # now alternate mean-field inference for even/odd layers
    def mf_iteration(*psamples):
        new_psamples = [p for p in psamples]
        for i in xrange(1, depth, 2):
            new_psamples[i] = hi_given(psamples, i, W_list, b_list)
        for i in xrange(2, depth, 2):
            new_psamples[i] = hi_given(psamples, i, W_list, b_list)

        score = 0.0
        for i in xrange(1, depth):
            score = T.maximum(T.mean(abs(new_psamples[i] - psamples[i])), score)

        return new_psamples, theano.scan_module.until(score < eps)

    new_psamples, updates = scan(
        mf_iteration,
        states=new_psamples,
        n_steps=n_steps)

    return [x[0] for x in new_psamples]
def _fprop_step(state_below, state_below_in, state_below_z, state_below_r,
                state_before, W_recurrent, W_in, b, W_z, U_z, b_z, W_r, U_r, b_r):
    print "state before 1", state_before, state_before.dtype, state_before.type, state_before.broadcastable
    # state_before = tensor.unbroadcast(state_before, 0)

    z = tensor.nnet.sigmoid(state_below_z + tensor.dot(state_before, U_z) + b_z)
    r = tensor.nnet.sigmoid(state_below_r + tensor.dot(state_before, U_r) + b_r)
    # print "r dim", r.type.ndim

    # W_rec = self.project1(W_recurrent, state_below)
    print "State below step", state_below, state_below.broadcastable, state_below.ndim
    print "state before 2", state_before, state_before.dtype, state_before.type, state_before.broadcastable

    W_rec = W_recurrent[state_below]
    bias = b[state_below]
    # !!! Move to efficient indexing
    # shape = (state_below.shape[0], state_below.shape[1], self.dim)
    pre_h = (
        state_below_in
        + r * tensor.batched_dot(state_before, W_rec)  # .reshape(shape)
        + bias
    )
    print "pre_h dim", pre_h, pre_h.type.ndim
    # print "W_recurrent[state_below] dim", W_rec, W_rec.ndim
    # print "W_rec * state before", (state_before * W_rec).ndim

    new_h = tensor.tanh(pre_h)
    # print "new_h", new_h
    h = z * state_before + (1. - z) * new_h
    print "final h dim", h, h.type, h.broadcastable, h.ndim
    h = tensor.unbroadcast(h, 0)
    return h
def __call__(self, target, context, mask=None):
    target = target * T.cast(T.shape_padright(mask), 'float32')
    padded_mask = self.get_padded_shuffled_mask(mask, pad=1)

    # target = theano.printing.Print('X::' + self.name)(target)
    X_shifted = T.concatenate([alloc_zeros_matrix(target.shape[0], 1, self.input_dim),
                               target[:, 0:-1, :]], axis=-2)
    # X = theano.printing.Print('X::' + self.name)(X)
    # X = T.zeros_like(target)
    # T.set_subtensor(X[:, 1:, :], target[:, 0:-1, :])
    X = X_shifted.dimshuffle((1, 0, 2))
    ctx_step = context.dimshuffle(('x', 0, 1))

    x_z = T.dot(X, self.W_z) + T.dot(ctx_step, self.C_z) + self.b_z
    x_r = T.dot(X, self.W_r) + T.dot(ctx_step, self.C_r) + self.b_r
    x_h = T.dot(X, self.W_h) + T.dot(ctx_step, self.C_h) + self.b_h

    h, updates = theano.scan(
        self._step,
        sequences=[x_z, x_r, x_h, padded_mask],
        outputs_info=T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.hidden_dim), 1),
        non_sequences=[self.U_z, self.U_r, self.U_h])

    # (batch_size, max_token_len, hidden_dim)
    h = h.dimshuffle((1, 0, 2))

    # (batch_size, max_token_len, vocab_size)
    predicts = T.dot(h, self.U_y) + T.dot(context.dimshuffle((0, 'x', 1)), self.C_y) + self.b_y  # + T.dot(X_shifted, self.W_y)
    predicts_flatten = predicts.reshape((-1, predicts.shape[2]))
    return T.nnet.softmax(predicts_flatten).reshape((predicts.shape[0],
                                                     predicts.shape[1],
                                                     predicts.shape[2]))
def train_one(self, x, target):
    x, target = tt.unbroadcast(x, 0), tt.unbroadcast(target, 0)  # F'ing scan
    states = {}
    for layer in self.layers:
        x, layer_state = layer.forward_pass_and_state(x, count_ops=True)
        states[layer] = layer_state
    loss = self.loss(x, target)

    param_grad_pairs = []
    grad = None
    for layer in self.layers[::-1]:
        grad, param_grads = layer.backward_pass(state=states[layer], grad=grad, cost=loss, count_ops=True)
        loss = None
        param_grad_pairs += list(izip_equal(layer.parameters, param_grads))

    all_params, all_param_grads = zip(*param_grad_pairs)
    self.optimizer.update_from_gradients(parameters=all_params, gradients=all_param_grads)
    return create_constant(0.)  # scan demands some return
def get_output(self, train=False, get_tuple=False):
    input_dict = self.get_input(train)

    ###
    # X_encoder = input_dict['encoder_context']
    X_encoder = input_dict[self.enc_name]
    X_encoder = X_encoder.reshape((X_encoder.shape[0], X_encoder.shape[1], -1))

    ###
    # X = input_dict['recurrent_context']
    X = input_dict[self.rec_name]
    X = X.dimshuffle((1, 0, 2))

    attention_encoder = T.dot(X_encoder, self.W_e2a)

    outputs, updates = theano.scan(
        self._step,
        sequences=[X],
        outputs_info=[
            T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.enc_dim), 1)
        ],
        non_sequences=[X_encoder, attention_encoder],
        truncate_gradient=self.truncate_gradient,
        go_backwards=self.go_backwards)

    if self.return_sequences and self.go_backwards:
        return outputs[::-1].dimshuffle((1, 0, 2))
    elif self.return_sequences:
        return outputs.dimshuffle((1, 0, 2))
    return outputs[-1]
def frame_level_rnn(input_sequences, input_sequences_lab, h0, reset):
    """
    input_sequences.shape: (batch size, n frames * FRAME_SIZE)
    h0.shape:              (batch size, N_RNN, DIM)
    reset.shape:           ()
    output.shape:          (batch size, n frames * FRAME_SIZE, DIM)
    """
    # print('-----------')
    # print(type(input_sequences))
    # print(input_sequences.shape)
    # print('-----------')
    frames = input_sequences.reshape(
        (input_sequences.shape[0],
         input_sequences.shape[1] // FRAME_SIZE,
         FRAME_SIZE))
    frames = T.concatenate([frames, input_sequences_lab], axis=2)

    # Rescale frames from ints in [0, Q_LEVELS) to floats in [-2, 2]
    # (a reasonable range to pass as inputs to the RNN)
    frames = (frames.astype('float32') / lib.floatX(Q_LEVELS / 2)) - lib.floatX(1)
    frames *= lib.floatX(2)  # (128, 64, 4)

    # Initial state of RNNs
    learned_h0 = lib.param(
        'FrameLevel.h0',
        numpy.zeros((N_RNN, H0_MULT * DIM), dtype=theano.config.floatX))
    # Handling LEARN_H0
    learned_h0.param = LEARN_H0
    learned_h0 = T.alloc(learned_h0, h0.shape[0], N_RNN, H0_MULT * DIM)
    learned_h0 = T.unbroadcast(learned_h0, 0, 1, 2)
    h0 = theano.ifelse.ifelse(reset, learned_h0, h0)

    # Handling RNN_TYPE
    # Handling SKIP_CONN
    if RNN_TYPE == 'GRU':
        rnns_out, last_hidden = lib.ops.stackedGRU('FrameLevel.GRU',
                                                   N_RNN,
                                                   FRAME_SIZE + LAB_DIM,
                                                   DIM,
                                                   frames,
                                                   h0=h0,
                                                   weightnorm=WEIGHT_NORM,
                                                   skip_conn=SKIP_CONN)
    elif RNN_TYPE == 'LSTM':
        rnns_out, last_hidden = lib.ops.stackedLSTM('FrameLevel.LSTM',
                                                    N_RNN,
                                                    FRAME_SIZE + LAB_DIM,
                                                    DIM,
                                                    frames,
                                                    h0=h0,
                                                    weightnorm=WEIGHT_NORM,
                                                    skip_conn=SKIP_CONN)

    # rnns_out (bs, seqlen, dim) (128, 64, 512)  # seqlen ~ N_FRAME = 64 = frames.shape[1]
    output = lib.ops.Linear(
        'FrameLevel.Output',
        DIM,
        FRAME_SIZE * DIM,
        rnns_out,
        initialization='he',
        weightnorm=WEIGHT_NORM)
    # output: (2, 9, 4*dim)
    output = output.reshape(
        (output.shape[0], output.shape[1] * FRAME_SIZE, DIM))
    # output: (2, 9*4, dim)

    return (output, last_hidden)
def scan(fn, sequences=None, outputs_info=None, non_sequences=None, n_steps=None, truncate_gradient=-1, go_backwards=False, mode=None, name=None, options=None, profile=False): """ This function constructs and applies a Scan op to the provided arguments. :param fn: ``fn`` is a function that describes the operations involved in one step of ``scan``. ``fn`` should construct variables describing the output of one iteration step. It should expect as input theano variables representing all the slices of the input sequences and previous values of the outputs, as well as all other arguments given to scan as ``non_sequences``. The order in which scan passes these variables to ``fn`` is the following : * all time slices of the first sequence * all time slices of the second sequence * ... * all time slices of the last sequence * all past slices of the first output * all past slices of the second otuput * ... * all past slices of the last output * all other arguments (the list given as `non_sequences` to scan) The order of the sequences is the same as the one in the list `sequences` given to scan. The order of the outputs is the same as the order of ``output_info``. For any sequence or output the order of the time slices is the same as the one in which they have been given as taps. For example if one writes the following : .. code-block:: python scan(fn, sequences = [ dict(input= Sequence1, taps = [-3,2,-1]) , Sequence2 , dict(input = Sequence3, taps = 3) ] , outputs_info = [ dict(initial = Output1, taps = [-3,-5]) , dict(initial = Output2, taps = None) , Output3 ] , non_sequences = [ Argument1, Argument 2]) ``fn`` should expect the following arguments in this given order: #. ``Sequence1[t-3]`` #. ``Sequence1[t+2]`` #. ``Sequence1[t-1]`` #. ``Sequence2[t]`` #. ``Sequence3[t+3]`` #. ``Output1[t-3]`` #. ``Output1[t-5]`` #. ``Output3[t-1]`` #. ``Argument1`` #. ``Argument2`` The list of ``non_sequences`` can also contain shared variables used in the function, though ``scan`` is able to figure those out on its own so they can be skipped. For the clarity of the code we recommand though to provide them to scan. To some extend ``scan`` can also figure out other ``non sequences`` (not shared) even if not passed to scan (but used by `fn`). A simple example of this would be : .. code-block:: python import theano.tensor as TT W = TT.matrix() W_2 = W**2 def f(x): return TT.dot(x,W_2) The function is expected to return two things. One is a list of outputs ordered in the same order as ``outputs_info``, with the difference that there should be only one output variable per output initial state (even if no tap value is used). Secondly `fn` should return an update dictionary (that tells how to update any shared variable after each iteration step). The dictionary can optionally be given as a list of tuples. There is no constraint on the order of these two list, ``fn`` can return either ``(outputs_list, update_dictionary)`` or ``(update_dictionary, outputs_list)`` or just one of the two (in case the other is empty). To use ``scan`` as a while loop, the user needs to change the function ``fn`` such that also a stopping condition is returned. To do so, he/she needs to wrap the condition in an ``until`` class. The condition should be returned as a third element, for example: .. code-block:: python ... 
return [y1_t, y2_t], {x:x+1}, theano.scan_module.until(x < 50) Note that a number of steps (considered in here as the maximum number of steps ) is still required even though a condition is passed (and it is used to allocate memory if needed). = {}): :param sequences: ``sequences`` is the list of Theano variables or dictionaries describing the sequences ``scan`` has to iterate over. If a sequence is given as wrapped in a dictionary, then a set of optional information can be provided about the sequence. The dictionary should have the following keys: * ``input`` (*mandatory*) -- Theano variable representing the sequence. * ``taps`` -- Temporal taps of the sequence required by ``fn``. They are provided as a list of integers, where a value ``k`` impiles that at iteration step ``t`` scan will pass to ``fn`` the slice ``t+k``. Default value is ``[0]`` Any Theano variable in the list ``sequences`` is automatically wrapped into a dictionary where ``taps`` is set to ``[0]`` :param outputs_info: ``outputs_info`` is the list of Theano variables or dictionaries describing the initial state of the outputs computed recurrently. When this initial states are given as dictionary optional information can be provided about the output corresponding to these initial states. The dictionary should have the following keys: * ``initial`` -- Theano variable that represents the initial state of a given output. In case the output is not computed recursively (think of a map) and does not require a initial state this field can be skiped. Given that only the previous time step of the output is used by ``fn`` the initial state should have the same shape as the output. If multiple time taps are used, the initial state should have one extra dimension that should cover all the possible taps. For example if we use ``-5``, ``-2`` and ``-1`` as past taps, at step 0, ``fn`` will require (by an abuse of notation) ``output[-5]``, ``output[-2]`` and ``output[-1]``. This will be given by the initial state, which in this case should have the shape (5,)+output.shape. If this variable containing the initial state is called ``init_y`` then ``init_y[0]`` *corresponds to* ``output[-5]``. ``init_y[1]`` *correponds to* ``output[-4]``, ``init_y[2]`` corresponds to ``output[-3]``, ``init_y[3]`` coresponds to ``output[-2]``, ``init_y[4]`` corresponds to ``output[-1]``. While this order might seem strange, it comes natural from splitting an array at a given point. Assume that we have a array ``x``, and we choose ``k`` to be time step ``0``. Then our initial state would be ``x[:k]``, while the output will be ``x[k:]``. Looking at this split, elements in ``x[:k]`` are ordered exactly like those in ``init_y``. * ``taps`` -- Temporal taps of the output that will be pass to ``fn``. They are provided as a list of *negative* integers, where a value ``k`` implies that at iteration step ``t`` scan will pass to ``fn`` the slice ``t+k``. ``scan`` will follow this logic if partial information is given: * If an output is not wrapped in a dictionary, ``scan`` will wrap it in one assuming that you use only the last step of the output (i.e. it makes your tap value list equal to [-1]). * If you wrap an output in a dictionary and you do not provide any taps but you provide an initial state it will assume that you are using only a tap value of -1. * If you wrap an output in a dictionary but you do not provide any initial state, it assumes that you are not using any form of taps. 
* If you provide a ``None`` instead of a variable or a empty dictionary ``scan`` assumes that you will not use any taps for this output (like for example in case of a map) If ``outputs_info`` is an empty list or None, ``scan`` assumes that no tap is used for any of the outputs. If information is provided just for a subset of the outputs an exception is raised (because there is no convention on how scan should map the provided information to the outputs of ``fn``) :param non_sequences: ``non_sequences`` is the list of arguments that are passed to ``fn`` at each steps. One can opt to exclude variable used in ``fn`` from this list as long as they are part of the computational graph, though for clarity we encourage not to do so. :param n_steps: ``n_steps`` is the number of steps to iterate given as an int or Theano scalar. If any of the input sequences do not have enough elements, scan will raise an error. If the *value is 0* the outputs will have *0 rows*. If the value is negative, ``scan`` will run backwards in time. If the ``go_backwards`` flag is already set and also ``n_steps`` is negative, ``scan`` will run forward in time. If n stpes is not provided, ``scan`` will figure out the amount of steps it should run given its input sequences. :param truncate_gradient: ``truncate_gradient`` is the number of steps to use in truncated BPTT. If you compute gradients through a scan op, they are computed using backpropagation through time. By providing a different value then -1, you choose to use truncated BPTT instead of classical BPTT, where you go for only ``truncate_gradient`` number of steps back in time. :param go_backwards: ``go_backwards`` is a flag indicating if ``scan`` should go backwards through the sequences. If you think of each sequence as indexed by time, making this flag True would mean that ``scan`` goes back in time, namely that for any sequence it starts from the end and goes towards 0. :param name: When profiling ``scan``, it is crucial to provide a name for any instance of ``scan``. The profiler will produce an overall profile of your code as well as profiles for the computation of one step of each instance of ``scan``. The ``name`` of the instance appears in those profiles and can greatly help to disambiguate information. :param mode: It is recommended to leave this argument to None, especially when profiling ``scan`` (otherwise the results are not going to be accurate). If you prefer the computations of one step of ``scan`` to be done differently then the entire function, you can use this parameter to describe how the computations in this loop are done (see ``theano.function`` for details about possible values and their meaning). :param profile: Flag or string. If true, or different from the empty string, a profile object will be created and attached to the inner graph of scan. In case ``profile`` is True, the profile object will have the name of the scan instance, otherwise it will have the passed string. Profile object collect (and print) information only when running the inner graph with the new cvm linker ( with default modes, other linkers this argument is useless) :rtype: tuple :return: tuple of the form (outputs, updates); ``outputs`` is either a Theano variable or a list of Theano variables representing the outputs of ``scan`` (in the same order as in ``outputs_info``). ``updates`` is a subclass of dictionary specifying the update rules for all shared variables used in scan This dictionary should be passed to ``theano.function`` when you compile your function. 
The change compared to a normal dictionary is that we validate that keys are SharedVariable and addition of those dictionary are validated to be consistent. """ # Note : see the internal documentation of the scan op for naming # conventions and all other details if options is None: options = {} rvals = scan_utils.canonical_arguments(sequences, outputs_info, non_sequences, go_backwards, n_steps) inputs, states_and_outputs_info, parameters, T = rvals # If we provided a known number of steps ( before compilation) # and if that number is 1 or -1, then we can skip the Scan Op, # and just apply the inner function once # To do that we check here to see the nature of n_steps T_value = None if isinstance(n_steps, (float, int)): T_value = int(n_steps) else: try: T_value = opt.get_constant_value(n_steps) except (TypeError, AttributeError): T_value = None if T_value in (1, -1): return one_step_scan(fn, inputs, states_and_outputs_info, parameters, truncate_gradient) # 1. Variable representing the current time step t = scalar_shared(numpy.int64(0), name='t') # 2. Allocate memory for the states of scan. mintaps = [] lengths = [] for pos, arg_info in enumerate(states_and_outputs_info): if arg_info.get('taps', None) == [-1]: mintaps.append(1) lengths.append(scalar_shared(numpy.int64(0), name='l%d' % pos)) arg_info['initial'] = scan_utils.expand(tensor.unbroadcast( tensor.shape_padleft(arg_info['initial']), 0), T) elif arg_info.get('taps', None): if numpy.any(numpy.array(arg_info.get('taps', [])) > 0): # Make sure we do not have requests for future values of a # sequence we can not provide such values raise ValueError('Can not use future taps of outputs', arg_info) mintap = abs(numpy.min(arg_info['taps'])) lengths.append(scalar_shared(numpy.int64(0), name='l%d' % pos)) mintaps.append(mintap) arg_info['initial'] = scan_utils.expand( arg_info['initial'][:mintap], T) else: mintaps.append(0) lengths.append(scalar_shared(numpy.int64(0), name='l%d' % pos)) # 3. Generate arguments for the function passed to scan. This will # function will return the outputs that need to be computed at every # timesteps inputs_slices = [input[t] for input in inputs] states_slices = [] for n, state in enumerate(states_and_outputs_info): # Check if it is actually a state and not an output if mintaps[n] != 0: for k in state['taps']: states_slices.append( state['initial'][(t + mintaps[n] + k) % lengths[n]]) # 4. Construct outputs that are to be computed by the inner # function of scan args = inputs_slices + states_slices + parameters cond, states_and_outputs, updates = \ scan_utils.get_updates_and_outputs(fn(*args)) # User is allowed to provide no information if it only behaves like a # map if (len(states_and_outputs) != len(states_and_outputs_info) and len(states_and_outputs_info) == 0): mintaps = [0] * len(states_and_outputs) # 5. Construct the scan op # 5.1 Construct list of shared variables with updates (those that # can be treated as states (i.e. 
of TensorType) and those that can not # (like Random States) if cond is not None: _cond = [cond] else: _cond = [] rvals = rebuild_collect_shared( states_and_outputs + _cond, updates=updates, rebuild_strict=True, copy_inputs_over=True, no_default_updates=False) # extracting the arguments input_variables, cloned_outputs, other_rval = rvals clone_d, update_d, update_expr, shared_inputs = other_rval additional_input_states = [] additional_output_states = [] additional_lengths = [] additional_mintaps = [] original_numeric_shared_variables = [] non_numeric_input_states = [] non_numeric_output_states = [] original_non_numeric_shared_variables = [] pos = len(lengths) for sv in shared_inputs: if sv in update_d: if isinstance(sv, TensorType): # We can treat it as a sit sot nw_state = scan_utils.expand( tensor.unbroadcast(tensor.shape_padleft(sv, 0), T)) additional_lengths.append(scalar_shared(numpy.int64(0), name='l%d' % pos)) pos = pos + 1 additional_mintaps.append(1) additional_input_states.append(nw_state) additional_output_states.append( scan_utils.clone(tensor.set_subtensor( nw_state[(t + 1) % additional_lengths[-1]], update_d[sv]))) original_numeric_shared_variables.append(sv) else: non_numeric_input_states.append(sv) non_numeric_output_states.append(update_d[sv]) original_non_numeric_shared_variables.append(sv) # 5.2 Collect inputs/outputs of the inner function inputs = [] outputs = [] for n, mintap in enumerate(mintaps): if mintap != 0: input_state = states_and_outputs_info[n]['initial'] inputs.append(input_state) outputs.append( tensor.set_subtensor( input_state[(t + mintap) % lengths[n]], states_and_outputs[n])) else: mem_buffer = scan_utils.allocate_memory( T, states_and_outputs_info[n], states_and_outputs[n]) inputs.append(output) outputs.append( tensor.set_subtensor(output[t % lengths[n]], states_and_outputs[n])) inputs.extend(additional_input_states) outputs.extend(additional_output_states) lengths.extend(additional_lengths) mintaps.extend(additional_mintaps) inputs.extend(non_numeric_input_states) outputs.extend(non_numeric_output_states) all_other_inputs = gof.graph.inputs(outputs) parameters = [x for x in all_other_inputs if (x not in inputs and x not in lengths and x is not t and isinstance(x, gof.Variable) and not isinstance(x, gof.Constant))] inputs.extend(parameters) # 5.3 Construct the the options dictionary options['name'] = name options['profile'] = profile options['mode'] = mode options['inplace'] = False options['gpu'] = False options['truncate_gradient'] = truncate_gradient options['hash_inner_graph'] = 0 # 5.4 Construct the ScanOp instance local_op = scan_op.ScanOp(inputs=inputs, outputs=outputs, lengths=lengths, switches=[], mintaps=mintaps, index=t, options=options, as_repeatUntil=cond) # Note that we get here all the outputs followed by the update rules to # the shared variables we had in our scan # we know that we have (in this given order): # * len(states_and_outputs) real outputs # * len(additional_input_states) updates for numeric shared variable # * len(non_numeric_input_states) updates for non numeric shared # variables scan_inputs = [T] + inputs scan_outputs_update_rules = scan_utils.to_list(local_op(*scan_inputs)) # 5.5 Collect outputs and add permutation object scan_outputs = [] for pos in xrange(len(states_and_outputs)): out = scan_utils.ScanPermutation(mintaps[pos])( scan_outputs_update_rules[pos], t) scan_outputs.append(out[mintap:]) # 5.6 Construct updates dictionary update_rules = scan_outputs_update_rules[len(states_and_outputs):] updates = {} for v, u 
in izip(original_numeric_shared_variables, update_rules[:len(additional_input_states)]): updates[v] = u[-1] for v, u in izip(original_non_numeric_shared_variables, update_rules[len(additional_input_states):]): updates[v] = u # Step 5.7 We are done and can return everything back to the user return scan_outputs, updates
def outputs_info(self, batch_size):
    ones_vector = T.ones((batch_size, 1))
    hid_init = T.dot(ones_vector, self.hid_init)
    hid_init = T.unbroadcast(hid_init, 0)
    return [hid_init, hid_init]
def scan(fn, sequences=None, states=None, params=None, n_steps=None, mode=None, name=None, profile=False, allow_gc=None): """ Similar to Theano's official scan, this function gives the user more control over the scan op, avoiding certain difficulties that arose from missing optimizations. :param fn: lambda function that describes one step of scan (see the official Theano scan function) :param sequences: similar to the official Theano's scan. This version of scan does not support taps for the sequences (it can only be a list of tensors). Scan assumes that sequences have the right length and it does not check for this. :param states: similar to outputs_info of the official scan function. There is one crucial difference though, namely that the `initial` key in the dictionary has been replaced by the 'membuf' key. This reflects the change of meaning. Instead of passing scan just the missing initial steps, one now has to pass a memory buffer in which scan will try to store its output. In this memory buffer the first entries should be set to the initial states of the corresponding states. Providing a memory buffer that has fewer entries than the number of steps means scan will only use that amount of memory. The user has to match the memory buffer size with the number of steps, otherwise scan will produce wrong results. Also if gradients are to be computed through the scan, the memory buffer should have the same length as the number of steps. For states that do not require an initial state, one has to provide a dictionary with a single key 'steps' that says how many intermediate results to store. See examples below for more insight. :param n_steps: This parameter is mandatory and it will represent the number of steps scan will do (scan will not check sequences or any other source of information to figure out how many steps it needs to do). :param mode: Same as for the official scan :param name: Same as for the official scan :param profile: Same as for the official scan Note: - there is no truncate / go_backwards anymore ! - the outputs returned by scan contain the initial states as well (i.e.
if I loop over k steps, with my smallest tap for an output -3 and keep all intermediate results, my output will be of length k+3 Examples: (a) if you do not want to store any intermediate results (just the last one) # The memory buffer can be the initial state, just that we need to # add one extra dimension in front of it state = TT.unbroadcast(TT.shape_padleft(x0),0) out,_ = scan(lambda x:x+1, states = state, n_steps = 5) # Once we get our result we need to remove the extra dimension out = out[0] (b) if you want to keep every intermediate result state = TT.alloc(TT.constant(0), 6, x0.shape[0]) state = TT.set_subtensor(state[0], x0) out,_ = scan(lambda x:x+1, states = state, n_steps = 5) out = out[1:] """ def wrap_into_list(x): ''' Wrap the input into a list if it is not already a list ''' if x is None: return [] elif not isinstance(x, (list, tuple)): return [x] else: return list(x) seqs = wrap_into_list(sequences) outs_info = wrap_into_list(states) if allow_gc is None: allow_gc = config.scan.allow_gc # Make sure we get rid of numpy arrays or ints or anything like that # passed as inputs to scan non_seqs = [] for elem in wrap_into_list(params): if not isinstance(elem, gof.Variable): non_seqs.append(tensor.as_tensor_variable(elem)) else: non_seqs.append(elem) # If we provided a known number of steps ( before compilation) # and if that number is 1 or -1, then we can skip the Scan Op, # and just apply the inner function once # To do that we check here to see the nature of n_steps n_fixed_steps = None if isinstance(n_steps, (float, int)): n_fixed_steps = int(n_steps) else: try: n_fixed_steps = opt.get_scalar_constant_value(n_steps) except tensor.basic.NotScalarConstantError: n_fixed_steps = None # Check n_steps is an int if (hasattr(n_steps, 'dtype') and str(n_steps.dtype)[:3] not in ('uin', 'int')): raise ValueError(' n_steps must be an int. dtype provided ' 'is %s' % n_steps.dtype) # compute number of sequences and number of outputs n_seqs = len(seqs) n_outs = len(outs_info) return_steps = OrderedDict() # wrap outputs info in a dictionary if they are not already in one for i in xrange(n_outs): if outs_info[i] is not None: if not isinstance(outs_info[i], dict): # by default any output has a tap value of -1 outs_info[i] = dict(membuf=outs_info[i], taps=[-1]) elif (not outs_info[i].get('membuf', None) and outs_info[i].get('taps', None)): # ^ no initial state but taps provided raise ValueError(('If you are using slices of an output ' 'you need to provide a memory buffer for ' 'the state '), outs_info[i]) elif (outs_info[i].get('membuf', None) and not outs_info[i].get('taps', None)): # ^ initial state but taps not provided if 'taps' in outs_info[i]: # ^ explicitly provided a None for taps _logger.warning( 'Output %s (index %d) has a memory ' 'buffer but taps is explicitly set to None ', getattr(outs_info[i]['membuf'], 'name', 'None'), i) outs_info[i]['taps'] = [-1] else: # if a None is provided as the output info we replace it # with a dict(steps=n_steps) to simplify handling outs_info[i] = dict(steps=n_steps) ## # Step 2.
Generate inputs and outputs of the inner functions # for compiling a dummy function (Iteration #1) ## # create theano inputs for the recursive function # note : this is a first batch of possible inputs that will # be compiled in a dummy function; we used this dummy # function to detect shared variables and their updates # and to construct a new and complete list of inputs and # outputs n_seqs = 0 scan_seqs = [] # Variables passed as inputs to the scan op inner_seqs = [] # Variables passed as inputs to the inner function inner_slices = [] # Actual slices if scan is removed from the picture # go through sequences picking up time slices as needed for i, seq in enumerate(seqs): if isinstance(seq, dict): seq = seq['input'] actual_slice = seq[0] _seq_val = tensor.as_tensor_variable(seq) _seq_val_slice = _seq_val[0] nw_slice = _seq_val_slice.type() # Try to transfer test_value to the new variable if config.compute_test_value != 'off': try: nw_slice.tag.test_value = gof.Op._get_test_value( _seq_val_slice) except AttributeError as e: if config.compute_test_value != 'ignore': # No need to print a warning or raise an error now, # it will be done when fn will be called. _logger.info(('Cannot compute test value for ' 'the inner function of scan, input value ' 'missing %s'), e) if seq.name: nw_slice.name = seq.name + '[t]' scan_seqs.append(_seq_val) inner_seqs.append(nw_slice) inner_slices.append(actual_slice) n_seqs += 1 actual_n_steps = tensor.as_tensor(n_steps) # Conventions : # mit_mot = multiple input taps, multiple output taps ( only provided # by the gradient function ) # mit_sot = multiple input taps, single output tap (t + 0) # sit_sot = single input tap, single output tap (t + 0) # nit_sot = no input tap, single output tap (t + 0) # MIT_MOT -- not provided by the user only by the grad function n_mit_mot = 0 n_mit_mot_outs = 0 mit_mot_scan_inputs = [] mit_mot_inner_inputs = [] mit_mot_inner_outputs = [] mit_mot_out_slices = [] mit_mot_rightOrder = [] # SIT_SOT -- provided by the user n_mit_sot = 0 mit_sot_scan_inputs = [] mit_sot_inner_inputs = [] mit_sot_inner_slices = [] mit_sot_inner_outputs = [] mit_sot_return_steps = OrderedDict() mit_sot_tap_array = [] mit_sot_rightOrder = [] n_sit_sot = 0 sit_sot_scan_inputs = [] sit_sot_inner_inputs = [] sit_sot_inner_slices = [] sit_sot_inner_outputs = [] sit_sot_return_steps = OrderedDict() sit_sot_rightOrder = [] nit_sot_steps = [] # go through outputs picking up time slices as needed for i, init_out in enumerate(outs_info): # Note that our convention dictates that if an output uses # just the previous time step, as a initial state we will only # provide a tensor of the same dimension as one time step; This # makes code much cleaner for those who do not use taps. Otherwise # they would always had to shape_padleft the initial state .. # which is ugly # Note, 'taps' might not be in the dictionary if 'taps' in init_out and init_out['taps'] == [-1]: actual_arg = init_out['membuf'] arg = safe_new(init_out['membuf'][0]) if isinstance(arg, tensor.Constant): # safe new returns a clone of the constants, but that is not # what we need for initial states arg = arg.type() # Try to transfer test_value to the new variable if config.compute_test_value != 'off': try: arg.tag.test_value = gof.Op._get_test_value(actual_arg) except AttributeError as e: if config.compute_test_value != 'ignore': # No need to print a warning or raise an error now, # it will be done when fn will be called. 
_logger.info(('Cannot compute test value for the ' 'inner function of scan, input value missing %s'), e) if getattr(init_out['membuf'], 'name', None) is not None: arg.name = init_out['membuf'].name + '[t-1]' # We need now to allocate space for storing the output and copy # the initial state over. We do this using the expand function # defined in scan utils sit_sot_scan_inputs.append(actual_arg) sit_sot_inner_slices.append(actual_arg[0]) if i in return_steps: sit_sot_return_steps[n_sit_sot] = return_steps[i] sit_sot_inner_inputs.append(arg) sit_sot_rightOrder.append(i) n_sit_sot += 1 elif init_out.get('taps', None): if numpy.any(numpy.array(init_out.get('taps', [])) > 0): # Make sure we do not have requests for future values of a # sequence we can not provide such values raise ValueError('Can not use future taps of outputs', init_out) # go through the taps mintap = abs(numpy.min(init_out['taps'])) mit_sot_tap_array.append(init_out['taps']) idx_offset = abs(numpy.min(init_out['taps'])) # Sequence mit_sot_scan_inputs.append(init_out['membuf']) if i in return_steps: mit_sot_return_steps[n_mit_sot] = return_steps[i] mit_sot_rightOrder.append(i) n_mit_sot += 1 for k in init_out['taps']: # create a new slice actual_nw_slice = init_out['membuf'][k + mintap] _init_out_var = tensor.as_tensor_variable(init_out['membuf']) _init_out_var_slice = _init_out_var[k + mintap] nw_slice = _init_out_var_slice.type() # Try to transfer test_value to the new variable if config.compute_test_value != 'off': try: nw_slice.tag.test_value = gof.Op._get_test_value( _init_out_var_slice) except AttributeError as e: if config.compute_test_value != 'ignore': # No need to print a warning or raise an error now, # it will be done when fn will be called. _logger.info(('Cannot compute test value for ' 'the inner function of scan, input value ' 'missing. 
%s'), e) # give it a name or debugging and pretty printing if getattr(init_out['membuf'], 'name', None) is not None: if k > 0: nw_slice.name = (init_out['membuf'].name + '[t+%d]' % k) elif k == 0: nw_slice.name = init_out['membuf'].name + '[t]' else: nw_slice.name = (init_out['membuf'].name + '[t%d]' % k) mit_sot_inner_inputs.append(nw_slice) mit_sot_inner_slices.append(actual_nw_slice) else: pass # Re-order args max_mit_sot = numpy.max([-1] + mit_sot_rightOrder) + 1 max_sit_sot = numpy.max([-1] + sit_sot_rightOrder) + 1 n_elems = numpy.max([max_mit_sot, max_sit_sot]) _ordered_args = [[] for x in xrange(n_elems)] offset = 0 for idx in xrange(n_mit_sot): n_inputs = len(mit_sot_tap_array[idx]) if n_fixed_steps == 1: _ordered_args[mit_sot_rightOrder[idx]] = \ mit_sot_inner_slices[offset:offset + n_inputs] else: _ordered_args[mit_sot_rightOrder[idx]] = \ mit_sot_inner_inputs[offset:offset + n_inputs] offset += n_inputs for idx in xrange(n_sit_sot): if n_fixed_steps == 1: _ordered_args[sit_sot_rightOrder[idx]] = \ [sit_sot_inner_slices[idx]] else: _ordered_args[sit_sot_rightOrder[idx]] = \ [sit_sot_inner_inputs[idx]] ordered_args = [] for ls in _ordered_args: ordered_args += ls if n_fixed_steps == 1: args = (inner_slices + ordered_args + non_seqs) else: args = (inner_seqs + ordered_args + non_seqs) # add only the non-shared variables and non-constants to the arguments of # the dummy function [ a function should not get shared variables or # constants as input ] dummy_args = [arg for arg in args if (not isinstance(arg, SharedVariable) and not isinstance(arg, tensor.Constant))] # when we apply the lambda expression we get a mixture of update rules # and outputs that needs to be separated lambda_result = fn(*args) condition, outputs, updates = scan_utils.get_updates_and_outputs( lambda_result) if condition is not None: as_while = True else: as_while = False ## # Step 3. Check if we actually need scan and remove it if we don't ## if n_fixed_steps == 1: # We do not need to use the scan op anymore, so we can just return # the outputs and updates we have if condition is not None: _logger.warning(('When the number of steps is fixed and equal ' 'to 1, the provided stopping condition, ', str(condition), ' is ignored')) for pos, inner_out in enumerate(outputs): # we need to see if we need to pad our sequences with an # unbroadcastable dimension; case example : we return an # output for which we want all intermediate. If n_steps is 1 # then, if we return the output as given by the innner function # this will represent only a slice and it will have one # dimension less. if (isinstance(inner_out.type, tensor.TensorType) and return_steps.get(pos, 0) != 1): outputs[pos] = tensor.unbroadcast( tensor.shape_padleft(inner_out), 0) if len(outputs) == 1: outputs = outputs[0] return (outputs, updates) ## # Step 4. 
Compile the dummy function ## # We can now compile a dummy function just to see what shared variable # we have and what are their update rules (note that the user has # the option not to pass the shared variable to scan, so we need to # pick them manually and add them to scan) # make the compilation as fast as possible by not applying any # optimization or conversion to C [ note this region is not important # for performance so we can do stuff as unoptimal as we wish ] # extract still missing inputs (there still might be so) and add them # as non sequences at the end of our args fake_nonseqs = [x.type() for x in non_seqs] fake_outputs = scan_utils.clone(outputs + list(updates.values()), replace=dict(izip(non_seqs, fake_nonseqs))) all_inputs = ifilter( lambda x: (isinstance(x, gof.Variable) and not isinstance(x, SharedVariable) and not isinstance(x, gof.Constant)), gof.graph.inputs(fake_outputs)) extra_inputs = [x for x in all_inputs if x not in args + fake_nonseqs] non_seqs += extra_inputs # Note we do not use all_inputs directly since the order of variables # in args is quite important dummy_args += extra_inputs dummy_outs = outputs if condition is not None: dummy_outs.append(condition) # If we use a regular dict here, the results are non-deterministic if not isinstance(updates, (list, tuple)): if isinstance(updates, dict) and \ not isinstance(updates, OrderedDict): warnings.warn("Using non-deterministic dictionary.") dummy_f = function(dummy_args, dummy_outs, updates=updates, mode=compile.mode.Mode(linker='py', optimizer=None), on_unused_input='ignore') ## # Step 5. Re-arange inputs of scan into a more strict order ## # Step 5.0 Check the outputs of the dummy function to see if they # match with user provided data # if the number of outputs to the function does not match the number of # assumed outputs until now (provided by the user) there can be # only one explanation: No information is provided for any of the # outputs (i.e. we are dealing with a map) tmp_dummy_f_outs = len(dummy_f.maker.outputs) if as_while: tmp_dummy_f_outs -= 1 if not (tmp_dummy_f_outs == n_outs or outs_info == []): raise ValueError('Please provide None as outputs_info for ' 'any output that does not feed back into ' 'scan (i.e. 
it behaves like a map) ') if outs_info == []: n_outs = len(dummy_f.maker.outputs) if as_while: n_outs = n_outs - 1 outs_info = [dict(steps=n_steps) for x in xrange(n_outs)] # Step 5.1 Outputs with taps different then -1 for i, out in enumerate(outs_info): if 'taps' in out and out['taps'] != [-1]: mit_sot_inner_outputs.append(outputs[i]) # Step 5.2 Outputs with tap equal to -1 for i, out in enumerate(outs_info): if 'taps' in out and out['taps'] == [-1]: sit_sot_inner_outputs.append(outputs[i]) # Step 5.3 Outputs that correspond to update rules of shared variables givens = OrderedDict() n_shared_outs = 0 shared_scan_inputs = [] shared_inner_inputs = [] shared_inner_outputs = [] for input in dummy_f.maker.expanded_inputs: if isinstance(input.variable, SharedVariable) and input.update: new_var = safe_new(input.variable) if getattr(input.variable, 'name', None) is not None: new_var.name = input.variable.name + '_copy' shared_inner_inputs.append(new_var) shared_scan_inputs.append(input.variable) shared_inner_outputs.append(input.update) givens[input.variable] = new_var n_shared_outs += 1 # Step 5.4 Outputs with no taps used in the input n_nit_sot = 0 nit_sot_inner_outputs = [] nit_sot_return_steps = OrderedDict() nit_sot_rightOrder = [] for i, out in enumerate(outs_info): if not 'taps' in out: nit_sot_inner_outputs.append(outputs[i]) if i in return_steps: nit_sot_return_steps[n_nit_sot] = return_steps[i] nit_sot_rightOrder.append(i) nit_sot_steps.append(out['steps']) n_nit_sot += 1 # Step 5.5 all other arguments including extra inputs other_scan_args = [] other_inner_args = [] other_scan_args += [arg for arg in non_seqs if (not isinstance(arg, SharedVariable) and not isinstance(arg, tensor.Constant))] # Step 5.6 all shared variables with no update rules other_inner_args += [safe_new(arg, '_copy') for arg in non_seqs if (not isinstance(arg, SharedVariable) and not isinstance(arg, tensor.Constant))] givens.update(dict(izip(other_scan_args, other_inner_args))) other_shared_scan_args = [arg.variable for arg in dummy_f.maker.expanded_inputs if (isinstance(arg.variable, SharedVariable) and not arg.update)] other_shared_inner_args = [safe_new(arg.variable, '_copy') for arg in dummy_f.maker.expanded_inputs if (isinstance(arg.variable, SharedVariable) and not arg.update)] givens.update(dict(izip(other_shared_scan_args, other_shared_inner_args))) ## # Step 6. Re-order the outputs and clone them replacing things # using the givens ## inner_inputs = (inner_seqs + mit_mot_inner_inputs + mit_sot_inner_inputs + sit_sot_inner_inputs + shared_inner_inputs + other_shared_inner_args + other_inner_args) inner_outs = (mit_mot_inner_outputs + mit_sot_inner_outputs + sit_sot_inner_outputs + nit_sot_inner_outputs + shared_inner_outputs) if condition is not None: inner_outs.append(condition) new_givens = OrderedDict() for w, w_copy in iteritems(givens): new_givens[w] = w.type.filter_variable(w_copy) new_outs = scan_utils.clone(inner_outs, replace=new_givens) ## # Step 7. 
Create the Scan Op ## tap_array = mit_sot_tap_array + [[-1] for x in xrange(n_sit_sot)] info = OrderedDict() info['tap_array'] = tap_array info['n_seqs'] = n_seqs info['n_mit_mot'] = n_mit_mot info['n_mit_mot_outs'] = n_mit_mot_outs info['mit_mot_out_slices'] = mit_mot_out_slices info['n_mit_sot'] = n_mit_sot info['n_sit_sot'] = n_sit_sot info['n_shared_outs'] = n_shared_outs info['n_nit_sot'] = n_nit_sot info['truncate_gradient'] = -1 info['name'] = name info['mode'] = mode info['destroy_map'] = OrderedDict() info['inplace'] = False info['gpu'] = False info['as_while'] = as_while info['profile'] = profile info['_scan_savemem_visited'] = True info['allow_gc'] = allow_gc local_op = scan_op.Scan(inner_inputs, new_outs, info) ## # Step 8. Compute the outputs using the scan op ## _scan_inputs = (scan_seqs + mit_mot_scan_inputs + mit_sot_scan_inputs + sit_sot_scan_inputs + shared_scan_inputs + nit_sot_steps + other_shared_scan_args + other_scan_args) scan_inputs = [] for arg in [actual_n_steps] + _scan_inputs: if not isinstance(arg, gof.Variable): arg = tensor.as_tensor_variable(arg) scan_inputs += [arg] scan_outs = local_op(*scan_inputs) if type(scan_outs) not in (list, tuple): scan_outs = [scan_outs] ## # Step 9. Figure out which outs are update rules for shared variables # and so on ... ## update_map = OrderedUpdates() offset = n_mit_mot offsets = [abs(numpy.min(x)) for x in mit_sot_tap_array] mit_sot_outs = scan_outs[offset:offset + n_mit_sot] offset += n_mit_sot offsets = [1 for x in xrange(n_sit_sot)] sit_sot_outs = scan_outs[offset:offset + n_sit_sot] offset += n_sit_sot nit_sot_outs = scan_outs[offset:offset + n_nit_sot] offset += n_nit_sot for idx, update_rule in enumerate( scan_outs[offset:offset + n_shared_outs]): update_map[shared_scan_inputs[idx]] = update_rule _scan_out_list = (mit_sot_outs + sit_sot_outs + nit_sot_outs) # Step 10. I need to reorder the outputs to be in the order expected by # the user rightOrder = (mit_sot_rightOrder + sit_sot_rightOrder + nit_sot_rightOrder) scan_out_list = [None] * len(rightOrder) for idx, pos in enumerate(rightOrder): scan_out_list[pos] = _scan_out_list[idx] if len(scan_out_list) == 1: scan_out_list = scan_out_list[0] elif len(scan_out_list) == 0: scan_out_list = None assert isinstance(update_map, OrderedDict) return (scan_out_list, update_map)
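Following the docstring's own examples, a hedged sketch of how this memory-buffer `scan` is called (assuming the function above is in scope as `scan`): case (a) keeps only the last value, case (b) keeps the whole trajectory. The buffer dtype is matched to `x0` here so `set_subtensor` does not downcast.

import theano
import theano.tensor as TT

x0 = TT.vector('x0')

# (a) store only the last result: the buffer is the initial state itself,
#     padded with one leading dimension so scan can overwrite its single slot;
#     out[0] then holds the final value, as in the docstring example
state = TT.unbroadcast(TT.shape_padleft(x0), 0)
out, _ = scan(lambda x: x + 1, states=state, n_steps=5)
last = out[0]

# (b) store every intermediate result: allocate a buffer with n_steps + 1 rows
#     and write the initial state into its first entry
buf = TT.zeros((6, x0.shape[0]), dtype=x0.dtype)
buf = TT.set_subtensor(buf[0], x0)
outs, _ = scan(lambda x: x + 1, states=buf, n_steps=5)
trajectory = outs[1:]              # entries 1..5 hold the five updates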
def _zoom(a_lo, a_hi, phi_lo, phi_hi, derphi_lo, phi, derphi, phi0, derphi0, c1, c2, n_iters=10, profile=False): """ TODO: re-write me Part of the optimization algorithm in `scalar_search_wolfe2`. a_lo : scalar (step size) a_hi : scalar (step size) phi_lo : scalar (value of f at a_lo) phi_hi : scalar ( value of f at a_hi) derphi_lo : scalar ( value of derivative at a_lo) phi : callable -> generates computational graph derphi: callable -> generates computational graph phi0 : scalar ( value of f at 0) derphi0 : scalar (value of the derivative at 0) c1 : scalar (wolfe parameter) c2 : scalar (wolfe parameter) profile: if you want printouts of profiling information """ # Function reprensenting the computations of one step of the while loop def while_zoom(phi_rec, a_rec, a_lo, a_hi, phi_hi, phi_lo, derphi_lo, a_star, val_star, valprime): # interpolate to find a trial step length between a_lo and # a_hi Need to choose interpolation here. Use cubic # interpolation and then if the result is within delta * # dalpha or outside of the interval bounded by a_lo or a_hi # then use quadratic interpolation, if the result is still too # close, then use bisection dalpha = a_hi - a_lo a = TT.switch(dalpha < zero, a_hi, a_lo) b = TT.switch(dalpha < zero, a_lo, a_hi) # minimizer of cubic interpolant # (uses phi_lo, derphi_lo, phi_hi, and the most recent value of phi) # # if the result is too close to the end points (or out of the # interval) then use quadratic interpolation with phi_lo, # derphi_lo and phi_hi if the result is stil too close to the # end points (or out of the interval) then use bisection # cubic interpolation cchk = delta1 * dalpha a_j_cubic = _cubicmin(a_lo, phi_lo, derphi_lo, a_hi, phi_hi, a_rec, phi_rec) # quadric interpolation qchk = delta2 * dalpha a_j_quad = _quadmin(a_lo, phi_lo, derphi_lo, a_hi, phi_hi) cond_q = lazy_or('condq', TT.isnan(a_j_quad), a_j_quad > b - qchk, a_j_quad < a + qchk) a_j_quad = TT.switch(cond_q, a_lo + numpy.asarray(0.5, dtype=theano.config.floatX) * \ dalpha, a_j_quad) # pick between the two .. 
cond_c = lazy_or( 'condc', TT.isnan(a_j_cubic), TT.bitwise_or(a_j_cubic > b - cchk, a_j_cubic < a + cchk)) # this lazy if actually decides if we need to run the quadric # interpolation a_j = TT.switch(cond_c, a_j_quad, a_j_cubic) #a_j = ifelse(cond_c, a_j_quad, a_j_cubic) # Check new value of a_j phi_aj = phi(a_j) derphi_aj = derphi(a_j) stop = lazy_and( 'stop', TT.bitwise_and(phi_aj <= phi0 + c1 * a_j * derphi0, phi_aj < phi_lo), abs(derphi_aj) <= -c2 * derphi0) cond1 = TT.bitwise_or(phi_aj > phi0 + c1 * a_j * derphi0, phi_aj >= phi_lo) cond2 = derphi_aj * (a_hi - a_lo) >= zero # Switches just make more sense here because they have a C # implementation and they get composed phi_rec = ifelse(cond1, phi_hi, TT.switch(cond2, phi_hi, phi_lo), name='phi_rec') a_rec = ifelse(cond1, a_hi, TT.switch(cond2, a_hi, a_lo), name='a_rec') a_hi = ifelse(cond1, a_j, TT.switch(cond2, a_lo, a_hi), name='a_hi') phi_hi = ifelse(cond1, phi_aj, TT.switch(cond2, phi_lo, phi_hi), name='phi_hi') a_lo = TT.switch(cond1, a_lo, a_j) phi_lo = TT.switch(cond1, phi_lo, phi_aj) derphi_lo = ifelse(cond1, derphi_lo, derphi_aj, name='derphi_lo') a_star = a_j val_star = phi_aj valprime = ifelse(cond1, nan, TT.switch(cond2, derphi_aj, nan), name='valprime') return ([ phi_rec, a_rec, a_lo, a_hi, phi_hi, phi_lo, derphi_lo, a_star, val_star, valprime ], theano.scan_module.scan_utils.until(stop)) maxiter = n_iters # cubic interpolant check delta1 = TT.constant(numpy.asarray(0.2, dtype=theano.config.floatX)) # quadratic interpolant check delta2 = TT.constant(numpy.asarray(0.1, dtype=theano.config.floatX)) phi_rec = phi0 a_rec = zero # Initial iteration dalpha = a_hi - a_lo a = TT.switch(dalpha < zero, a_hi, a_lo) b = TT.switch(dalpha < zero, a_lo, a_hi) #a = ifelse(dalpha < 0, a_hi, a_lo) #b = ifelse(dalpha < 0, a_lo, a_hi) # minimizer of cubic interpolant # (uses phi_lo, derphi_lo, phi_hi, and the most recent value of phi) # # if the result is too close to the end points (or out of the # interval) then use quadratic interpolation with phi_lo, # derphi_lo and phi_hi if the result is stil too close to the # end points (or out of the interval) then use bisection # quadric interpolation qchk = delta2 * dalpha a_j = _quadmin(a_lo, phi_lo, derphi_lo, a_hi, phi_hi) cond_q = lazy_or('mcond_q', TT.isnan(a_j), TT.bitwise_or(a_j > b - qchk, a_j < a + qchk)) a_j = TT.switch(cond_q, a_lo + numpy.asarray(0.5, dtype=theano.config.floatX) * \ dalpha, a_j) # Check new value of a_j phi_aj = phi(a_j) derphi_aj = derphi(a_j) cond1 = TT.bitwise_or(phi_aj > phi0 + c1 * a_j * derphi0, phi_aj >= phi_lo) cond2 = derphi_aj * (a_hi - a_lo) >= zero # Switches just make more sense here because they have a C # implementation and they get composed phi_rec = ifelse(cond1, phi_hi, TT.switch(cond2, phi_hi, phi_lo), name='mphirec') a_rec = ifelse(cond1, a_hi, TT.switch(cond2, a_hi, a_lo), name='marec') a_hi = ifelse(cond1, a_j, TT.switch(cond2, a_lo, a_hi), name='mahi') phi_hi = ifelse(cond1, phi_aj, TT.switch(cond2, phi_lo, phi_hi), name='mphihi') onlyif = lazy_and( 'only_if', TT.bitwise_and(phi_aj <= phi0 + c1 * a_j * derphi0, phi_aj < phi_lo), abs(derphi_aj) <= -c2 * derphi0) a_lo = TT.switch(cond1, a_lo, a_j) phi_lo = TT.switch(cond1, phi_lo, phi_aj) derphi_lo = ifelse(cond1, derphi_lo, derphi_aj, name='derphi_lo_main') phi_rec.name = 'phi_rec' a_rec.name = 'a_rec' a_lo.name = 'a_lo' a_hi.name = 'a_hi' phi_hi.name = 'phi_hi' phi_lo.name = 'phi_lo' derphi_lo.name = 'derphi_lo' vderphi_aj = ifelse(cond1, nan, TT.switch(cond2, derphi_aj, nan), name='vderphi_aj') 
states = [] states += [TT.unbroadcast(TT.shape_padleft(phi_rec), 0)] states += [TT.unbroadcast(TT.shape_padleft(a_rec), 0)] states += [TT.unbroadcast(TT.shape_padleft(a_lo), 0)] states += [TT.unbroadcast(TT.shape_padleft(a_hi), 0)] states += [TT.unbroadcast(TT.shape_padleft(phi_hi), 0)] states += [TT.unbroadcast(TT.shape_padleft(phi_lo), 0)] states += [TT.unbroadcast(TT.shape_padleft(derphi_lo), 0)] states += [TT.unbroadcast(TT.shape_padleft(zero), 0)] states += [TT.unbroadcast(TT.shape_padleft(zero), 0)] states += [TT.unbroadcast(TT.shape_padleft(zero), 0)] # print'while_zoom' outs, updates = scan(while_zoom, states=states, n_steps=maxiter, name='while_zoom', mode=theano.Mode(linker='cvm_nogc'), profile=profile) # print 'done_while' a_star = ifelse(onlyif, a_j, outs[7][0], name='astar') val_star = ifelse(onlyif, phi_aj, outs[8][0], name='valstar') valprime = ifelse(onlyif, vderphi_aj, outs[9][0], name='valprime') ## WARNING !! I ignore updates given by scan which I should not do !!! return a_star, val_star, valprime
def big_frame_level_rnn(input_sequences, h0, reset): """ input_sequences.shape: (batch size, n big frames * BIG_FRAME_SIZE) h0.shape: (batch size, N_BIG_RNN, BIG_DIM) reset.shape: () output[0].shape: (batch size, n frames, DIM) output[1].shape: same as h0.shape output[2].shape: (batch size, seq len, Q_LEVELS) """ frames = input_sequences.reshape( (input_sequences.shape[0], input_sequences.shape[1] // BIG_FRAME_SIZE, BIG_FRAME_SIZE)) # Rescale frames from ints in [0, Q_LEVELS) to floats in [-2, 2] # (a reasonable range to pass as inputs to the RNN) frames = (frames.astype('float32') / lib.floatX(Q_LEVELS / 2)) - lib.floatX(1) frames *= lib.floatX(2) # Initial state of RNNs learned_h0 = lib.param( 'BigFrameLevel.h0', numpy.zeros((N_BIG_RNN, H0_MULT * BIG_DIM), dtype=theano.config.floatX)) # Handling LEARN_H0 learned_h0.param = LEARN_H0 learned_h0 = T.alloc(learned_h0, h0.shape[0], N_BIG_RNN, H0_MULT * BIG_DIM) learned_h0 = T.unbroadcast(learned_h0, 0, 1, 2) h0 = theano.ifelse.ifelse(reset, learned_h0, h0) # Handling RNN_TYPE # Handling SKIP_CONN if RNN_TYPE == 'GRU': rnns_out, last_hidden = lib.ops.stackedGRU('BigFrameLevel.GRU', N_BIG_RNN, BIG_FRAME_SIZE, BIG_DIM, frames, h0=h0, weightnorm=WEIGHT_NORM, skip_conn=SKIP_CONN) elif RNN_TYPE == 'LSTM': rnns_out, last_hidden = lib.ops.stackedLSTM('BigFrameLevel.LSTM', N_BIG_RNN, BIG_FRAME_SIZE, BIG_DIM, frames, h0=h0, weightnorm=WEIGHT_NORM, skip_conn=SKIP_CONN) output = lib.ops.Linear('BigFrameLevel.Output', BIG_DIM, DIM * BIG_FRAME_SIZE / FRAME_SIZE, rnns_out, initialization='he', weightnorm=WEIGHT_NORM) output = output.reshape( (output.shape[0], output.shape[1] * BIG_FRAME_SIZE / FRAME_SIZE, DIM)) independent_preds = lib.ops.Linear('BigFrameLevel.IndependentPreds', BIG_DIM, Q_LEVELS * BIG_FRAME_SIZE, rnns_out, initialization='he', weightnorm=WEIGHT_NORM) independent_preds = independent_preds.reshape( (independent_preds.shape[0], independent_preds.shape[1] * BIG_FRAME_SIZE, Q_LEVELS)) return (output, last_hidden, independent_preds)
def sample_level_rnn(input_sequences, h0, reset): """ input_sequences.shape: (batch size, seq len) h0.shape: (batch size, N_RNN, DIM) reset.shape: () output.shape: (batch size, seq len, DIM) """ # Embedded inputs # Handling EMB_SIZE ################# FRAME_SIZE = EMB_SIZE frames = lib.ops.Embedding('SampleLevel.Embedding', Q_LEVELS, EMB_SIZE, input_sequences) # Real-valued inputs #################### # # 'frames' of size 1 # FRAME_SIZE = 1 # frames = input_sequences.reshape(( # input_sequences.shape[0], # input_sequences.shape[1], # 1 # )) # # Rescale frames from ints in [0, Q_LEVELS) to floats in [-2, 2] # # (a reasonable range to pass as inputs to the RNN) # frames = (frames.astype('float32') / lib.floatX(Q_LEVELS/2)) - lib.floatX(1) # frames *= lib.floatX(2) # Initial state of RNNs learned_h0 = lib.param( 'SampleLevel.h0', numpy.zeros((N_RNN, H0_MULT * DIM), dtype=theano.config.floatX)) # Handling LEARN_H0 learned_h0.param = LEARN_H0 learned_h0 = T.alloc(learned_h0, h0.shape[0], N_RNN, H0_MULT * DIM) learned_h0 = T.unbroadcast(learned_h0, 0, 1, 2) h0 = theano.ifelse.ifelse(reset, learned_h0, h0) # Handling RNN_TYPE # Handling SKIP_CONN if RNN_TYPE == 'GRU': rnns_out, last_hidden = lib.ops.stackedGRU('SampleLevel.GRU', N_RNN, FRAME_SIZE, DIM, frames, h0=h0, weightnorm=WEIGHT_NORM, skip_conn=SKIP_CONN) elif RNN_TYPE == 'LSTM': rnns_out, last_hidden = lib.ops.stackedLSTM('SampleLevel.LSTM', N_RNN, FRAME_SIZE, DIM, frames, h0=h0, weightnorm=WEIGHT_NORM, skip_conn=SKIP_CONN) out = lib.ops.Linear('SampleLevel.L1', DIM, DIM, rnns_out, initialization='he', weightnorm=WEIGHT_NORM) out = T.nnet.relu(out) out = lib.ops.Linear('SampleLevel.L2', DIM, DIM, out, initialization='he', weightnorm=WEIGHT_NORM) out = T.nnet.relu(out) out = lib.ops.Linear('SampleLevel.L3', DIM, DIM, out, initialization='he', weightnorm=WEIGHT_NORM) out = T.nnet.relu(out) # We apply the softmax later out = lib.ops.Linear('SampleLevel.Output', DIM, Q_LEVELS, out, initialization='he', weightnorm=WEIGHT_NORM) return (out, last_hidden)
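The `T.alloc` / `T.unbroadcast` / `ifelse` pattern shared by the two functions above, shown in isolation with illustrative sizes: `T.alloc` marks any axis whose length is the Python constant 1 as broadcastable, so the tiled learned state is unbroadcast over all axes to keep its type identical to `h0` in both branches of `ifelse`.

import numpy
import theano
import theano.tensor as T
from theano.ifelse import ifelse

N_RNN, DIM = 1, 8                        # illustrative sizes; N_RNN == 1 is the tricky case
h0 = T.tensor3('h0')                     # (batch, N_RNN, DIM), never broadcastable
reset = T.iscalar('reset')

learned_h0 = theano.shared(
    numpy.zeros((N_RNN, DIM), dtype=theano.config.floatX), name='learned_h0')
tiled_h0 = T.alloc(learned_h0, h0.shape[0], N_RNN, DIM)   # axis 1 becomes broadcastable
tiled_h0 = T.unbroadcast(tiled_h0, 0, 1, 2)               # restore h0's broadcastable pattern
h0_used = ifelse(reset, tiled_h0, h0)                     # both branches now have identical types

f = theano.function([h0, reset], h0_used)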
# No need to print a warning or raise an error now, # it will be done when fn will be called. _logger.info( ('Cannot compute test value for the ' 'inner function of scan, input value missing %s'), e) if getattr(init_out['initial'], 'name', None) is not None: arg.name = init_out['initial'].name + '[t-1]' # We need now to allocate space for storing the output and copy # the initial state over. We do this using the expand function # defined in scan utils sit_sot_scan_inputs.append( scan_utils.expand( tensor.unbroadcast(tensor.shape_padleft(actual_arg), 0), actual_n_steps)) sit_sot_inner_slices.append(actual_arg) if i in return_steps: sit_sot_return_steps[n_sit_sot] = return_steps[i] sit_sot_inner_inputs.append(arg) sit_sot_rightOrder.append(i) n_sit_sot += 1 elif init_out.get('taps', None): if numpy.any(numpy.array(init_out.get('taps', [])) > 0): # Make sure we do not have requests for future values of a # sequence we can not provide such values raise ValueError('Can not use future taps of outputs',
def rnn(step_function, inputs, initial_states, go_backwards=False, mask=None, constants=None, unroll=False, input_length=None): '''Iterates over the time dimension of a tensor. # Arguments inputs: tensor of temporal data of shape (samples, time, ...) (at least 3D). step_function: Parameters: input: tensor with shape (samples, ...) (no time dimension), representing input for the batch of samples at a certain time step. states: list of tensors. Returns: output: tensor with shape (samples, ...) (no time dimension), new_states: list of tensors, same length and shapes as 'states'. initial_states: tensor with shape (samples, ...) (no time dimension), containing the initial values for the states used in the step function. go_backwards: boolean. If True, do the iteration over the time dimension in reverse order. mask: binary tensor with shape (samples, time), with a zero for every element that is masked. constants: a list of constant values passed at each step. unroll: whether to unroll the RNN or to use a symbolic loop (`scan`). input_length: must be specified if using `unroll`. # Returns A tuple (last_output, outputs, new_states). last_output: the latest output of the rnn, of shape (samples, ...) outputs: tensor with shape (samples, time, ...) where each entry outputs[s, t] is the output of the step function at time t for sample s. new_states: list of tensors, latest states returned by the step function, of shape (samples, ...). ''' ndim = inputs.ndim assert ndim >= 3, 'Input should be at least 3D.' if unroll: if input_length is None: raise Exception('When specifying `unroll=True`, an `input_length` ' 'must be provided to `rnn`.') axes = [1, 0] + list(range(2, ndim)) inputs = inputs.dimshuffle(axes) if constants is None: constants = [] if mask is not None: if mask.ndim == ndim-1: mask = expand_dims(mask) assert mask.ndim == ndim mask = mask.dimshuffle(axes) if unroll: indices = list(range(input_length)) if go_backwards: indices = indices[::-1] successive_outputs = [] successive_states = [] states = initial_states for i in indices: output, new_states = step_function(inputs[i], states + constants) if len(successive_outputs) == 0: prev_output = zeros_like(output) else: prev_output = successive_outputs[-1] output = T.switch(mask[i], output, prev_output) kept_states = [] for state, new_state in zip(states, new_states): kept_states.append(T.switch(mask[i], new_state, state)) states = kept_states successive_outputs.append(output) successive_states.append(states) outputs = T.stack(*successive_outputs) states = [] for i in range(len(successive_states[-1])): states.append(T.stack(*[states_at_step[i] for states_at_step in successive_states])) else: # build an all-zero tensor of shape (samples, output_dim) initial_output = step_function(inputs[0], initial_states + constants)[0] * 0 # Theano gets confused by broadcasting patterns in the scan op initial_output = T.unbroadcast(initial_output, 0, 1) def _step(input, mask, output_tm1, *states): output, new_states = step_function(input, states) # output previous output if masked. 
output = T.switch(mask, output, output_tm1) return_states = [] for state, new_state in zip(states, new_states): return_states.append(T.switch(mask, new_state, state)) return [output] + return_states results, _ = theano.scan( _step, sequences=[inputs, mask], outputs_info=[initial_output] + initial_states, non_sequences=constants, go_backwards=go_backwards) # deal with Theano API inconsistency if type(results) is list: outputs = results[0] states = results[1:] else: outputs = results states = [] else: if unroll: indices = list(range(input_length)) if go_backwards: indices = indices[::-1] successive_outputs = [] successive_states = [] states = initial_states for i in indices: output, states = step_function(inputs[i], states + constants) successive_outputs.append(output) successive_states.append(states) outputs = T.stack(*successive_outputs) states = [] for i in range(len(successive_states[-1])): states.append(T.stack(*[states_at_step[i] for states_at_step in successive_states])) else: def _step(input, *states): output, new_states = step_function(input, states) return [output] + new_states results, _ = theano.scan( _step, sequences=inputs, outputs_info=[None] + initial_states, non_sequences=constants, go_backwards=go_backwards) # deal with Theano API inconsistency if type(results) is list: outputs = results[0] states = results[1:] else: outputs = results states = [] outputs = T.squeeze(outputs) last_output = outputs[-1] axes = [1, 0] + list(range(2, outputs.ndim)) outputs = outputs.dimshuffle(axes) states = [T.squeeze(state[-1]) for state in states] return last_output, outputs, states
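A hedged usage sketch for `rnn` above with a trivial accumulator step (no mask, no unrolling); the shapes are illustrative and the step function is hypothetical.

import numpy
import theano
import theano.tensor as T

x = T.tensor3('x')                               # (samples, time, features)
s0 = T.zeros((x.shape[0], x.shape[2]))           # initial state: (samples, features)

def step(x_t, states):
    # add the current input to the running sum and feed it back as the new state
    s_t = states[0] + x_t
    return s_t, [s_t]

last_output, outputs, new_states = rnn(step, x, [s0])
f = theano.function([x], [last_output, outputs])

data = numpy.ones((2, 3, 4), dtype=theano.config.floatX)
last_val, outs_val = f(data)                     # outs_val.shape == (2, 3, 4)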
def __call__(self, y0, theta, return_sens=False, **kwargs): if isinstance(y0, (list, tuple)) and not len(y0) == self.n_states: raise ShapeError("Length of y0 is wrong.", actual=(len(y0), ), expected=(self.n_states, )) if isinstance(theta, (list, tuple)) and not len(theta) == self.n_theta: raise ShapeError("Length of theta is wrong.", actual=(len(theta), ), expected=(self.n_theta, )) # convert inputs to tensors (and check their types) y0 = tt.cast(tt.unbroadcast(tt.as_tensor_variable(y0), 0), floatX) theta = tt.cast(tt.unbroadcast(tt.as_tensor_variable(theta), 0), floatX) inputs = [y0, theta] for i, (input_val, itype) in enumerate(zip(inputs, self._itypes)): if not input_val.type == itype: raise ValueError( f"Input {i} of type {input_val.type} does not have the expected type of {itype}" ) # use default implementation to prepare symbolic outputs (via make_node) states, sens = super(theano.Op, self).__call__(y0, theta, **kwargs) if theano.config.compute_test_value != "off": # compute test values from input test values test_states, test_sens = self._simulate( y0=get_test_value(y0), theta=get_test_value(theta)) # check types of simulation result if not test_states.dtype == self._otypes[0].dtype: raise DtypeError( "Simulated states have the wrong type.", actual=test_states.dtype, expected=self._otypes[0].dtype, ) if not test_sens.dtype == self._otypes[1].dtype: raise DtypeError( "Simulated sensitivities have the wrong type.", actual=test_sens.dtype, expected=self._otypes[1].dtype, ) # check shapes of simulation result expected_states_shape = (self.n_times, self.n_states) expected_sens_shape = (self.n_times, self.n_states, self.n_p) if not test_states.shape == expected_states_shape: raise ShapeError( "Simulated states have the wrong shape.", test_states.shape, expected_states_shape, ) if not test_sens.shape == expected_sens_shape: raise ShapeError( "Simulated sensitivities have the wrong shape.", test_sens.shape, expected_sens_shape, ) # attach results as test values to the outputs states.tag.test_value = test_states sens.tag.test_value = test_sens if return_sens: return states, sens return states
def __init__(self, model, trn_data, trn_loss, trn_target): """ :param model: model to train :param trn_data: train data :param trn_loss: train loss :param trn_target: train target """ # prepare train data n_trn_data_list = set([x.shape[0] for x in trn_data]) assert len( n_trn_data_list) == 1, 'Number of train data is not consistent.' trn_data = [theano.shared(x.astype(dtype)) for x in trn_data] # prepare train inputs trn_inputs = [model.input ] if trn_target is None else [model.input, trn_target] # potential energy self.U = theano.function(inputs=[], outputs=trn_loss, givens=zip(trn_inputs, trn_data)) # theano variables step = tt.scalar('step') mass = tt.scalar('mass') srng = RandomStreams() # theano function for drawing random momentum variables ps = [ theano.shared(np.zeros_like(x.get_value(borrow=True)), borrow=True) for x in model.parms ] ps_rand = [ srng.normal(x.get_value().shape, std=tt.sqrt(mass), dtype=dtype) for x in model.parms ] ps_rand = [ tt.unbroadcast(pr, *range(x.get_value().ndim)) for pr, x in izip(ps_rand, model.parms) ] self.draw_momentum = theano.function(inputs=[mass], updates=zip(ps, ps_rand), allow_input_downcast=True) # theano function for calculating kinetic energy K = sum([tt.sum(p**2) for p in ps]) / (2.0 * mass) self.calc_kinetic = theano.function(inputs=[mass], outputs=K, allow_input_downcast=True) # theano function for updating momentum variables dUs = tt.grad(trn_loss, model.parms) new_ps = [p - step * dU for p, dU in izip(ps, dUs)] self.update_momentum = theano.function(inputs=[step], updates=zip(ps, new_ps), givens=zip( trn_inputs, trn_data), allow_input_downcast=True) # theano function for updating model parameters new_parms = [x + step / mass * p for x, p in izip(model.parms, ps)] self.update_parms = theano.function(inputs=[step, mass], updates=zip( model.parms, new_parms), allow_input_downcast=True) # initialize self.U_prev = self.U() self.model = model
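A small sketch of why the momentum samples above are unbroadcast over every axis before being used in the updates (hypothetical parameter shape): a parameter with a length-1 axis makes `srng.normal` return a tensor that is broadcastable along that axis, which may no longer match the type of the non-broadcastable shared momentum variable it updates.

import numpy as np
import theano
import theano.tensor as tt
from theano.tensor.shared_randomstreams import RandomStreams

w = theano.shared(np.zeros((1, 5), dtype=theano.config.floatX), name='w')   # length-1 first axis
p = theano.shared(np.zeros_like(w.get_value()), name='p')                   # momentum buffer
srng = RandomStreams(seed=0)

sample = srng.normal(w.get_value().shape, dtype=theano.config.floatX)
sample = tt.unbroadcast(sample, *range(w.get_value().ndim))  # match p's broadcastable pattern

draw_momentum = theano.function([], [], updates=[(p, sample)])
draw_momentum()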
def test_rebroadcast(self): # I need the sum, because the setup expects the output to be a # vector self.check_rop_lop(tensor.unbroadcast( self.x[:4].dimshuffle('x', 0), 0).sum(axis=1), (1,))
def gru_layer(tparams, state_below, options, prefix='gru', mask=None, one_step=False, init_state=None, **kwargs): # Waino: state_below: x from the slides (embedding of previous word) if one_step: assert init_state, 'previous state must be provided' nsteps = state_below.shape[0] if state_below.ndim == 3: n_samples = state_below.shape[1] else: n_samples = state_below.shape[0] dim = tparams[_p(prefix, 'Ux')].shape[1] if mask == None: mask = tensor.alloc(1., state_below.shape[0], 1) def _slice(_x, n, dim): if _x.ndim == 3: return _x[:, :, n * dim:(n + 1) * dim] return _x[:, n * dim:(n + 1) * dim] # Waino: these are known at train time without needing to scan, # Waino: (x:s are known, and W:s are constant during the minibatch) # Waino: and thus can be calculated for all words in one operation state_below_ = tensor.dot(state_below, tparams[_p( prefix, 'W')]) + tparams[_p(prefix, 'b')] state_belowx = tensor.dot(state_below, tparams[_p( prefix, 'Wx')]) + tparams[_p(prefix, 'bx')] U = tparams[_p(prefix, 'U')] Ux = tparams[_p(prefix, 'Ux')] def _step_slice(m_, x_, xx_, h_, U, Ux): # Waino: m_ = mask # Waino: x_ = state_below_ (concatenation of Wr * x + br, Wu * x + bu) # Waino: xx_= state_belowx (W * x + b) # Waino: h_ = h at previous timestep, initialized to init_state (h_(t-1)) # Waino: U = U (concatenated Ur, Uu) # Waino: Ux = Ux # Waino: h = h_t preact = tensor.dot(h_, U) preact += x_ r = tensor.nnet.sigmoid(_slice(preact, 0, dim)) u = tensor.nnet.sigmoid(_slice(preact, 1, dim)) # Waino: maybe Ux is just U on the slides? # Waino: elementwise mult can be moved outside the matrix mult preactx = tensor.dot(h_, Ux) preactx = preactx * r preactx = preactx + xx_ h = tensor.tanh(preactx) # Waino: u is negated compared to slides (doesn't matter due to symmetry) h = u * h_ + (1. - u) * h h = m_[:, None] * h + (1. - m_)[:, None] * h_ return h #, r, u, preact, preactx seqs = [mask, state_below_, state_belowx] _step = _step_slice shared_vars = [tparams[_p(prefix, 'U')], tparams[_p(prefix, 'Ux')]] if init_state is None: init_state = tensor.unbroadcast(tensor.alloc(0., n_samples, dim), 0) if one_step: rval = _step(*(seqs + [init_state] + shared_vars)) else: rval, updates = theano.scan(_step, sequences=seqs, outputs_info=[init_state], non_sequences=shared_vars, name=_p(prefix, '_layers'), n_steps=nsteps, profile=options['profile'], strict=True) rval = [rval] return rval
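A hedged sketch of calling `gru_layer` above. The parameter shapes follow the `_slice` convention (reset and update gates stacked along the second axis of `W`/`U`), `_p` is a hypothetical stand-in for the prefix-joining helper used by the surrounding codebase, and the sizes are illustrative.

import numpy
import theano
import theano.tensor as tensor

def _p(pp, name):
    # hypothetical helper: the original codebase joins prefix and name with '_'
    return '%s_%s' % (pp, name)

dim_word, dim = 4, 6                                   # illustrative sizes
rng = numpy.random.RandomState(0)
def _shared(shape, name):
    return theano.shared(rng.normal(size=shape).astype(theano.config.floatX), name=name)

tparams = {
    _p('gru', 'W'):  _shared((dim_word, 2 * dim), 'gru_W'),
    _p('gru', 'b'):  _shared((2 * dim,), 'gru_b'),
    _p('gru', 'U'):  _shared((dim, 2 * dim), 'gru_U'),
    _p('gru', 'Wx'): _shared((dim_word, dim), 'gru_Wx'),
    _p('gru', 'bx'): _shared((dim,), 'gru_bx'),
    _p('gru', 'Ux'): _shared((dim, dim), 'gru_Ux'),
}

emb = tensor.tensor3('emb')                            # (timesteps, samples, dim_word)
h = gru_layer(tparams, emb, options={'profile': False}, prefix='gru')[0]
f = theano.function([emb], h)                          # h: (timesteps, samples, dim)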
def frame_level_rnn(input_sequences, other_input, h0, reset): """ input_sequences.shape: (batch size, n frames * FRAME_SIZE) other_input.shape: (batch size, n frames, DIM) h0.shape: (batch size, N_RNN, DIM) reset.shape: () output.shape: (batch size, n frames * FRAME_SIZE, DIM) """ frames = input_sequences.reshape( (input_sequences.shape[0], input_sequences.shape[1] // FRAME_SIZE, FRAME_SIZE)) # Rescale frames from ints in [0, Q_LEVELS) to floats in [-2, 2] # (a reasonable range to pass as inputs to the RNN) frames = (frames.astype('float32') / lib.floatX(Q_LEVELS / 2)) - lib.floatX(1) frames *= lib.floatX(2) gru_input = lib.ops.Linear( 'FrameLevel.InputExpand', FRAME_SIZE, DIM, frames, initialization='he', weightnorm=WEIGHT_NORM, ) + other_input # Initial state of RNNs learned_h0 = lib.param( 'FrameLevel.h0', numpy.zeros((N_RNN, H0_MULT * DIM), dtype=theano.config.floatX)) # Handling LEARN_H0 learned_h0.param = LEARN_H0 learned_h0 = T.alloc(learned_h0, h0.shape[0], N_RNN, H0_MULT * DIM) learned_h0 = T.unbroadcast(learned_h0, 0, 1, 2) #learned_h0 = T.patternbroadcast(learned_h0, [False] * learned_h0.ndim) h0 = theano.ifelse.ifelse(reset, learned_h0, h0) # Handling RNN_TYPE # Handling SKIP_CONN if RNN_TYPE == 'GRU': rnns_out, last_hidden = lib.ops.stackedGRU('FrameLevel.GRU', N_RNN, DIM, DIM, gru_input, h0=h0, weightnorm=WEIGHT_NORM, skip_conn=SKIP_CONN) elif RNN_TYPE == 'LSTM': rnns_out, last_hidden = lib.ops.stackedLSTM('FrameLevel.LSTM', N_RNN, DIM, DIM, gru_input, h0=h0, weightnorm=WEIGHT_NORM, skip_conn=SKIP_CONN) output = lib.ops.Linear('FrameLevel.Output', DIM, FRAME_SIZE * DIM, rnns_out, initialization='he', weightnorm=WEIGHT_NORM) output = output.reshape( (output.shape[0], output.shape[1] * FRAME_SIZE, DIM)) return (output, last_hidden)
def scalar_search_wolfe2(phi, derphi, phi0=None, old_phi0=None, derphi0=None, n_iters=20, c1=1e-4, c2=0.9, profile=False): """Find alpha that satisfies strong Wolfe conditions. alpha > 0 is assumed to be a descent direction. Parameters ---------- phi : callable f(x) Objective scalar function. derphi : callable f'(x) Objective function derivative (can be None) phi0 : float, optional Value of phi at s=0 old_phi0 : float, optional Value of phi at previous point derphi0 : float, optional Value of derphi at s=0 c1 : float Parameter for Armijo condition rule. c2 : float Parameter for curvature condition rule. profile : flag (boolean) True if you want printouts of profiling information Returns ------- alpha_star : float Best alpha phi_star phi at alpha_star phi0 phi at 0 derphi_star derphi at alpha_star Notes ----- Uses the line search algorithm to enforce strong Wolfe conditions. See Wright and Nocedal, 'Numerical Optimization', 1999, pg. 59-60. For the zoom phase it uses an algorithm by [...]. """ if phi0 is None: phi0 = phi(zero) else: phi0 = phi0 if derphi0 is None and derphi is not None: derphi0 = derphi(zero) else: derphi0 = derphi0 alpha0 = zero alpha0.name = 'alpha0' if old_phi0 is not None: alpha1 = TT.minimum(one, numpy.asarray(1.01, dtype=theano.config.floatX) * numpy.asarray(2, dtype=theano.config.floatX) * \ (phi0 - old_phi0) / derphi0) else: old_phi0 = nan alpha1 = one alpha1 = TT.switch(alpha1 < zero, one, alpha1) alpha1.name = 'alpha1' # This shouldn't happen. Perhaps the increment has slipped below # machine precision? For now, set the return variables skip the # useless while loop, and raise warnflag=2 due to possible imprecision. phi0 = TT.switch(TT.eq(alpha1, zero), old_phi0, phi0) # I need a lazyif for alpha1 == 0 !!! phi_a1 = ifelse(TT.eq(alpha1, zero), phi0, phi(alpha1), name='phi_a1') phi_a1.name = 'phi_a1' phi_a0 = phi0 phi_a0.name = 'phi_a0' derphi_a0 = derphi0 derphi_a0.name = 'derphi_a0' # Make sure variables are tensors otherwise strange things happen c1 = TT.as_tensor_variable(c1) c2 = TT.as_tensor_variable(c2) maxiter = n_iters def while_search(alpha0, alpha1, phi_a0, phi_a1, derphi_a0, i_t, alpha_star, phi_star, derphi_star): derphi_a1 = derphi(alpha1) cond1 = TT.bitwise_or(phi_a1 > phi0 + c1 * alpha1 * derphi0, TT.bitwise_and(phi_a1 >= phi_a0, i_t > zero)) cond2 = abs(derphi_a1) <= -c2 * derphi0 cond3 = derphi_a1 >= zero alpha_star_c1, phi_star_c1, derphi_star_c1 = \ _zoom(alpha0, alpha1, phi_a0, phi_a1, derphi_a0, phi, derphi, phi0, derphi0, c1, c2, profile=profile) alpha_star_c3, phi_star_c3, derphi_star_c3 = \ _zoom(alpha1, alpha0, phi_a1, phi_a0, derphi_a1, phi, derphi, phi0, derphi0, c1, c2, profile=profile) nw_alpha1 = alpha1 * numpy.asarray(2, dtype=theano.config.floatX) nw_phi = phi(nw_alpha1) alpha_star, phi_star, derphi_star = \ ifelse(cond1, (alpha_star_c1, phi_star_c1, derphi_star_c1), ifelse(cond2, (alpha1, phi_a1, derphi_a1), ifelse(cond3, (alpha_star_c3, phi_star_c3, derphi_star_c3), (nw_alpha1, nw_phi, nan), name='alphastar_c3'), name='alphastar_c2'), name='alphastar_c1') return ([ alpha1, nw_alpha1, phi_a1, ifelse(lazy_or('allconds', cond1, cond2, cond3), phi_a1, nw_phi, name='nwphi1'), ifelse(cond1, derphi_a0, derphi_a1, name='derphi'), i_t + one, alpha_star, phi_star, derphi_star ], theano.scan_module.scan_utils.until( lazy_or('until_cond_', TT.eq(nw_alpha1, zero), cond1, cond2, cond3))) states = [] states += [TT.unbroadcast(TT.shape_padleft(alpha0), 0)] states += [TT.unbroadcast(TT.shape_padleft(alpha1), 0)] states += 
[TT.unbroadcast(TT.shape_padleft(phi_a0), 0)] states += [TT.unbroadcast(TT.shape_padleft(phi_a1), 0)] states += [TT.unbroadcast(TT.shape_padleft(derphi_a0), 0)] # i_t states += [TT.unbroadcast(TT.shape_padleft(zero), 0)] # alpha_star states += [TT.unbroadcast(TT.shape_padleft(zero), 0)] # phi_star states += [TT.unbroadcast(TT.shape_padleft(zero), 0)] # derphi_star states += [TT.unbroadcast(TT.shape_padleft(zero), 0)] # print 'while_search' outs, updates = scan(while_search, states=states, n_steps=maxiter, name='while_search', mode=theano.Mode(linker='cvm_nogc'), profile=profile) # print 'done_while_search' out3 = outs[-3][0] out2 = outs[-2][0] out1 = outs[-1][0] alpha_star, phi_star, derphi_star = \ ifelse(TT.eq(alpha1, zero), (nan, phi0, nan), (out3, out2, out1), name='main_alphastar') return alpha_star, phi_star, phi0, derphi_star
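A hedged sketch of how `phi` and `derphi` are meant to be supplied to `scalar_search_wolfe2` above: both are callables that build Theano graphs for the objective and its directional derivative along a fixed search direction. The module-level helpers this file relies on (`zero`, `one`, `nan`, `lazy_or`, the memory-buffer `scan`) are assumed to be defined elsewhere in the same module.

import numpy
import theano
import theano.tensor as TT

params = theano.shared(numpy.ones(3, dtype=theano.config.floatX), name='params')
direction = TT.constant(numpy.asarray([-1., -1., -1.], dtype=theano.config.floatX))

def phi(alpha):
    # objective restricted to the line: f(params + alpha * direction), with f(x) = ||x||^2
    return ((params + alpha * direction) ** 2).sum()

def derphi(alpha):
    # analytic directional derivative of the same quadratic
    return (2. * (params + alpha * direction) * direction).sum()

# all four return values are symbolic and can be compiled or used in updates
alpha_star, phi_star, phi0, derphi_star = scalar_search_wolfe2(phi, derphi)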
def gru_layer(tparams, state_below, options, prefix='gru', mask=None, one_step=False, init_state=None, backwards=False, **kwargs): if one_step: assert init_state, 'previous state must be provided' nsteps = state_below.shape[0] if state_below.ndim == 3: n_samples = state_below.shape[1] else: n_samples = state_below.shape[0] dim = tparams[prefix + '_Ux'].shape[1] if mask is None: mask = tensor.alloc(1., state_below.shape[0], 1) # state_below is the input word embeddings state_below_ = (tensor.dot(state_below, tparams[prefix + '_W']) + tparams[prefix + '_b']) state_belowx = (tensor.dot(state_below, tparams[prefix + '_Wx']) + tparams[prefix + '_bx']) def _step_slice(m_, x_, xx_, h_, U, Ux): preact = tensor.dot(h_, U) preact += x_ # reset and update gates r = tensor.nnet.sigmoid(_slice(preact, 0, dim)) u = tensor.nnet.sigmoid(_slice(preact, 1, dim)) # compute the hidden state proposal preactx = tensor.dot(h_, Ux) preactx = preactx * r preactx = preactx + xx_ # hidden state proposal h = tensor.tanh(preactx) # leaky integrate and obtain next hidden state h = u * h_ + (1. - u) * h h = m_[:, None] * h + (1. - m_)[:, None] * h_ return h # prepare scan arguments seqs = [mask, state_below_, state_belowx] _step = _step_slice shared_vars = [tparams[prefix + '_U'], tparams[prefix + '_Ux']] # set initial state to all zeros if init_state is None: init_state = tensor.unbroadcast(tensor.alloc(0., n_samples, dim), 0) else: init_state = init_state if one_step: # sampling rval = _step(*(seqs + [init_state] + shared_vars)) else: rval, updates = theano.scan(_step, sequences=seqs, outputs_info=[init_state], non_sequences=shared_vars, name=prefix + '_layers', n_steps=nsteps, profile=profile, strict=True, go_backwards=backwards) rval = [rval] return rval
def get_initial_state(self, x): input_shape = self.input_spec[0].shape init_nb_row = input_shape[self.row_axis] init_nb_col = input_shape[self.column_axis] base_initial_state = K.zeros_like( x) # (samples, timesteps) + image_shape non_channel_axis = -1 if self.data_format == 'channels_first' else -2 for _ in range(2): base_initial_state = K.sum(base_initial_state, axis=non_channel_axis) base_initial_state = K.sum(base_initial_state, axis=1) # (samples, nb_channels) initial_states = [] states_to_pass = ['r', 'c', 'a'] nlayers_to_pass = {u: self.nb_layers for u in states_to_pass} if self.extrap_start_time is not None: states_to_pass.append( 'ahat' ) # pass prediction in states so can use as actual for t+1 when extrapolating nlayers_to_pass['ahat'] = 1 for u in states_to_pass: ds_factor = 1 for l in range(nlayers_to_pass[u]): nb_row = init_nb_row // ds_factor nb_col = init_nb_col // ds_factor if l < self.nb_layers - 1: ds_factor *= self.upsample_size[l] if u in ['r', 'c']: stack_size = self.R_stack_sizes[l] elif u == 'a': stack_size = self.stack_sizes[l] elif u == 'ahat': stack_size = self.stack_sizes[l] output_size = stack_size * nb_row * nb_col # flattened size reducer = K.zeros((input_shape[self.channel_axis], output_size)) # (nb_channels, output_size) initial_state = K.dot(base_initial_state, reducer) # (samples, output_size) if self.data_format == 'channels_first': output_shp = (-1, stack_size, nb_row, nb_col) else: output_shp = (-1, nb_row, nb_col, stack_size) initial_state = K.reshape(initial_state, output_shp) initial_states += [initial_state] if K._BACKEND == 'theano': from theano import tensor as T # There is a known issue in the Theano scan op when dealing with inputs whose shape is 1 along a dimension. # In our case, this is a problem when training on grayscale images, and the below line fixes it. initial_states = [ T.unbroadcast(init_state, 0, 1) for init_state in initial_states ] if self.extrap_start_time is not None: initial_states += [ K.variable(0, int if K.backend() != 'tensorflow' else 'int32') ] # the last state will correspond to the current timestep return initial_states
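A minimal sketch of the scan quirk the comment above works around (shapes are illustrative): an initial state whose axis has constant length 1 gets a broadcastable type, while the step output does not, and `theano.scan` then complains about the inconsistent broadcastable patterns unless the initial state is unbroadcast over those axes.

import numpy
import theano
import theano.tensor as T

x = T.tensor3('x')                  # (time, batch, features); batch is 1 for grayscale-like data
init = T.zeros((1, 4))              # constant length-1 axis -> broadcastable pattern (True, False)
init = T.unbroadcast(init, 0)       # without this, scan rejects the initial state / output pair

def step(x_t, s_tm1):
    return s_tm1 + x_t              # (batch, features), not broadcastable along axis 0

states, _ = theano.scan(step, sequences=x, outputs_info=[init])
f = theano.function([x], states)
out = f(numpy.zeros((5, 1, 4), dtype=theano.config.floatX))   # shape (5, 1, 4)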
def scan(fn, sequences=None, outputs_info=None, non_sequences=None, n_steps=None, truncate_gradient=-1, go_backwards=False, mode=None, name=None, profile=False, allow_gc=None, strict=False): """ This function constructs and applies a Scan op to the provided arguments. Parameters ---------- fn ``fn`` is a function that describes the operations involved in one step of ``scan``. ``fn`` should construct variables describing the output of one iteration step. It should expect as input theano variables representing all the slices of the input sequences and previous values of the outputs, as well as all other arguments given to scan as ``non_sequences``. The order in which scan passes these variables to ``fn`` is the following : * all time slices of the first sequence * all time slices of the second sequence * ... * all time slices of the last sequence * all past slices of the first output * all past slices of the second otuput * ... * all past slices of the last output * all other arguments (the list given as `non_sequences` to scan) The order of the sequences is the same as the one in the list `sequences` given to scan. The order of the outputs is the same as the order of ``outputs_info``. For any sequence or output the order of the time slices is the same as the one in which they have been given as taps. For example if one writes the following : .. code-block:: python scan(fn, sequences = [ dict(input= Sequence1, taps = [-3,2,-1]) , Sequence2 , dict(input = Sequence3, taps = 3) ] , outputs_info = [ dict(initial = Output1, taps = [-3,-5]) , dict(initial = Output2, taps = None) , Output3 ] , non_sequences = [ Argument1, Argument2]) ``fn`` should expect the following arguments in this given order: #. ``Sequence1[t-3]`` #. ``Sequence1[t+2]`` #. ``Sequence1[t-1]`` #. ``Sequence2[t]`` #. ``Sequence3[t+3]`` #. ``Output1[t-3]`` #. ``Output1[t-5]`` #. ``Output3[t-1]`` #. ``Argument1`` #. ``Argument2`` The list of ``non_sequences`` can also contain shared variables used in the function, though ``scan`` is able to figure those out on its own so they can be skipped. For the clarity of the code we recommend though to provide them to scan. To some extend ``scan`` can also figure out other ``non sequences`` (not shared) even if not passed to scan (but used by `fn`). A simple example of this would be : .. code-block:: python import theano.tensor as TT W = TT.matrix() W_2 = W**2 def f(x): return TT.dot(x,W_2) The function is expected to return two things. One is a list of outputs ordered in the same order as ``outputs_info``, with the difference that there should be only one output variable per output initial state (even if no tap value is used). Secondly `fn` should return an update dictionary (that tells how to update any shared variable after each iteration step). The dictionary can optionally be given as a list of tuples. There is no constraint on the order of these two list, ``fn`` can return either ``(outputs_list, update_dictionary)`` or ``(update_dictionary, outputs_list)`` or just one of the two (in case the other is empty). To use ``scan`` as a while loop, the user needs to change the function ``fn`` such that also a stopping condition is returned. To do so, he/she needs to wrap the condition in an ``until`` class. The condition should be returned as a third element, for example: .. code-block:: python ... 
return [y1_t, y2_t], {x:x+1}, theano.scan_module.until(x < 50) Note that a number of steps (considered here as the maximum number of steps) is still required even though a condition is passed (and it is used to allocate memory if needed). sequences ``sequences`` is the list of Theano variables or dictionaries describing the sequences ``scan`` has to iterate over. If a sequence is given as wrapped in a dictionary, then a set of optional information can be provided about the sequence. The dictionary should have the following keys: * ``input`` (*mandatory*) -- Theano variable representing the sequence. * ``taps`` -- Temporal taps of the sequence required by ``fn``. They are provided as a list of integers, where a value ``k`` implies that at iteration step ``t`` scan will pass to ``fn`` the slice ``t+k``. Default value is ``[0]`` Any Theano variable in the list ``sequences`` is automatically wrapped into a dictionary where ``taps`` is set to ``[0]`` outputs_info ``outputs_info`` is the list of Theano variables or dictionaries describing the initial state of the outputs computed recurrently. When these initial states are given as a dictionary, optional information can be provided about the output corresponding to these initial states. The dictionary should have the following keys: * ``initial`` -- Theano variable that represents the initial state of a given output. In case the output is not computed recursively (think of a map) and does not require an initial state this field can be skipped. Given that (only) the previous time step of the output is used by ``fn``, the initial state **should have the same shape** as the output and **should not involve a downcast** of the data type of the output. If multiple time taps are used, the initial state should have one extra dimension that should cover all the possible taps. For example if we use ``-5``, ``-2`` and ``-1`` as past taps, at step 0, ``fn`` will require (by an abuse of notation) ``output[-5]``, ``output[-2]`` and ``output[-1]``. This will be given by the initial state, which in this case should have the shape (5,)+output.shape. If this variable containing the initial state is called ``init_y`` then ``init_y[0]`` *corresponds to* ``output[-5]``. ``init_y[1]`` *corresponds to* ``output[-4]``, ``init_y[2]`` corresponds to ``output[-3]``, ``init_y[3]`` corresponds to ``output[-2]``, ``init_y[4]`` corresponds to ``output[-1]``. While this order might seem strange, it comes naturally from splitting an array at a given point. Assume that we have an array ``x``, and we choose ``k`` to be time step ``0``. Then our initial state would be ``x[:k]``, while the output will be ``x[k:]``. Looking at this split, elements in ``x[:k]`` are ordered exactly like those in ``init_y``. * ``taps`` -- Temporal taps of the output that will be passed to ``fn``. They are provided as a list of *negative* integers, where a value ``k`` implies that at iteration step ``t`` scan will pass to ``fn`` the slice ``t+k``. ``scan`` will follow this logic if partial information is given: * If an output is not wrapped in a dictionary, ``scan`` will wrap it in one assuming that you use only the last step of the output (i.e. it makes your tap value list equal to [-1]). * If you wrap an output in a dictionary and you do not provide any taps but you provide an initial state it will assume that you are using only a tap value of -1. * If you wrap an output in a dictionary but you do not provide any initial state, it assumes that you are not using any form of taps.
* If you provide a ``None`` instead of a variable or a empty dictionary ``scan`` assumes that you will not use any taps for this output (like for example in case of a map) If ``outputs_info`` is an empty list or None, ``scan`` assumes that no tap is used for any of the outputs. If information is provided just for a subset of the outputs an exception is raised (because there is no convention on how scan should map the provided information to the outputs of ``fn``) non_sequences ``non_sequences`` is the list of arguments that are passed to ``fn`` at each steps. One can opt to exclude variable used in ``fn`` from this list as long as they are part of the computational graph, though for clarity we encourage not to do so. n_steps ``n_steps`` is the number of steps to iterate given as an int or Theano scalar. If any of the input sequences do not have enough elements, scan will raise an error. If the *value is 0* the outputs will have *0 rows*. If the value is negative, ``scan`` will run backwards in time. If the ``go_backwards`` flag is already set and also ``n_steps`` is negative, ``scan`` will run forward in time. If n_steps is not provided, ``scan`` will figure out the amount of steps it should run given its input sequences. truncate_gradient ``truncate_gradient`` is the number of steps to use in truncated BPTT. If you compute gradients through a scan op, they are computed using backpropagation through time. By providing a different value then -1, you choose to use truncated BPTT instead of classical BPTT, where you go for only ``truncate_gradient`` number of steps back in time. go_backwards ``go_backwards`` is a flag indicating if ``scan`` should go backwards through the sequences. If you think of each sequence as indexed by time, making this flag True would mean that ``scan`` goes back in time, namely that for any sequence it starts from the end and goes towards 0. name When profiling ``scan``, it is crucial to provide a name for any instance of ``scan``. The profiler will produce an overall profile of your code as well as profiles for the computation of one step of each instance of ``scan``. The ``name`` of the instance appears in those profiles and can greatly help to disambiguate information. mode It is recommended to leave this argument to None, especially when profiling ``scan`` (otherwise the results are not going to be accurate). If you prefer the computations of one step of ``scan`` to be done differently then the entire function, you can use this parameter to describe how the computations in this loop are done (see ``theano.function`` for details about possible values and their meaning). profile Flag or string. If true, or different from the empty string, a profile object will be created and attached to the inner graph of scan. In case ``profile`` is True, the profile object will have the name of the scan instance, otherwise it will have the passed string. Profile object collect (and print) information only when running the inner graph with the new cvm linker ( with default modes, other linkers this argument is useless) allow_gc Set the value of allow gc for the internal graph of scan. If set to None, this will use the value of config.scan.allow_gc. strict If true, all the shared variables used in ``fn`` must be provided as a part of ``non_sequences`` or ``sequences``. Returns ------- tuple Tuple of the form (outputs, updates); ``outputs`` is either a Theano variable or a list of Theano variables representing the outputs of ``scan`` (in the same order as in ``outputs_info``). 
``updates`` is a subclass of dictionary specifying the update rules for all shared variables used in scan. This dictionary should be passed to ``theano.function`` when you compile your function. The change compared to a normal dictionary is that we validate that keys are SharedVariable and addition of those dictionary are validated to be consistent. """ # General observation : this code is executed only once, at creation # of the computational graph, so we don't yet need to be smart about # anything (to speed things up) ## # Step 1. Wrap all inputs in dictionaries and add default values ## # check if inputs are just single variables instead of lists def wrap_into_list(x): """ Wrap the input into a list if it is not already a list. """ if x is None: return [] elif not isinstance(x, (list, tuple)): return [x] else: return list(x) seqs = wrap_into_list(sequences) outs_info = wrap_into_list(outputs_info) # Make sure we get rid of numpy arrays or ints or anything like that # passed as inputs to scan non_seqs = [] for elem in wrap_into_list(non_sequences): if not isinstance(elem, gof.Variable): non_seqs.append(tensor.as_tensor_variable(elem)) else: non_seqs.append(elem) # If we provided a known number of steps ( before compilation) # and if that number is 1 or -1, then we can skip the Scan Op, # and just apply the inner function once # To do that we check here to see the nature of n_steps n_fixed_steps = None if isinstance(n_steps, (float, integer_types)): n_fixed_steps = int(n_steps) else: try: n_fixed_steps = opt.get_scalar_constant_value(n_steps) except tensor.basic.NotScalarConstantError: n_fixed_steps = None # Check n_steps is an int if (hasattr(n_steps, 'dtype') and str(n_steps.dtype)[:3] not in ('uin', 'int')): raise ValueError(' n_steps must be an int. dtype provided ' 'is %s' % n_steps.dtype) # compute number of sequences and number of outputs n_seqs = len(seqs) n_outs = len(outs_info) return_steps = OrderedDict() # wrap sequences in a dictionary if they are not already dictionaries for i in xrange(n_seqs): if not isinstance(seqs[i], dict): seqs[i] = OrderedDict([('input', seqs[i]), ('taps', [0])]) elif seqs[i].get('taps', None) is not None: seqs[i]['taps'] = wrap_into_list(seqs[i]['taps']) elif seqs[i].get('taps', None) is None: # seqs dictionary does not have the ``taps`` key seqs[i]['taps'] = [0] # wrap outputs info in a dictionary if they are not already in one for i in xrange(n_outs): if outs_info[i] is not None: if isinstance(outs_info[i], dict): # DEPRECATED : if outs_info[i].get('return_steps', None) is not None: raise ValueError( "Using `return_steps` has been deprecated. " "Simply select the entries you need using a " "subtensor. 
Scan will optimize memory " "consumption, so do not worry about that.") # END if not isinstance(outs_info[i], dict): # by default any output has a tap value of -1 outs_info[i] = OrderedDict([('initial', outs_info[i]), ('taps', [-1])]) elif (outs_info[i].get('initial', None) is None and outs_info[i].get('taps', None) is not None): # ^ no initial state but taps provided raise ValueError(('If you are using slices of an output ' 'you need to provide a initial state ' 'for it'), outs_info[i]) elif (outs_info[i].get('initial', None) is not None and outs_info[i].get('taps', None) is None): # ^ initial state but taps not provided if 'taps' in outs_info[i]: # ^ explicitly provided a None for taps _logger.warning( 'Output %s ( index %d) has a initial ' 'state but taps is explicitly set to None ', getattr(outs_info[i]['initial'], 'name', 'None'), i) outs_info[i]['taps'] = [-1] else: # if a None is provided as the output info we replace it # with an empty OrdereDict() to simplify handling outs_info[i] = OrderedDict() ## # Step 2. Generate inputs and outputs of the inner functions # for compiling a dummy function (Iteration #1) ## # create theano inputs for the recursive function # note : this is a first batch of possible inputs that will # be compiled in a dummy function; we used this dummy # function to detect shared variables and their updates # and to construct a new and complete list of inputs and # outputs n_seqs = 0 scan_seqs = [] # Variables passed as inputs to the scan op inner_seqs = [] # Variables passed as inputs to the inner function inner_slices = [] # Actual slices if scan is removed from the picture # go through sequences picking up time slices as needed for i, seq in enumerate(seqs): # Note that you can have something like no taps for # a sequence, though is highly unlikely in practice if 'taps' in seq: # go through the indicated slice mintap = numpy.min(seq['taps']) maxtap = numpy.max(seq['taps']) for k in seq['taps']: # create one slice of the input # Later on, if we decide not to use scan because we are # going for just one step, it makes things easier if we # compute the correct outputs here. This way we can use # the output of the lambda expression directly to replace # the output of scan. # If not we need to use copies, that will be replaced at # each frame by the corresponding slice actual_slice = seq['input'][k - mintap] _seq_val = tensor.as_tensor_variable(seq['input']) _seq_val_slice = _seq_val[k - mintap] nw_slice = _seq_val_slice.type() # Try to transfer test_value to the new variable if config.compute_test_value != 'off': try: nw_slice.tag.test_value = gof.Op._get_test_value( _seq_val_slice) except AttributeError as e: if config.compute_test_value != 'ignore': # No need to print a warning or raise an error now, # it will be done when fn will be called. _logger.info( ('Cannot compute test value for ' 'the inner function of scan, input value ' 'missing %s'), e) # Add names to slices for debugging and pretty printing .. # that is if the input already has a name if getattr(seq['input'], 'name', None) is not None: if k > 0: nw_name = seq['input'].name + '[t+%d]' % k elif k == 0: nw_name = seq['input'].name + '[t]' else: nw_name = seq['input'].name + '[t%d]' % k nw_slice.name = nw_name # We cut the sequence such that seq[i] to correspond to # seq[i-k]. For the purposes of cutting the sequences, we # need to pretend tap 0 is used to avoid cutting the sequences # too long if the taps are all lower or all higher than 0. 
maxtap_proxy = max(maxtap, 0) mintap_proxy = min(mintap, 0) start = (k - mintap_proxy) if k == maxtap_proxy: nw_seq = seq['input'][start:] else: end = -(maxtap_proxy - k) nw_seq = seq['input'][start:end] if go_backwards: nw_seq = nw_seq[::-1] scan_seqs.append(nw_seq) inner_seqs.append(nw_slice) inner_slices.append(actual_slice) n_seqs += 1 # Since we've added all sequences now we need to level them up based on # n_steps or their different shapes lengths_vec = [] for seq in scan_seqs: lengths_vec.append(seq.shape[0]) if not scan_utils.isNaN_or_Inf_or_None(n_steps): # ^ N_steps should also be considered lengths_vec.append(tensor.as_tensor(n_steps)) if len(lengths_vec) == 0: # ^ No information about the number of steps raise ValueError('No information about the number of steps ' 'provided. Either provide a value for ' 'n_steps argument of scan or provide an input ' 'sequence') # If the user has provided the number of steps, do that regardless ( and # raise an error if the sequences are not long enough ) if scan_utils.isNaN_or_Inf_or_None(n_steps): actual_n_steps = lengths_vec[0] for contestant in lengths_vec[1:]: actual_n_steps = tensor.minimum(actual_n_steps, contestant) else: actual_n_steps = tensor.as_tensor(n_steps) # Add names -- it helps a lot when debugging for (nw_seq, seq) in zip(scan_seqs, seqs): if getattr(seq['input'], 'name', None) is not None: nw_seq.name = seq['input'].name + '[%d:]' % k scan_seqs = [seq[:actual_n_steps] for seq in scan_seqs] # Conventions : # mit_mot = multiple input taps, multiple output taps ( only provided # by the gradient function ) # mit_sot = multiple input taps, single output tap (t + 0) # sit_sot = single input tap, single output tap (t + 0) # nit_sot = no input tap, single output tap (t + 0) # MIT_MOT -- not provided by the user only by the grad function n_mit_mot = 0 n_mit_mot_outs = 0 mit_mot_scan_inputs = [] mit_mot_inner_inputs = [] mit_mot_inner_outputs = [] mit_mot_out_slices = [] mit_mot_rightOrder = [] # SIT_SOT -- provided by the user n_mit_sot = 0 mit_sot_scan_inputs = [] mit_sot_inner_inputs = [] mit_sot_inner_slices = [] mit_sot_inner_outputs = [] mit_sot_return_steps = OrderedDict() mit_sot_tap_array = [] mit_sot_rightOrder = [] n_sit_sot = 0 sit_sot_scan_inputs = [] sit_sot_inner_inputs = [] sit_sot_inner_slices = [] sit_sot_inner_outputs = [] sit_sot_return_steps = OrderedDict() sit_sot_rightOrder = [] # go through outputs picking up time slices as needed for i, init_out in enumerate(outs_info): # Note that our convention dictates that if an output uses # just the previous time step, as a initial state we will only # provide a tensor of the same dimension as one time step; This # makes code much cleaner for those who do not use taps. Otherwise # they would always had to shape_padleft the initial state .. # which is ugly if init_out.get('taps', None) == [-1]: actual_arg = init_out['initial'] if not isinstance(actual_arg, tensor.Variable): actual_arg = tensor.as_tensor_variable(actual_arg) arg = safe_new(actual_arg) if isinstance(arg, tensor.Constant): # safe new returns a clone of the constants, but that is not # what we need for initial states arg = arg.type() # Try to transfer test_value to the new variable if config.compute_test_value != 'off': try: arg.tag.test_value = gof.Op._get_test_value(actual_arg) except AttributeError as e: if config.compute_test_value != 'ignore': # No need to print a warning or raise an error now, # it will be done when fn will be called. 
_logger.info( ('Cannot compute test value for the ' 'inner function of scan, input value missing %s'), e) if getattr(init_out['initial'], 'name', None) is not None: arg.name = init_out['initial'].name + '[t-1]' # We need now to allocate space for storing the output and copy # the initial state over. We do this using the expand function # defined in scan utils sit_sot_scan_inputs.append( scan_utils.expand_empty( tensor.unbroadcast(tensor.shape_padleft(actual_arg), 0), actual_n_steps)) sit_sot_inner_slices.append(actual_arg) if i in return_steps: sit_sot_return_steps[n_sit_sot] = return_steps[i] sit_sot_inner_inputs.append(arg) sit_sot_rightOrder.append(i) n_sit_sot += 1 elif init_out.get('taps', None): if numpy.any(numpy.array(init_out.get('taps', [])) > 0): # Make sure we do not have requests for future values of a # sequence we can not provide such values raise ValueError('Can not use future taps of outputs', init_out) # go through the taps mintap = abs(numpy.min(init_out['taps'])) mit_sot_tap_array.append(init_out['taps']) idx_offset = abs(numpy.min(init_out['taps'])) # Sequence mit_sot_scan_inputs.append( scan_utils.expand_empty(init_out['initial'][:mintap], actual_n_steps)) if i in return_steps: mit_sot_return_steps[n_mit_sot] = return_steps[i] mit_sot_rightOrder.append(i) n_mit_sot += 1 for k in init_out['taps']: # create a new slice actual_nw_slice = init_out['initial'][k + mintap] _init_out_var = tensor.as_tensor_variable(init_out['initial']) _init_out_var_slice = _init_out_var[k + mintap] nw_slice = _init_out_var_slice.type() # Try to transfer test_value to the new variable if config.compute_test_value != 'off': try: nw_slice.tag.test_value = gof.Op._get_test_value( _init_out_var_slice) except AttributeError as e: if config.compute_test_value != 'ignore': # No need to print a warning or raise an error now, # it will be done when fn will be called. _logger.info( ('Cannot compute test value for ' 'the inner function of scan, input value ' 'missing. %s'), e) # give it a name or debugging and pretty printing if getattr(init_out['initial'], 'name', None) is not None: if k > 0: nw_slice.name = (init_out['initial'].name + '[t+%d]' % k) elif k == 0: nw_slice.name = init_out['initial'].name + '[t]' else: nw_slice.name = (init_out['initial'].name + '[t%d]' % k) mit_sot_inner_inputs.append(nw_slice) mit_sot_inner_slices.append(actual_nw_slice) # NOTE: there is another case, in which we do not want to provide # any previous value of the output to the inner function (i.e. # a map); in that case we do not have to do anything .. 
# Re-order args max_mit_sot = numpy.max([-1] + mit_sot_rightOrder) + 1 max_sit_sot = numpy.max([-1] + sit_sot_rightOrder) + 1 n_elems = numpy.max([max_mit_sot, max_sit_sot]) _ordered_args = [[] for x in xrange(n_elems)] offset = 0 for idx in xrange(n_mit_sot): n_inputs = len(mit_sot_tap_array[idx]) if n_fixed_steps in [1, -1]: _ordered_args[mit_sot_rightOrder[idx]] = \ mit_sot_inner_slices[offset:offset + n_inputs] else: _ordered_args[mit_sot_rightOrder[idx]] = \ mit_sot_inner_inputs[offset:offset + n_inputs] offset += n_inputs for idx in xrange(n_sit_sot): if n_fixed_steps in [1, -1]: _ordered_args[sit_sot_rightOrder[idx]] = \ [sit_sot_inner_slices[idx]] else: _ordered_args[sit_sot_rightOrder[idx]] = \ [sit_sot_inner_inputs[idx]] ordered_args = [] for ls in _ordered_args: ordered_args += ls if n_fixed_steps in [1, -1]: args = (inner_slices + ordered_args + non_seqs) else: args = (inner_seqs + ordered_args + non_seqs) # add only the non-shared variables and non-constants to the arguments of # the dummy function [ a function should not get shared variables or # constants as input ] dummy_args = [ arg for arg in args if (not isinstance(arg, SharedVariable) and not isinstance(arg, tensor.Constant)) ] # when we apply the lambda expression we get a mixture of update rules # and outputs that needs to be separated condition, outputs, updates = scan_utils.get_updates_and_outputs(fn(*args)) if condition is not None: as_while = True else: as_while = False ## # Step 3. Check if we actually need scan and remove it if we don't ## if n_fixed_steps in [1, -1]: # We do not need to use the scan op anymore, so we can just return # the outputs and updates we have if condition is not None: _logger.warning(('When the number of steps is fixed and equal ' 'to 1, the provided stopping condition, ', str(condition), ' is ignored')) for pos, inner_out in enumerate(outputs): # we need to see if we need to pad our sequences with an # unbroadcastable dimension; case example : we return an # output for which we want all intermediate. If n_steps is 1 # then, if we return the output as given by the innner function # this will represent only a slice and it will have one # dimension less. if (isinstance(inner_out.type, tensor.TensorType) and return_steps.get(pos, 0) != 1): outputs[pos] = tensor.unbroadcast( tensor.shape_padleft(inner_out), 0) if len(outputs) == 1: outputs = outputs[0] return (outputs, updates) ## # Step 4. 
Compile the dummy function ## # We can now compile a dummy function just to see what shared variable # we have and what are their update rules (note that the user has # the option not to pass the shared variable to scan, so we need to # pick them manually and add them to scan) # make the compilation as fast as possible by not applying any # optimization or conversion to C [ note this region is not important # for performance so we can do stuff as unoptimal as we wish ] # extract still missing inputs (there still might be so) and add them # as non sequences at the end of our args fake_nonseqs = [x.type() for x in non_seqs] fake_outputs = scan_utils.clone(outputs, replace=OrderedDict( izip(non_seqs, fake_nonseqs))) all_inputs = ifilter( lambda x: (isinstance(x, gof.Variable) and not isinstance( x, SharedVariable) and not isinstance(x, gof.Constant)), gof.graph.inputs(fake_outputs)) extra_inputs = [x for x in all_inputs if x not in args + fake_nonseqs] non_seqs += extra_inputs # Note we do not use all_inputs directly since the order of variables # in args is quite important dummy_args += extra_inputs dummy_outs = outputs if condition is not None: dummy_outs.append(condition) dummy_f = function(dummy_args, dummy_outs, updates=updates, mode=compile.mode.Mode(linker='py', optimizer=None), on_unused_input='ignore', profile=False) ## # Step 5. Re-arange inputs of scan into a more strict order ## # Step 5.0 Check the outputs of the dummy function to see if they # match with user provided data # if the number of outputs to the function does not match the number of # assumed outputs until now (provided by the user) there can be # only one explanation: No information is provided for any of the # outputs (i.e. we are dealing with a map) tmp_dummy_f_outs = len(dummy_f.maker.outputs) if as_while: tmp_dummy_f_outs -= 1 if not (tmp_dummy_f_outs == n_outs or outs_info == []): raise ValueError('Please provide None as outputs_info for ' 'any output that does not feed back into ' 'scan (i.e. it behaves like a map) ') if outs_info == []: n_outs = len(dummy_f.maker.outputs) if as_while: n_outs = n_outs - 1 outs_info = [OrderedDict() for x in xrange(n_outs)] # Step 5.1 Outputs with taps different then -1 for i, out in enumerate(outs_info): if 'taps' in out and out['taps'] != [-1]: mit_sot_inner_outputs.append(outputs[i]) # Step 5.2 Outputs with tap equal to -1 for i, out in enumerate(outs_info): if 'taps' in out and out['taps'] == [-1]: sit_sot_inner_outputs.append(outputs[i]) # Step 5.3 Outputs that correspond to update rules of shared variables givens = OrderedDict() n_shared_outs = 0 shared_scan_inputs = [] shared_inner_inputs = [] shared_inner_outputs = [] sit_sot_shared = [] for input in dummy_f.maker.expanded_inputs: if isinstance(input.variable, SharedVariable) and input.update: new_var = safe_new(input.variable) if getattr(input.variable, 'name', None) is not None: new_var.name = input.variable.name + '_copy' if isinstance(new_var.type, ops.expandable_types): sit_sot_inner_inputs.append(new_var) sit_sot_scan_inputs.append( scan_utils.expand_empty( tensor.unbroadcast( tensor.shape_padleft(input.variable), 0), actual_n_steps)) tensor_update = tensor.as_tensor_variable(input.update) sit_sot_inner_outputs.append(tensor_update) # Not that pos is not a negative index. The sign of pos is used # as a flag to indicate if this output should be part of the # update rules or part of the standard outputs of scan. 
# If `pos` is positive than it corresponds to the standard # outputs of scan and it refers to output of index `pos`. If `pos` # is negative that it corresponds to update rules of scan and it # refers to update rule of index -1 - `pos`. sit_sot_rightOrder.append(-1 - len(sit_sot_shared)) sit_sot_shared.append(input.variable) givens[input.variable] = new_var else: shared_inner_inputs.append(new_var) shared_scan_inputs.append(input.variable) shared_inner_outputs.append(input.update) givens[input.variable] = new_var n_shared_outs += 1 n_sit_sot = len(sit_sot_inner_inputs) # Step 5.4 Outputs with no taps used in the input n_nit_sot = 0 nit_sot_inner_outputs = [] nit_sot_return_steps = OrderedDict() nit_sot_rightOrder = [] for i, out in enumerate(outs_info): if not 'taps' in out: nit_sot_inner_outputs.append(outputs[i]) if i in return_steps: nit_sot_return_steps[n_nit_sot] = return_steps[i] nit_sot_rightOrder.append(i) n_nit_sot += 1 # Step 5.5 all other arguments including extra inputs other_scan_args = [] other_inner_args = [] other_scan_args += [ arg for arg in non_seqs if (not isinstance(arg, SharedVariable) and not isinstance(arg, tensor.Constant)) ] # Step 5.6 all shared variables with no update rules other_inner_args += [ safe_new(arg, '_copy') for arg in non_seqs if (not isinstance(arg, SharedVariable) and not isinstance(arg, tensor.Constant)) ] givens.update(OrderedDict(izip(other_scan_args, other_inner_args))) if strict: non_seqs_set = set(non_sequences if non_sequences is not None else []) other_shared_scan_args = [ arg.variable for arg in dummy_f.maker.expanded_inputs if (isinstance(arg.variable, SharedVariable) and not arg.update and arg.variable in non_seqs_set) ] other_shared_inner_args = [ safe_new(arg.variable, '_copy') for arg in dummy_f.maker.expanded_inputs if (isinstance(arg.variable, SharedVariable) and not arg.update and arg.variable in non_seqs_set) ] else: other_shared_scan_args = [ arg.variable for arg in dummy_f.maker.expanded_inputs if (isinstance(arg.variable, SharedVariable) and not arg.update) ] other_shared_inner_args = [ safe_new(arg.variable, '_copy') for arg in dummy_f.maker.expanded_inputs if (isinstance(arg.variable, SharedVariable) and not arg.update) ] givens.update( OrderedDict(izip(other_shared_scan_args, other_shared_inner_args))) ## # Step 6. Re-order the outputs and clone them replacing things # using the givens ## inner_inputs = (inner_seqs + mit_mot_inner_inputs + mit_sot_inner_inputs + sit_sot_inner_inputs + shared_inner_inputs + other_shared_inner_args + other_inner_args) inner_outs = (mit_mot_inner_outputs + mit_sot_inner_outputs + sit_sot_inner_outputs + nit_sot_inner_outputs + shared_inner_outputs) if condition is not None: inner_outs.append(condition) # Cuda and Gpuarray are imported here, instead of being imported on top of # the file because that would force on the user some dependencies that we # might do not want to. Currently we are working on removing the # dependencies on sandbox code completeley. from theano.sandbox import cuda, gpuarray if cuda.cuda_available or gpuarray.pygpu_activated: # very often we end up in this situation when we want to # replace w with w_copy, where w is a GPU variable # and w_copy is TensorType. 
This is caused because shared # variables are put on GPU right aways >:| , new_givens = OrderedDict() for w, w_copy in iteritems(givens): if ((isinstance(w.type, cuda.CudaNdarrayType) or isinstance(w.type, gpuarray.GpuArrayType)) and isinstance(w_copy.type, tensor.TensorType)): for o in inner_outs: new_givens = traverse(o, w, w_copy, new_givens) else: new_givens[w] = w_copy else: new_givens = givens new_outs = scan_utils.clone(inner_outs, replace=new_givens) ## # Step 7. Create the Scan Op ## tap_array = mit_sot_tap_array + [[-1] for x in xrange(n_sit_sot)] if allow_gc is None: allow_gc = config.scan.allow_gc info = OrderedDict() info['tap_array'] = tap_array info['n_seqs'] = n_seqs info['n_mit_mot'] = n_mit_mot info['n_mit_mot_outs'] = n_mit_mot_outs info['mit_mot_out_slices'] = mit_mot_out_slices info['n_mit_sot'] = n_mit_sot info['n_sit_sot'] = n_sit_sot info['n_shared_outs'] = n_shared_outs info['n_nit_sot'] = n_nit_sot info['truncate_gradient'] = truncate_gradient info['name'] = name info['mode'] = mode info['destroy_map'] = OrderedDict() info['gpu'] = False info['as_while'] = as_while info['profile'] = profile info['allow_gc'] = allow_gc info['strict'] = strict local_op = scan_op.Scan(inner_inputs, new_outs, info) ## # Step 8. Compute the outputs using the scan op ## _scan_inputs = (scan_seqs + mit_mot_scan_inputs + mit_sot_scan_inputs + sit_sot_scan_inputs + shared_scan_inputs + [actual_n_steps for x in xrange(n_nit_sot)] + other_shared_scan_args + other_scan_args) scan_inputs = [] for arg in [actual_n_steps] + _scan_inputs: try: arg = tensor.as_tensor_variable(arg) except TypeError: # This happens for Random States for e.g. but it is a good way # to make sure no input is a cuda ndarrays pass scan_inputs += [arg] scan_outs = local_op(*scan_inputs) if type(scan_outs) not in (list, tuple): scan_outs = [scan_outs] ## # Step 9. Figure out which outs are update rules for shared variables # and so on ... ## update_map = OrderedUpdates() def remove_dimensions(outs, steps_return, offsets=None): out_ls = [] for idx, out in enumerate(outs): if idx in steps_return: if steps_return[idx] > 1: out_ls.append(out[-steps_return[idx]:]) else: out_ls.append(out[-1]) else: if offsets is None: out_ls.append(out) else: out_ls.append(out[offsets[idx]:]) return out_ls offset = n_mit_mot offsets = [abs(numpy.min(x)) for x in mit_sot_tap_array] mit_sot_outs = remove_dimensions(scan_outs[offset:offset + n_mit_sot], mit_sot_return_steps, offsets) offset += n_mit_sot offsets = [1 for x in xrange(n_sit_sot)] sit_sot_outs = remove_dimensions(scan_outs[offset:offset + n_sit_sot], sit_sot_return_steps, offsets) offset += n_sit_sot nit_sot_outs = remove_dimensions(scan_outs[offset:offset + n_nit_sot], nit_sot_return_steps) offset += n_nit_sot for idx, update_rule in enumerate(scan_outs[offset:offset + n_shared_outs]): update_map[shared_scan_inputs[idx]] = update_rule _scan_out_list = (mit_sot_outs + sit_sot_outs + nit_sot_outs) # Step 10. I need to reorder the outputs to be in the order expected by # the user rightOrder = (mit_sot_rightOrder + sit_sot_rightOrder + nit_sot_rightOrder) scan_out_list = [None] * len(rightOrder) for idx, pos in enumerate(rightOrder): if pos >= 0: scan_out_list[pos] = _scan_out_list[idx] else: # Not that pos is not a negative index. The sign of pos is used # as a flag to indicate if this output should be part of the # update rules or part of the standard outputs of scan. # If `pos` is positive than it corresponds to the standard # outputs of scan and it refers to output of index `pos`. 
If `pos` # is negative that it corresponds to update rules of scan and it # refers to update rule of index -1 - `pos`. update_map[sit_sot_shared[abs(pos) - 1]] = _scan_out_list[idx][-1] scan_out_list = [x for x in scan_out_list if x is not None] if len(scan_out_list) == 1: scan_out_list = scan_out_list[0] elif len(scan_out_list) == 0: scan_out_list = None return (scan_out_list, update_map)
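# A minimal usage sketch for the `scan` interface implemented above: a
# first-order recurrence (decayed cumulative sum) with one sequence, one
# recurrent output and one non_sequence. The names (`x_seq`, `acc0`,
# `decay`, `step`) are illustrative, not taken from the library.
import numpy
import theano
import theano.tensor as T

x_seq = T.dvector('x_seq')    # sequence scan iterates over
acc0 = T.dscalar('acc0')      # initial state of the single recurrent output
decay = T.dscalar('decay')    # passed through non_sequences

def step(x_t, acc_tm1, decay_):
    # arguments arrive as: sequence slice, previous output (tap -1), non_sequence
    return decay_ * acc_tm1 + x_t

acc_seq, updates = theano.scan(step,
                               sequences=x_seq,
                               outputs_info=acc0,
                               non_sequences=decay)

f = theano.function([x_seq, acc0, decay], acc_seq, updates=updates)
print(f(numpy.arange(5.0), 0.0, 0.5))   # -> [0., 1., 2.5, 4.25, 6.125]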
def call(self, x, mask=None): print "call" predict_targets = x[0] real_targets = x[1] init_state = self.get_initial_states(real_targets) ndim = predict_targets.ndim axes = [1, 0] + list(range(2, ndim)) predict_targets = predict_targets.dimshuffle(axes) ndim = real_targets.ndim axes = [1, 0] + list(range(2, ndim)) real_targets = real_targets.dimshuffle(axes) print type(init_state[0]) print(init_state[0]).broadcastable if l != 1: if len(init_state) > 0: for i in range(1, 2 + len(self.output_dim)): init_state[0] = T.unbroadcast(init_state[0], i) elif l == 1: if len(init_state) > 0: for i in range(1, 1 + len(self.output_dim)): init_state[0] = T.unbroadcast(init_state[0], i) # print (init_state[0]).broadcastable # exit(1) def _step(x_t, y_t, *errors): error = T.concatenate(errors, 1) o = K.conv2d(error, self.W, strides=(1, 1), padding="same", data_format="channels_first") o += K.reshape(self.b, (1, self.error_hidden_dim, 1, 1)) o = activations.get("relu")(o) o = K.conv2d(o, self.output_W, strides=(1, 1), padding="same", data_format="channels_first") # o += K.reshape(self.output_b, (1, self.output_dim[0], 1, 1)) o = activations.get("relu")(o) o += x_t return [o - y_t, o] init_state = init_state[0] # init_state = theano.shared(np.zeros((self.n_past_error, 926, 1, 48, 48), dtype=theano.config.floatX)) [errors, os], _ = theano.scan( _step, sequences=[predict_targets, real_targets], outputs_info=[ dict(initial=init_state, taps=[-i for i in range(self.n_past_error, 0, -1)]), None ]) last_os = os[-1] axes = [1, 0] + list(range(2, os.ndim)) os = os.dimshuffle(axes) if self.return_sequences: return os else: return last_os
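# The outputs_info above uses several past taps of the same output. As a
# minimal, self-contained illustration of that pattern (all names here are
# made up), a Fibonacci-style recurrence with taps [-2, -1] needs an initial
# state whose leading dimension covers both taps:
import numpy
import theano
import theano.tensor as T

x = T.dvector('x')
init = T.dvector('init')      # shape (2,): init[0] is y[t-2], init[1] is y[t-1]

def step(x_t, y_tm2, y_tm1):
    # taps are passed in the order they were listed: -2 first, then -1
    return y_tm2 + y_tm1 + x_t

y, _ = theano.scan(step, sequences=x,
                   outputs_info=[dict(initial=init, taps=[-2, -1])])
f = theano.function([x, init], y)
print(f(numpy.zeros(5), numpy.array([1., 1.])))   # -> [2., 3., 5., 8., 13.]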
def update(self, input_current): rate = self.t_ref - self.t_rc * TT.log(1 - 1.0 / TT.maximum(input_current, 0)) rate = TT.switch(input_current > 1, 1 / rate, 0) return OrderedDict([(self.output, TT.unbroadcast(rate.astype('float32'), 0))])
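# The expression above is the standard LIF rate approximation: the inverse of
# t_ref - t_rc * log(1 - 1/J) for input currents J > 1, and 0 otherwise. A
# NumPy sanity check of the same formula (the t_ref / t_rc values here are
# illustrative, not taken from the model):
import numpy as np

def lif_rate(j, t_ref=0.002, t_rc=0.02):
    j = np.asarray(j, dtype='float64')
    with np.errstate(divide='ignore', invalid='ignore'):
        isi = t_ref - t_rc * np.log(1.0 - 1.0 / np.maximum(j, 0.0))
    return np.where(j > 1.0, 1.0 / isi, 0.0)

print(lif_rate([0.5, 1.1, 2.0, 10.0]))   # ~[0., 20., 63., 243.] spikes/s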
def __call__(self, X, context, parent_t_seq, init_state=None, init_cell=None, hist_h=None, mask=None, context_mask=None, dropout=0, train=True, srng=None, time_steps=None): assert context_mask.dtype == 'int8', 'context_mask is not int8, got %s' % context_mask.dtype # (n_timestep, batch_size) mask = self.get_mask(mask, X) # (n_timestep, batch_size, input_dim) X = X.dimshuffle((1, 0, 2)) retain_prob = 1. - dropout B_w = np.ones((4, ), dtype=theano.config.floatX) B_u = np.ones((4, ), dtype=theano.config.floatX) if dropout > 0: logging.info('applying dropout with p = %f', dropout) if train: B_w = srng.binomial((4, X.shape[1], self.input_dim), p=retain_prob, dtype=theano.config.floatX) B_u = srng.binomial((4, X.shape[1], self.output_dim), p=retain_prob, dtype=theano.config.floatX) else: B_w *= retain_prob B_u *= retain_prob # (n_timestep, batch_size, output_dim) xi = T.dot(X * B_w[0], self.W_i) + self.b_i xf = T.dot(X * B_w[1], self.W_f) + self.b_f xc = T.dot(X * B_w[2], self.W_c) + self.b_c xo = T.dot(X * B_w[3], self.W_o) + self.b_o # (batch_size, context_size, att_layer1_dim) context_att_trans = T.dot(context, self.att_ctx_W1) + self.att_b1 if init_state: # (batch_size, output_dim) first_state = T.unbroadcast(init_state, 1) else: first_state = T.unbroadcast( alloc_zeros_matrix(X.shape[1], self.output_dim), 1) if init_cell: # (batch_size, output_dim) first_cell = T.unbroadcast(init_cell, 1) else: first_cell = T.unbroadcast( alloc_zeros_matrix(X.shape[1], self.output_dim), 1) if not hist_h: # (batch_size, n_timestep, output_dim) hist_h = alloc_zeros_matrix(X.shape[1], X.shape[0], self.output_dim) if train: n_timestep = X.shape[0] time_steps = T.arange(n_timestep, dtype='int32') # (n_timestep, batch_size) parent_t_seq = parent_t_seq.dimshuffle((1, 0)) [outputs, cells, ctx_vectors, hist_h_outputs], updates = theano.scan( self._step, sequences=[time_steps, xi, xf, xo, xc, mask, parent_t_seq], outputs_info=[ first_state, # for h first_cell, # for cell # T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.context_dim), 1), # for ctx vector None, hist_h, # for hist_h ], non_sequences=[ self.U_i, self.U_f, self.U_o, self.U_c, self.C_i, self.C_f, self.C_o, self.C_c, self.H_i, self.H_f, self.H_o, self.H_c, self.P_i, self.P_f, self.P_o, self.P_c, self.att_h_W1, self.att_W2, self.att_b2, context, context_mask, context_att_trans, B_u ]) outputs = outputs.dimshuffle((1, 0, 2)) ctx_vectors = ctx_vectors.dimshuffle((1, 0, 2)) cells = cells.dimshuffle((1, 0, 2)) return outputs, cells, ctx_vectors
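# A standalone sketch of the dropout handling above: the binary masks B_w /
# B_u are sampled once, outside of scan, at training time, and replaced by a
# deterministic scaling with the retain probability at test time. Names here
# are illustrative only.
import theano
import theano.tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams

srng = MRG_RandomStreams(seed=1234)

def dropout_mask(shape, dropout, train):
    retain_prob = 1.0 - dropout
    if train and dropout > 0:
        # stochastic binary mask, one sample per element
        return srng.binomial(shape, p=retain_prob, dtype=theano.config.floatX)
    # deterministic scaling at test time
    return T.ones(shape, dtype=theano.config.floatX) * retain_prob

X = T.matrix('X')
X_dropped = X * dropout_mask(X.shape, dropout=0.5, train=True)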
def predict_one(self, x):
    x = tt.unbroadcast(x, 0)  # strip the broadcastable flag on dim 0 so scan-based layers accept the input
    for layer in self.layers:
        x = layer.forward_pass(x)
    return x
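# Sketch of the broadcast issue that calls like the one above work around:
# an initial state built with a constant size-1 dimension gets a broadcastable
# pattern that does not match the step output, and scan will typically reject
# the graph; unbroadcasting the initial state keeps the patterns consistent.
# All names below are illustrative.
import numpy
import theano
import theano.tensor as T

X = T.dtensor3('X')                                  # (time, batch, features)
W = theano.shared(numpy.ones((3, 1)), name='W')      # projects features -> 1

h0 = T.zeros((X.shape[1], 1))   # pattern (False, True): dim 1 is broadcastable
h0 = T.unbroadcast(h0, 1)       # match the (False, False) output of the step

h_seq, _ = theano.scan(lambda x_t, h_tm1: h_tm1 + T.dot(x_t, W),
                       sequences=X, outputs_info=h0)
f = theano.function([X], h_seq[-1])
print(f(numpy.ones((4, 2, 3))))   # -> [[12.], [12.]]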
target = im shp = im.shape # careful shift to avoid leakage conv1 = conv2d(im, conv1_w, conv1_b, border_mode=(0, k_conv1[1])) theano.printing.Print("conv1.shape")(conv1.shape) conv1 = conv1[:, :, :, :shp[3]] theano.printing.Print("conv1.shape")(conv1.shape) r_conv1 = conv1.dimshuffle(2, 1, 0, 3) theano.printing.Print("r_conv1.shape")(r_conv1.shape) shp = r_conv1.shape init_hidden = tensor.zeros((minibatch_size, n_conv1, 1, shp[3]), dtype=theano.config.floatX) # weirdness in broadcast if minibatch_size == 1: init_hidden = tensor.unbroadcast(init_hidden, 0, 2) else: init_hidden = tensor.unbroadcast(init_hidden, 2) theano.printing.Print("init_hidden.shape")(init_hidden.shape) # recurrent function (using tanh activation function) def step(in_t, h_tm1): theano.printing.Print("in_t.shape")(in_t.shape) theano.printing.Print("h_tm1.shape")(h_tm1.shape) h_i = conv2d(h_tm1, conv1_hid, border_mode="half") theano.printing.Print("h_i.shape")(h_i.shape) in_i = in_t.dimshuffle(1, 0, 'x', 2) theano.printing.Print("in_i.shape")(in_i.shape) h_t = tanh(in_i + h_i) # need to add broadcast dims back to keep scan happy theano.printing.Print("h_t.shape")(h_t.shape)
def rnn(step_function, inputs, initial_states, go_backwards=False, mask=None): '''Iterates over the time dimension of a tensor. Parameters ---------- inputs: tensor of temporal data of shape (samples, time, ...) (at least 3D). step_function: Parameters: input: tensor with shape (samples, ...) (no time dimension), representing input for the batch of samples at a certain time step. states: list of tensors. Returns: output: tensor with shape (samples, ...) (no time dimension), new_states: list of tensors, same length and shapes as 'states'. initial_states: tensor with shape (samples, ...) (no time dimension), containing the initial values for the states used in the step function. go_backwards: boolean. If True, do the iteration over the time dimension in reverse order. mask: binary tensor with shape (samples, time), with a zero for every element that is masked. Returns ------- A tuple (last_output, outputs, new_states). last_output: the latest output of the rnn, of shape (samples, ...) outputs: tensor with shape (samples, time, ...) where each entry outputs[s, t] is the output of the step function at time t for sample s. new_states: list of tensors, latest states returned by the step function, of shape (samples, ...). ''' ndim = inputs.ndim assert ndim >= 3, 'Input should be at least 3D.' axes = [1, 0] + list(range(2, ndim)) inputs = inputs.dimshuffle(axes) if mask is not None: if mask.ndim == ndim - 1: mask = expand_dims(mask) assert mask.ndim == ndim mask = mask.dimshuffle(axes) # build an all-zero tensor of shape (samples, output_dim) initial_output = step_function(inputs[0], initial_states)[0] * 0 # Theano gets confused by broadcasting patterns in the scan op initial_output = T.unbroadcast(initial_output, 0, 1) def _step(input, mask, output_tm1, *states): output, new_states = step_function(input, states) # output previous output if masked. output = T.switch(mask, output, output_tm1) return_states = [] for state, new_state in zip(states, new_states): return_states.append(T.switch(mask, new_state, state)) return [output] + return_states results, _ = theano.scan(_step, sequences=[inputs, mask], outputs_info=[initial_output] + initial_states, go_backwards=go_backwards) else: def _step(input, *states): output, new_states = step_function(input, states) return [output] + new_states results, _ = theano.scan(_step, sequences=inputs, outputs_info=[None] + initial_states, go_backwards=go_backwards) # deal with Theano API inconsistency if type(results) is list: outputs = results[0] states = results[1:] else: outputs = results states = [] outputs = T.squeeze(outputs) last_output = outputs[-1] axes = [1, 0] + list(range(2, outputs.ndim)) outputs = outputs.dimshuffle(axes) states = [T.squeeze(state[-1]) for state in states] return last_output, outputs, states
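# Usage sketch for the `rnn` helper above: a step function with a single
# state that simply accumulates its input (all names are illustrative).
import numpy
import theano
import theano.tensor as T

def step(x_t, states):
    h_t = states[0] + x_t
    return h_t, [h_t]

inputs = T.dtensor3('inputs')                        # (samples, time, features)
init_h = T.unbroadcast(T.zeros((inputs.shape[0], inputs.shape[2])), 0, 1)

last_output, outputs, states = rnn(step, inputs, [init_h])
f = theano.function([inputs], last_output)
print(f(numpy.ones((2, 4, 3))))   # -> [[4., 4., 4.], [4., 4., 4.]]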
def __init__(self, inputs=None, hiddens=None, params=None, outdir='outputs/lstm/', activation='relu', gate_activation='sigmoid', mrg=RNG_MRG.MRG_RandomStreams(1), weights_init='uniform', weights_interval='glorot', weights_mean=0, weights_std=5e-3, bias_init=0.0, r_weights_init='identity', r_weights_interval='glorot', r_weights_mean=0, r_weights_std=5e-3, r_bias_init=0.0, direction='forward', clip_recurrent_grads=False): """ Initialize an LSTM. Parameters ---------- inputs : List of [tuple(shape, `Theano.TensorType`)] The dimensionality of the inputs for this model, and the routing information for the model to accept inputs from elsewhere. `inputs` variable are expected to be of the form (timesteps, batch, data). `shape` will be a monad tuple representing known sizes for each dimension in the `Theano.TensorType`. The length of `shape` should be equal to number of dimensions in `Theano.TensorType`, where the shape element is an integer representing the size for its dimension, or None if the shape isn't known. For example, if you have a matrix with unknown batch size but fixed feature size of 784, `shape` would be: (None, 784). The full form of `inputs` would be: [((None, 784), <TensorType(float32, matrix)>)]. hiddens : int or Tuple of (shape, `Theano.TensorType`) Int for the number of hidden units to use, or a tuple of shape, expression to route the starting hidden values from elsewhere. params : Dict(string_name: theano SharedVariable), optional A dictionary of model parameters (shared theano variables) that you should use when constructing this model (instead of initializing your own shared variables). This parameter is useful when you want to have two versions of the model that use the same parameters - such as siamese networks or pretraining some weights. outdir : str The location to produce outputs from training or running the :class:`LSTM`. If None, nothing will be saved. activation : str or callable The nonlinear (or linear) activation to perform for the hidden units. This activation function should be appropriate for the output unit types, i.e. 'sigmoid' for binary. See opendeep.utils.activation for a list of available activation functions. Alternatively, you can pass your own function to be used as long as it is callable. gate_activation : str or callable The activation to perform for the hidden gates (default sigmoid). See opendeep.utils.activation for a list of available activation functions. Alternatively, you can pass your own function to be used as long as it is callable. mrg : random A random number generator that is used when adding noise. I recommend using Theano's sandbox.rng_mrg.MRG_RandomStreams. weights_init : str Determines the method for initializing input-hidden model weights. See opendeep.utils.nnet for options. weights_interval : str or float If Uniform `weights_init`, the +- interval to use. See opendeep.utils.nnet for options. weights_mean : float If Gaussian `weights_init`, the mean value to use. weights_std : float If Gaussian `weights_init`, the standard deviation to use. bias_init : float The initial value to use for the bias parameter. Most often, the default of 0.0 is preferred. r_weights_init : str Determines the method for initializing recurrent hidden-hidden model weights. See opendeep.utils.nnet for options. r_weights_interval : str or float If Uniform `r_weights_init`, the +- interval to use. See opendeep.utils.nnet for options. r_weights_mean : float If Gaussian `r_weights_init`, the mean value to use. 
r_weights_std : float If Gaussian `r_weights_init`, the standard deviation to use. r_bias_init : float The initial value to use for the recurrent bias parameter. Most often, the default of 0.0 is preferred. direction : str The direction this recurrent model should go over its inputs. Can be 'forward', 'backward', or 'bidirectional'. In the case of 'bidirectional', it will make two passes over the sequence, computing two sets of hiddens and adding them together. clip_recurrent_grads : False or float, optional Whether to clip the gradients for the parameters that unroll over timesteps (such as the weights connecting previous hidden states to the current hidden state, and not the weights from current input to hiddens). If it is a float, the gradients for the weights will be hard clipped to the range `+-clip_recurrent_grads`. """ initial_parameters = locals().copy() initial_parameters.pop('self') super(LSTM, self).__init__(**initial_parameters) ################## # specifications # ################## backward = direction.lower() == 'backward' bidirectional = direction.lower() == 'bidirectional' ######################## # activation functions # ######################## # recurrent hidden activation functions! self.hidden_activation_func = get_activation_function(activation) self.gate_activation_func = get_activation_function(gate_activation) ########## # inputs # ########## # inputs are expected to have the shape (n_timesteps, batch_size, data) if len(self.inputs) > 1: raise NotImplementedError( "Expected 1 input, found %d. Please merge inputs before passing " "to the model!" % len(self.inputs)) # self.inputs is a list of all the input expressions (we enforce only 1, so self.inputs[0] is the input) input_shape, self.input = self.inputs[0] if isinstance(input_shape, int): self.input_size = ((None, ) * (self.input.ndim - 1)) + (input_shape, ) else: self.input_size = input_shape assert self.input_size is not None, "Need to specify the shape for at least the last dimension of the input!" # input is 3D tensor of (timesteps, batch_size, data_dim) # if input is 2D tensor, assume it is of the form (timesteps, data_dim) i.e. batch_size is 1. Convert to 3D. # if input is > 3D tensor, assume it is of form (timesteps, batch_size, data...) and flatten to 3D. if self.input.ndim == 1: self.input = unbroadcast(self.input.dimshuffle(0, 'x', 'x'), [1, 2]) elif self.input.ndim == 2: self.input = unbroadcast(self.input.dimshuffle(0, 'x', 1), 1) elif self.input.ndim > 3: flat_in = Flatten((self.input_size, self.input), ndim=3) self.input = flat_in.get_outputs() self.input_size = flat_in.output_size ########### # hiddens # ########### # have only 1 hiddens assert len( self.hiddens) == 1, "Expected 1 `hiddens` param, found %d" % len( self.hiddens) self.hiddens = self.hiddens[0] # if hiddens is an int (hidden size parameter, not routing info) h_init = None if isinstance(self.hiddens, int): self.hidden_size = self.hiddens elif isinstance(self.hiddens, tuple): hidden_shape, h_init = self.hiddens if isinstance(hidden_shape, int): self.hidden_size = hidden_shape else: self.hidden_size = hidden_shape[-1] else: raise AssertionError( "Hiddens need to be an int or tuple of (shape, theano_expression), found %s" % type(self.hiddens)) # output shape is going to be 3D with (timesteps, batch_size, hidden_size) self.output_size = (None, None, self.hidden_size) ########################################################## # parameters - make sure to deal with params dict input! 
# ########################################################## # all input-to-hidden weights W_c, W_i, W_f, W_o = [ self.params.get( "W_%s" % sub, get_weights( weights_init=weights_init, shape=(self.input_size[-1], self.hidden_size), name="W_%s" % sub, # if gaussian mean=weights_mean, std=weights_std, # if uniform interval=weights_interval)) for sub in ['c', 'i', 'f', 'o'] ] # all hidden-to-hidden weights U_c, U_i, U_f, U_o = [ self.params.get( "U_%s" % sub, get_weights( weights_init=r_weights_init, shape=(self.hidden_size, self.hidden_size), name="U_%s" % sub, # if gaussian mean=r_weights_mean, std=r_weights_std, # if uniform interval=r_weights_interval)) for sub in ['c', 'i', 'f', 'o'] ] # if bidirectional, make hidden-to-hidden weights again to go the opposite direction U_c_b, U_i_b, U_f_b, U_o_b = None, None, None, None if bidirectional: U_c_b, U_i_b, U_f_b, U_o_b = [ self.params.get( "U_%s_b" % sub, get_weights( weights_init=r_weights_init, shape=(self.hidden_size, self.hidden_size), name="U_%s_b" % sub, # if gaussian mean=r_weights_mean, std=r_weights_std, # if uniform interval=r_weights_interval)) for sub in ['c', 'i', 'f', 'o'] ] # biases b_c, b_i, b_f, b_o = [ self.params.get( "b_%s" % sub, get_bias(shape=(self.hidden_size, ), name="b_%s" % sub, init_values=r_bias_init)) for sub in ['c', 'i', 'f', 'o'] ] # clip gradients if we are doing that recurrent_params = [U_c, U_i, U_f, U_o, U_c_b, U_i_b, U_f_b, U_o_b] if clip_recurrent_grads: clip = abs(clip_recurrent_grads) U_c, U_i, U_f, U_o, U_c_b, U_i_b, U_f_b, U_o_b = [ grad_clip(param, -clip, clip) if param is not None else None for param in recurrent_params ] # put all the parameters into our dictionary self.params = { "W_c": W_c, "W_i": W_i, "W_f": W_f, "W_o": W_o, "U_c": U_c, "U_i": U_i, "U_f": U_f, "U_o": U_o, "b_c": b_c, "b_i": b_i, "b_f": b_f, "b_o": b_o, } if bidirectional: self.params.update({ "U_c_b": U_c_b, "U_i_b": U_i_b, "U_f_b": U_f_b, "U_o_b": U_o_b, }) # make h_init the right sized tensor if h_init is None: h_init = zeros_like(dot(self.input[0], W_c)) c_init = zeros_like(dot(self.input[0], W_c)) ############### # computation # ############### # move some computation outside of scan to speed it up! x_c = dot(self.input, W_c) + b_c x_i = dot(self.input, W_i) + b_i x_f = dot(self.input, W_f) + b_f x_o = dot(self.input, W_o) + b_o # now do the recurrent stuff (self.hiddens, _), self.updates = scan(fn=self.recurrent_step, sequences=[x_c, x_i, x_f, x_o], outputs_info=[h_init, c_init], non_sequences=[U_c, U_i, U_f, U_o], go_backwards=backward, name="lstm_scan", strict=True) # if bidirectional, do the same in reverse! if bidirectional: (hiddens_b, _), updates_b = scan(fn=self.recurrent_step, sequences=[x_c, x_i, x_f, x_o], outputs_info=[h_init, c_init], non_sequences=[U_c_b, U_i_b, U_f_b, U_o_b], go_backwards=not backward, name="lstm_scan_back", strict=True) # flip the hiddens to be the right direction hiddens_b = hiddens_b[::-1] # update stuff self.updates.update(updates_b) self.hiddens += hiddens_b log.info("Initialized an LSTM!")
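# The scan call above iterates `self.recurrent_step`, which is not shown in
# this snippet. A standard LSTM step consistent with the precomputed
# x_c/x_i/x_f/x_o sequences and the U_* non_sequences could look like the
# sketch below (an assumption for illustration, not the library's actual
# implementation).
def recurrent_step(self, x_c_t, x_i_t, x_f_t, x_o_t, h_tm1, c_tm1,
                   U_c, U_i, U_f, U_o):
    # gates (sigmoid by default): input, forget, output
    i_t = self.gate_activation_func(x_i_t + dot(h_tm1, U_i))
    f_t = self.gate_activation_func(x_f_t + dot(h_tm1, U_f))
    o_t = self.gate_activation_func(x_o_t + dot(h_tm1, U_o))
    # candidate cell value and cell update
    c_tilde = self.hidden_activation_func(x_c_t + dot(h_tm1, U_c))
    c_t = f_t * c_tm1 + i_t * c_tilde
    # new hidden state
    h_t = o_t * self.hidden_activation_func(c_t)
    return h_t, c_t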
def recurrent_apply(brick, application, application_call, *args, **kwargs): """Iterates a transition function. Parameters ---------- iterate : bool If ``True`` iteration is made. By default ``True``. reverse : bool If ``True``, the sequences are processed in backward direction. ``False`` by default. return_initial_states : bool If ``True``, initial states are included in the returned state tensors. ``False`` by default. """ # Extract arguments related to iteration and immediately relay the # call to the wrapped function if `iterate=False` iterate = kwargs.pop('iterate', True) if not iterate: return application_function(brick, *args, **kwargs) reverse = kwargs.pop('reverse', False) return_initial_states = kwargs.pop('return_initial_states', False) # Push everything to kwargs for arg, arg_name in zip(args, arg_names): kwargs[arg_name] = arg # Make sure that all arguments for scan are tensor variables scan_arguments = (application.sequences + application.states + application.contexts) for arg in scan_arguments: if arg in kwargs: if kwargs[arg] is None: del kwargs[arg] else: kwargs[arg] = tensor.as_tensor_variable(kwargs[arg]) # Check which sequence and contexts were provided sequences_given = dict_subset(kwargs, application.sequences, must_have=False) contexts_given = dict_subset(kwargs, application.contexts, must_have=False) # Determine number of steps and batch size. if len(sequences_given): # TODO Assumes 1 time dim! shape = list(sequences_given.values())[0].shape n_steps = shape[0] batch_size = shape[1] else: # TODO Raise error if n_steps and batch_size not found? n_steps = kwargs.pop('n_steps') batch_size = kwargs.pop('batch_size') # Handle the rest kwargs rest_kwargs = { key: value for key, value in kwargs.items() if key not in scan_arguments } for value in rest_kwargs.values(): if (isinstance(value, Variable) and not is_shared_variable(value)): logger.warning("unknown input {}".format(value) + unknown_scan_input) # Ensure that all initial states are available. initial_states = brick.initial_states(batch_size, as_dict=True, *args, **kwargs) for state_name in application.states: dim = brick.get_dim(state_name) if state_name in kwargs: if isinstance(kwargs[state_name], NdarrayInitialization): kwargs[state_name] = tensor.alloc( kwargs[state_name].generate(brick.rng, (1, dim)), batch_size, dim) elif isinstance(kwargs[state_name], Application): kwargs[state_name] = (kwargs[state_name](state_name, batch_size, *args, **kwargs)) else: try: kwargs[state_name] = initial_states[state_name] except KeyError: raise KeyError( "no initial state for '{}' of the brick {}".format( state_name, brick.name)) states_given = dict_subset(kwargs, application.states) # Theano issue 1772 for name, state in states_given.items(): states_given[name] = tensor.unbroadcast( state, *range(state.ndim)) def scan_function(*args): args = list(args) arg_names = (list(sequences_given) + [ output for output in application.outputs if output in application.states ] + list(contexts_given)) kwargs = dict(equizip(arg_names, args)) kwargs.update(rest_kwargs) outputs = application(iterate=False, **kwargs) # We want to save the computation graph returned by the # `application_function` when it is called inside the # `theano.scan`. 
application_call.inner_inputs = args application_call.inner_outputs = pack(outputs) return outputs outputs_info = [ states_given[name] if name in application.states else None for name in application.outputs ] result, updates = theano.scan( scan_function, sequences=list(sequences_given.values()), outputs_info=outputs_info, non_sequences=list(contexts_given.values()), n_steps=n_steps, go_backwards=reverse, name='{}_{}_scan'.format(brick.name, application.application_name)) result = pack(result) if return_initial_states: # Undo Subtensor for i in range(len(states_given)): assert isinstance(result[i].owner.op, tensor.subtensor.Subtensor) result[i] = result[i].owner.inputs[0] if updates: application_call.updates = dict_union(application_call.updates, updates) return result
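# The "Theano issue 1772" workaround above strips every broadcastable flag
# from the initial states before handing them to scan, so that their type
# matches the states produced by the inner step. A standalone version of that
# idiom (names are illustrative):
from theano import tensor

def unbroadcast_all(state):
    # remove the broadcastable flag on every dimension of the state
    return tensor.unbroadcast(state, *range(state.ndim))

h0 = tensor.alloc(0., 1, 5)      # broadcastable pattern (True, False)
h0 = unbroadcast_all(h0)         # pattern becomes (False, False)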
def minres(compute_Av, bs, rtol=constantX(1e-6), maxit=20, Ms=None, shift=constantX(0.), maxxnorm=constantX(1e15), Acondlim=constantX(1e16), profile=0): """ Attempts to find the minimum-length and minimum-residual-norm solution :math:`x` to the system of linear equations :math:`A*x = b` or least squares problem :math:`\\min||Ax-b||`. The n-by-n coefficient matrix A must be symmetric (but need not be positive definite or invertible). The right-hand-side column vector b must have length n. Parameters ---------- compute_Av : callable Callable returing the symbolic expression for \ `Av` (the product of matrix A with some vector v). \ `v` should be a list of tensors, where the vector v means \ the vector obtain by concatenating and flattening all tensors in v bs : list List of Theano expressions. We are looking to compute `A^-1\dot bs`. rtol : float, optional Specifies the tolerance of the method. Default is 1e-6. maxit : int, positive, optional Specifies the maximum number of iterations. Default is 20. Ms : list List of theano expression of same shape as `bs`. The method uses \ these to precondition with diag(Ms) shift : float, optional Default is 0. Effectively solve the system (A - shift I) * x = b. maxxnorm : float, positive, optional Maximum bound on NORM(x). Default is 1e14. Acondlim : float, positive, optional Maximum bound on COND(A). Default is 1e15. show : bool If True, show iterations, otherwise suppress outputs. Default is \ False. Returns ------- x : list List of Theano tensor representing the solution flag : tensor_like Theano int scalar - convergence flag * 0 beta1 = 0. The exact solution is x = 0. * 1 A solution to (poss. singular) Ax = b found, given rtol. * 2 Pseudoinverse solution for singular LS problem, given rtol. * 3 A solution to (poss. singular) Ax = b found, given eps. * 4 Pseudoinverse solution for singular LS problem, given eps. * 5 x has converged to an eigenvector. * 6 xnorm has exceeded maxxnorm. * 7 Acond has exceeded Acondlim. * 8 The iteration limit was reached. * 9/10 It is a least squares problem but no converged solution yet. iter : int Iteration number at which x was computed: `0 <= iter <= maxit`. relres : float Real positive, the relative residual is defined as NORM(b-A*x)/(NORM(A) * NORM(x) + NORM(b)), computed recurrently here. If flag is 1 or 3, relres <= TOL. relAres : float Real positive, the relative-NORM(Ar) := NORM(Ar) / NORM(A) computed recurrently here. If flag is 2 or 4, relAres <= TOL. Anorm : float Real positive, estimate of matrix 2-norm of A. Acond : float Real positive, estimate of condition number of A with respect to 2-norm. xnorm : float Non-negative positive, recurrently computed NORM(x) Axnorm : float Non-negative positive, recurrently computed NORM(A * x). See Also -------- Sou-Cheng Choi's PhD Dissertation, Stanford University, 2006. http://www.stanford.edu/group/SOL/software.html """ if not isinstance(bs, (tuple, list)): bs = [bs] return_as_list = False else: bs = list(bs) return_as_list = True eps = constantX(1e-23) # Initialise beta1 = sqrt_inner_product(bs) #------------------------------------------------------------------ # Set up p and v for the first Lanczos vector v1. # p = beta1 P' v1, where P = C**(-1). # v is really P' v1. 
#------------------------------------------------------------------ r3s = [b for b in bs] r2s = [b for b in bs] r1s = [b for b in bs] if Ms is not None: r3s = [b / m for b, m in zip(bs, Ms)] beta1 = sqrt_inner_product(r3s, bs) #------------------------------------------------------------------ ## Initialize other quantities. # Note that Anorm has been initialized by IsOpSym6. # ------------------------------------------------------------------ bnorm = beta1 n_params = len(bs) def loop(niter, beta, betan, phi, Acond, cs, dbarn, eplnn, rnorm, sn, Tnorm, rnorml, xnorm, Dnorm, gamma, pnorm, gammal, Axnorm, relrnorm, relArnorml, Anorm, flag, *args): #----------------------------------------------------------------- ## Obtain quantities for the next Lanczos vector vk+1, k = 1, 2,... # The general iteration is similar to the case k = 1 with v0 = 0: # # p1 = Operator * v1 - beta1 * v0, # alpha1 = v1'p1, # q2 = p2 - alpha1 * v1, # beta2^2 = q2'q2, # v2 = (1/beta2) q2. # # Again, p = betak P vk, where P = C**(-1). # .... more description needed. #----------------------------------------------------------------- xs = args[0 * n_params:1 * n_params] r1s = args[1 * n_params:2 * n_params] r2s = args[2 * n_params:3 * n_params] r3s = args[3 * n_params:4 * n_params] dls = args[4 * n_params:5 * n_params] ds = args[5 * n_params:6 * n_params] betal = beta beta = betan vs = [r3 / beta for r3 in r3s] r3s, upds = compute_Av(*vs) r3s = [r3 - shift * v for r3, v in zip(r3s, vs)] r3s = [ TT.switch(TT.ge(niter, constantX(1.)), r3 - (beta / betal) * r1, r3) for r3, r1 in zip(r3s, r1s) ] alpha = inner_product(r3s, vs) r3s = [r3 - (alpha / beta) * r2 for r3, r2 in zip(r3s, r2s)] r1s = [r2 for r2 in r2s] r2s = [r3 for r3 in r3s] if Ms is not None: r3s = [r3 / M for r3, M in zip(r3s, Ms)] betan = sqrt_inner_product(r2s, r3s) else: betan = sqrt_inner_product(r3s) pnorml = pnorm pnorm = TT.switch( TT.eq(niter, constantX(0.)), TT.sqrt(TT.sqr(alpha) + TT.sqr(betan)), TT.sqrt(TT.sqr(alpha) + TT.sqr(betan) + TT.sqr(beta))) #----------------------------------------------------------------- ## Apply previous rotation Qk-1 to get # [dlta_k epln_{k+1}] = [cs sn][dbar_k 0 ] # [gbar_k dbar_{k+1} ] [sn -cs][alpha_k beta_{k+1}]. 
        #-----------------------------------------------------------------
        dbar = dbarn
        epln = eplnn
        dlta = cs * dbar + sn * alpha
        gbar = sn * dbar - cs * alpha
        eplnn = sn * betan
        dbarn = -cs * betan

        ## Compute the current plane rotation Qk
        gammal2 = gammal
        gammal = gamma
        cs, sn, gamma = symGivens2(gbar, betan)
        tau = cs * phi
        phi = sn * phi
        Axnorm = TT.sqrt(TT.sqr(Axnorm) + TT.sqr(tau))
        # Update d
        dl2s = [dl for dl in dls]
        dls = [d for d in ds]
        ds = [TT.switch(TT.neq(gamma, constantX(0.)),
                        (v - epln * dl2 - dlta * dl) / gamma,
                        v)
              for v, dl2, dl in zip(vs, dl2s, dls)]
        d_norm = TT.switch(TT.neq(gamma, constantX(0.)),
                           sqrt_inner_product(ds),
                           constantX(numpy.inf))

        # Update x except if it will become too big
        xnorml = xnorm
        dl2s = [x for x in xs]
        xs = [x + tau * d for x, d in zip(xs, ds)]

        xnorm = sqrt_inner_product(xs)
        xs = [TT.switch(TT.ge(xnorm, maxxnorm), dl2, x)
              for dl2, x in zip(dl2s, xs)]

        flag = TT.switch(TT.ge(xnorm, maxxnorm), constantX(6.), flag)
        # Estimate various norms
        rnorml = rnorm  # ||r_{k-1}||
        Anorml = Anorm
        Acondl = Acond
        relrnorml = relrnorm
        flag_no_6 = TT.neq(flag, constantX(6.))
        Dnorm = TT.switch(flag_no_6,
                          TT.sqrt(TT.sqr(Dnorm) + TT.sqr(d_norm)),
                          Dnorm)
        xnorm = TT.switch(flag_no_6, sqrt_inner_product(xs), xnorm)
        rnorm = TT.switch(flag_no_6, phi, rnorm)
        relrnorm = TT.switch(flag_no_6,
                             rnorm / (Anorm * xnorm + bnorm),
                             relrnorm)
        Tnorm = TT.switch(flag_no_6,
                          TT.switch(TT.eq(niter, constantX(0.)),
                                    TT.sqrt(TT.sqr(alpha) + TT.sqr(betan)),
                                    TT.sqrt(TT.sqr(Tnorm) + TT.sqr(beta) +
                                            TT.sqr(alpha) + TT.sqr(betan))),
                          Tnorm)
        Anorm = TT.maximum(Anorm, pnorm)
        Acond = Anorm * Dnorm
        rootl = TT.sqrt(TT.sqr(gbar) + TT.sqr(dbarn))
        Anorml = rnorml * rootl
        relArnorml = rootl / Anorm
        #---------------------------------------------------------------
        # See if any of the stopping criteria are satisfied.
        # In rare cases, flag is already -1 from above (Abar = const*I).
        #---------------------------------------------------------------
        epsx = Anorm * xnorm * eps
        epsr = Anorm * xnorm * rtol
        # Test for singular Hk (hence singular A)
        # or x is already an LS solution (so again A must be singular).
        t1 = constantX(1) + relrnorm
        t2 = constantX(1) + relArnorml
        flag = TT.switch(
            TT.bitwise_or(TT.eq(flag, constantX(0)),
                          TT.eq(flag, constantX(6))),
            multiple_switch(TT.le(t1, constantX(1)),
                            constantX(3),
                            TT.le(t2, constantX(1)),
                            constantX(4),
                            TT.le(relrnorm, rtol),
                            constantX(1),
                            TT.le(Anorm, constantX(1e-20)),
                            constantX(12),
                            TT.le(relArnorml, rtol),
                            constantX(10),
                            TT.ge(epsx, beta1),
                            constantX(5),
                            TT.ge(xnorm, maxxnorm),
                            constantX(6),
                            TT.ge(niter, TT.cast(maxit,
                                                 theano.config.floatX)),
                            constantX(8),
                            flag),
            flag)
        flag = TT.switch(TT.lt(Axnorm, rtol * Anorm * xnorm),
                         constantX(11.), flag)
        return [niter + constantX(1.),
                beta, betan, phi, Acond, cs, dbarn, eplnn, rnorm, sn, Tnorm,
                rnorml, xnorm, Dnorm, gamma, pnorm, gammal, Axnorm, relrnorm,
                relArnorml, Anorm, flag] + xs + r1s + r2s + r3s + dls + ds, \
            upds, \
            theano.scan_module.scan_utils.until(TT.neq(flag, 0))

    states = []
    # 0 niter
    states.append(constantX([0]))
    # 1 beta
    states.append(constantX([0]))
    # 2 betan
    states.append(TT.unbroadcast(TT.shape_padleft(beta1), 0))
    # 3 phi
    states.append(TT.unbroadcast(TT.shape_padleft(beta1), 0))
    # 4 Acond
    states.append(constantX([1]))
    # 5 cs
    states.append(constantX([-1]))
    # 6 dbarn
    states.append(constantX([0]))
    # 7 eplnn
    states.append(constantX([0]))
    # 8 rnorm
    states.append(TT.unbroadcast(TT.shape_padleft(beta1), 0))
    # 9 sn
    states.append(constantX([0]))
    # 10 Tnorm
    states.append(constantX([0]))
    # 11 rnorml
    states.append(TT.unbroadcast(TT.shape_padleft(beta1), 0))
    # 12 xnorm
    states.append(constantX([0]))
    # 13 Dnorm
    states.append(constantX([0]))
    # 14 gamma
    states.append(constantX([0]))
    # 15 pnorm
    states.append(constantX([0]))
    # 16 gammal
    states.append(constantX([0]))
    # 17 Axnorm
    states.append(constantX([0]))
    # 18 relrnorm
    states.append(constantX([1]))
    # 19 relArnorml
    states.append(constantX([1]))
    # 20 Anorm
    states.append(constantX([0]))
    # 21 flag
    states.append(constantX([0]))

    xs = [TT.unbroadcast(TT.shape_padleft(TT.zeros_like(b)), 0) for b in bs]
    ds = [TT.unbroadcast(TT.shape_padleft(TT.zeros_like(b)), 0) for b in bs]
    dls = [TT.unbroadcast(TT.shape_padleft(TT.zeros_like(b)), 0) for b in bs]
    r1s = [TT.unbroadcast(TT.shape_padleft(r1), 0) for r1 in r1s]
    r2s = [TT.unbroadcast(TT.shape_padleft(r2), 0) for r2 in r2s]
    r3s = [TT.unbroadcast(TT.shape_padleft(r3), 0) for r3 in r3s]

    rvals, loc_updates = scan(
        loop,
        states=states + xs + r1s + r2s + r3s + dls + ds,
        n_steps=maxit + numpy.int32(1),
        name='minres',
        profile=profile,
        mode=theano.Mode(linker='cvm'))
    assert isinstance(loc_updates, dict) and 'Ordered' in str(
        type(loc_updates))

    niters = TT.cast(rvals[0][0], 'int32')
    flag = TT.cast(rvals[21][0], 'int32')
    relres = rvals[18][0]
    relAres = rvals[19][0]
    Anorm = rvals[20][0]
    Acond = rvals[4][0]
    xnorm = rvals[12][0]
    Axnorm = rvals[17][0]
    sol = [x[0] for x in rvals[22: 22 + n_params]]
    return (sol, flag, niters, relres, relAres, Anorm, Acond, xnorm, Axnorm,
            loc_updates)
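# ---------------------------------------------------------------------------
# Minimal usage sketch for `minres` above (illustrative only, not part of the
# original code). It assumes the module-level helpers used by `minres`
# (`constantX`, `sqrt_inner_product`, `inner_product`, `symGivens2`,
# `multiple_switch`, `scan`) are importable from this module, and that
# `numpy`, `theano` and `TT` are the usual imports. The matrix `A_val` and
# right-hand side `b_val` are made-up test data.
# ---------------------------------------------------------------------------
def _example_minres_usage():
    from collections import OrderedDict

    rng = numpy.random.RandomState(123)
    M = rng.randn(5, 5).astype(theano.config.floatX)
    A_val = (M + M.T) / 2.  # symmetric, not necessarily positive definite
    b_val = rng.randn(5).astype(theano.config.floatX)

    A = theano.shared(A_val, name='A')
    b = theano.shared(b_val, name='b')

    def compute_Av(v):
        # `minres` calls this with one tensor per element of `bs` and
        # expects a (list_of_products, updates) pair in return.
        return [TT.dot(A, v)], OrderedDict()

    rvals = minres(compute_Av, [b], rtol=constantX(1e-10))
    sol, flag, niters = rvals[0], rvals[1], rvals[2]
    loc_updates = rvals[-1]

    f = theano.function([], [sol[0], flag, niters], updates=loc_updates)
    x, flag_val, n = f()
    # For a well-conditioned system the residual A x - b should be ~0.
    print(flag_val, n, numpy.abs(numpy.dot(A_val, x) - b_val).max())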
def rnn(step_function, inputs, initial_states,
        go_backwards=False, mask=None, constants=None,
        unroll=False, input_length=None):
    """Iterates over the time dimension of a tensor.

    `inputs` must have shape (samples, time, ...). `step_function` is called
    as ``step_function(input_t, states + constants)`` and must return
    ``(output_t, new_states)``. Returns ``(last_output, outputs, states)``:
    the output at the last timestep, the full output sequence with the
    samples axis first, and the per-step state sequences, also with the
    samples axis first.
    """
    ndim = inputs.ndim
    assert ndim >= 3, 'Input should be at least 3D.'

    if unroll:
        if input_length is None:
            raise Exception('When specifying `unroll=True`, an `input_length` '
                            'must be provided to `rnn`.')

    axes = [1, 0] + list(range(2, ndim))
    inputs = inputs.dimshuffle(axes)

    if constants is None:
        constants = []

    if mask is not None:
        if mask.ndim == ndim - 1:
            mask = K.expand_dims(mask)
        assert mask.ndim == ndim
        mask = mask.dimshuffle(axes)

        if unroll:
            indices = list(range(input_length))
            if go_backwards:
                indices = indices[::-1]

            successive_outputs = []
            successive_states = []
            states = initial_states
            for i in indices:
                output, new_states = step_function(inputs[i],
                                                   states + constants)

                if len(successive_outputs) == 0:
                    prev_output = K.zeros_like(output)
                else:
                    prev_output = successive_outputs[-1]

                output = T.switch(mask[i], output, prev_output)
                kept_states = []
                for state, new_state in zip(states, new_states):
                    kept_states.append(T.switch(mask[i], new_state, state))
                states = kept_states

                successive_outputs.append(output)
                successive_states.append(states)

            outputs = T.stack(*successive_outputs)
            states = []
            for i in range(len(successive_states[-1])):
                states.append(T.stack(*[states_at_step[i]
                                        for states_at_step in successive_states]))
        else:
            # build an all-zero tensor of shape (samples, output_dim)
            initial_output = step_function(inputs[0],
                                           initial_states + constants)[0] * 0
            # Theano gets confused by broadcasting patterns in the scan op
            initial_output = T.unbroadcast(initial_output, 0, 1)

            def _step(input, mask, output_tm1, *states):
                output, new_states = step_function(input, states)
                # output previous output if masked.
                output = T.switch(mask, output, output_tm1)
                return_states = []
                for state, new_state in zip(states, new_states):
                    return_states.append(T.switch(mask, new_state, state))
                return [output] + return_states

            results, _ = theano.scan(
                _step,
                sequences=[inputs, mask],
                outputs_info=[initial_output] + initial_states,
                non_sequences=constants,
                go_backwards=go_backwards)

            # deal with Theano API inconsistency
            if type(results) is list:
                outputs = results[0]
                states = results[1:]
            else:
                outputs = results
                states = []
    else:
        if unroll:
            indices = list(range(input_length))
            if go_backwards:
                indices = indices[::-1]

            successive_outputs = []
            successive_states = []
            states = initial_states
            for i in indices:
                output, states = step_function(inputs[i], states + constants)
                successive_outputs.append(output)
                successive_states.append(states)
            outputs = T.stack(*successive_outputs)
            states = []
            for i in range(len(successive_states[-1])):
                states.append(T.stack(*[states_at_step[i]
                                        for states_at_step in successive_states]))
        else:
            def _step(input, *states):
                output, new_states = step_function(input, states)
                return [output] + new_states

            results, _ = theano.scan(
                _step,
                sequences=inputs,
                outputs_info=[None] + initial_states,
                non_sequences=constants,
                go_backwards=go_backwards)

            # deal with Theano API inconsistency
            if type(results) is list:
                outputs = results[0]
                states = results[1:]
            else:
                outputs = results
                states = []

    outputs = T.squeeze(outputs)
    last_output = outputs[-1]

    axes = [1, 0] + list(range(2, outputs.ndim))
    outputs = outputs.dimshuffle(axes)
    states = [T.squeeze(state.dimshuffle(axes)) for state in states]
    return last_output, outputs, states
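# ---------------------------------------------------------------------------
# Illustrative sketch (not from the original source) of how `rnn` above can
# be driven with a plain tanh recurrence. The weights `W`, `U` and all the
# dimensions are made up; the step function follows the
# (input_t, states) -> (output, new_states) contract that `rnn` expects.
# Assumes `numpy`, `theano` and `T` (theano.tensor) are imported as in the
# surrounding module.
# ---------------------------------------------------------------------------
def _example_rnn_usage():
    n_samples, n_steps, input_dim, output_dim = 2, 4, 3, 5
    rng = numpy.random.RandomState(0)
    W = theano.shared(rng.randn(input_dim, output_dim).astype(theano.config.floatX))
    U = theano.shared(rng.randn(output_dim, output_dim).astype(theano.config.floatX))

    x = T.tensor3('x')                      # (samples, timesteps, input_dim)
    h0 = T.zeros((x.shape[0], output_dim))  # initial hidden state

    def step(x_t, states):
        h_tm1 = states[0]
        h_t = T.tanh(T.dot(x_t, W) + T.dot(h_tm1, U))
        return h_t, [h_t]

    last_output, outputs, states = rnn(step, x, [h0])
    f = theano.function([x], [last_output, outputs])
    x_val = rng.randn(n_samples, n_steps, input_dim).astype(theano.config.floatX)
    last, all_outputs = f(x_val)
    # last: (samples, output_dim); all_outputs: (samples, timesteps, output_dim)
    return last, all_outputs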
def apply(self, state_below, mask_below=None,
          init_state=None, init_context=None,
          c=None, c_mask=None, one_step=False,
          # added by Zhaopeng Tu, 2016-04-29
          cov_before=None, fertility=None):
    # assert c, 'Context must be provided'
    # assert c.ndim == 3, 'Context must be 3-d: n_seq * batch_size * dim'
    # state_below: n_steps * batch_size/1 * embedding
    if state_below.ndim == 3:
        n_steps = state_below.shape[0]
        batch_size = state_below.shape[1]
    else:
        batch_size = 1

    # mask
    if mask_below is None:  # sampling or beamsearch
        mask_below = T.alloc(numpy.float32(1.), state_below.shape[0], 1)

    if one_step:
        assert init_state, 'previous state must be provided'

    if init_state is None:
        init_state = self.create_init_state(init_context)

    state_below_xh = T.dot(state_below, self.W_xh)
    state_below_xz = T.dot(state_below, self.W_xz)
    state_below_xr = T.dot(state_below, self.W_xr)

    if self.with_attention:
        # for attention model
        p_from_c = T.dot(c, self.A_cp).reshape((c.shape[0], c.shape[1], self.n_hids))
    else:
        c_z = T.dot(init_context, self.W_cz)
        c_r = T.dot(init_context, self.W_cr)
        c_h = T.dot(init_context, self.W_ch)

    if one_step:
        if self.with_attention:
            return self._step_attention(state_below_xh, state_below_xz,
                                        state_below_xr, mask_below,
                                        init_state, c, c_mask, p_from_c,
                                        # added by Zhaopeng Tu, 2016-06-08
                                        cov_tm1=cov_before,
                                        fertility=fertility)
        else:
            return self._step_context(state_below_xh, state_below_xz,
                                      state_below_xr, mask_below,
                                      init_state, c_z, c_r, c_h,
                                      init_context)
    else:
        sequences = [state_below_xh, state_below_xz, state_below_xr, mask_below]
        # decoder hidden state
        outputs_info = [init_state]
        if self.with_attention:
            non_sequences = [c, c_mask, p_from_c]
            # added by Zhaopeng Tu, 2016-04-29
            # ctx, probs
            outputs_info += [None, None]
            if self.with_coverage:
                # initialization for coverage
                init_cov = T.unbroadcast(
                    T.zeros((c.shape[0], c.shape[1], self.coverage_dim),
                            dtype='float32'), 2)
                outputs_info.append(init_cov)

                # fertility is not constructed outside when training
                if self.coverage_type == 'linguistic':
                    fertility = self._get_fertility(c)
                else:
                    fertility = T.zeros((c.shape[0], c.shape[1]),
                                        dtype='float32')
                non_sequences.append(fertility)

            # modified by Zhaopeng Tu, 2016-05-02
            # rval, updates = theano.scan(self._step_attention,
            if not self.with_coverage:
                #                seqs           |  out  |   non_seqs
                fn = lambda x_h, x_z, x_r, x_m, h_tm1, c, c_m, p_from_c: \
                    self._step_attention(x_h, x_z, x_r, x_m, h_tm1,
                                         c, c_m, p_from_c)
            else:
                #                seqs           |       out      |   non_seqs
                fn = lambda x_h, x_z, x_r, x_m, h_tm1, cov_tm1, c, c_m, p_from_c, fertility: \
                    self._step_attention(x_h, x_z, x_r, x_m, h_tm1,
                                         c, c_m, p_from_c,
                                         cov_tm1=cov_tm1, fertility=fertility)
        else:
            non_sequences = [c_z, c_r, c_h, init_context]
            #                seqs           |  out  |   non_seqs
            fn = lambda x_h, x_z, x_r, x_m, h_tm1, cz, cr, ch, ctx: \
                self._step_context(x_h, x_z, x_r, x_m, h_tm1, cz, cr, ch, ctx)

        rval, updates = theano.scan(fn,
                                    sequences=sequences,
                                    non_sequences=non_sequences,
                                    # outputs_info=[init_state, None],
                                    outputs_info=outputs_info,
                                    name=_p(self.pname, 'layers'),
                                    n_steps=n_steps)
        self.output = rval

        return self.output
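# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original code) of the
# "seqs | out | non_seqs" argument ordering that the `fn` lambdas in `apply`
# above rely on: sequences come first, then the recurrent outputs (one per
# non-None entry of outputs_info), then the non_sequences. Outputs declared
# as None (like ctx/probs above) are produced at every step but are never
# fed back into the step function.
# ---------------------------------------------------------------------------
def _example_scan_argument_order():
    xs = T.matrix('xs')   # sequence, shape (n_steps, dim)
    h0 = T.vector('h0')   # recurrent initial state
    w = T.scalar('w')     # non-sequence shared across steps

    #        seqs | out (recurrent only) | non_seqs
    def step(x_t, h_tm1, w):
        h_t = T.tanh(x_t + w * h_tm1)
        extra_t = h_t.sum()   # extra per-step output with no feedback
        return h_t, extra_t

    (hs, extras), updates = theano.scan(step,
                                        sequences=[xs],
                                        outputs_info=[h0, None],
                                        non_sequences=[w])
    f = theano.function([xs, h0, w], [hs, extras])
    return f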