import numpy as np

# NOTE: these classes are built on quagga's GPU primitives; the exact import
# paths below are assumed from the library's layout. `List`,
# `HomogeneousDataGenerator` and `HomogeneousDataIterator` are expected to be
# provided by the surrounding example code.
from quagga.matrix import Matrix
from quagga.context import Context
from quagga.connector import Connector


class PtbMiniBatchesGenerator(object):
    def __init__(self, ptb_train, ptb_valid, batch_size, sentence_max_len,
                 device_id):
        self.blocking_contexts = None
        self.context = Context(device_id)
        device_id = self.context.device_id
        self.train_offsets = HomogeneousDataGenerator(ptb_train, batch_size,
                                                      sentence_max_len,
                                                      randomize=True,
                                                      infinite=True)
        self.valid_offsets = HomogeneousDataGenerator(ptb_valid, batch_size,
                                                      sentence_max_len)
        train_sentences = np.array([self.train_offsets.flatten_sentences])
        valid_sentences = np.array([self.valid_offsets.flatten_sentences])
        self.train_sents = Matrix.from_npa(train_sentences, 'int', device_id)
        self.valid_sents = Matrix.from_npa(valid_sentences, 'int', device_id)
        # `[...]` keeps a view, so `.base` below recovers the full buffer
        # after `_sent_lengths` has been narrowed to the current batch size
        self._sent_lengths = np.empty((batch_size, 1),
                                      dtype=np.int32, order='F')[...]
        self.sent_lengths = Matrix.from_npa(self._sent_lengths,
                                            device_id=device_id)
        sentence_batch = Matrix.empty(batch_size, sentence_max_len, 'int',
                                      device_id)
        self.sentence_batch = Connector(sentence_batch, self.context)
        self.sentence_batch.sync_fill(0)

        self._mask = Matrix.empty(sentence_batch.nrows,
                                  self.sentence_batch.ncols, 'float',
                                  device_id)
        self.mask = List([Connector(self._mask[:, i])
                          for i in xrange(sentence_max_len)],
                         self.sentence_batch.ncols)
        self.train_offsets_iterator = iter(self.train_offsets)
        self.valid_offsets_iterator = iter(self.valid_offsets)
        self.training_mode = True

    def set_training_mode(self):
        self.training_mode = True

    def set_testing_mode(self):
        self.training_mode = False

    def fprop(self):
        if self.training_mode:
            offsets = next(self.train_offsets_iterator)
            sents = self.train_sents
        else:
            try:
                offsets = next(self.valid_offsets_iterator)
                sents = self.valid_sents
            except StopIteration as e:
                self.valid_offsets_iterator = iter(self.valid_offsets)
                raise e
        self.context.wait(*self.blocking_contexts)
        self._sent_lengths = self._sent_lengths.base[:len(offsets)]
        self.sentence_batch.nrows = len(offsets)
        for k, offset in enumerate(offsets):
            self.sentence_batch[k].assign(self.context,
                                          sents[:, offset[0]:offset[1]])
            self._sent_lengths[k] = offset[1] - offset[0]
        max_sent_len = int(np.max(self._sent_lengths))
        self.sentence_batch.last_modification_context = self.context
        self.sentence_batch.ncols = max_sent_len
        self.sent_lengths.assign_npa(self.context, self._sent_lengths)
        self._mask.mask_column_numbers_row_wise(self.context,
                                                self.sent_lengths)
        for e in self.mask:
            e.last_modification_context = self.context
        self.sentence_batch.fprop()
        self.mask.fprop()
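# Usage sketch (an assumption, not part of the original source): drives the
# generator through one validation pass. `ptb_train` and `ptb_valid` stand
# for whatever tokenized sentence collections HomogeneousDataGenerator
# expects; only methods defined above are used. `blocking_contexts` must be
# set before fprop() -- an empty list means there is nothing to wait on.
def _ptb_generator_usage_sketch(ptb_train, ptb_valid):
    data_block = PtbMiniBatchesGenerator(ptb_train, ptb_valid,
                                         batch_size=64,
                                         sentence_max_len=100,
                                         device_id=0)
    data_block.blocking_contexts = []
    data_block.set_testing_mode()
    try:
        while True:
            # each fprop() fills `sentence_batch` and `mask` with a batch
            data_block.fprop()
    except StopIteration:
        # the validation iterator was rebuilt, so training can resume
        data_block.set_training_mode()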
class DataBlock(object):
    def __init__(self, data, char_to_idx, batch_size, x_device_id,
                 y_device_id):
        self.data = HomogeneousDataIterator(data, char_to_idx, batch_size,
                                            True, True)
        self.data_iterator = iter(self.data)
        self.x_context = Context(x_device_id)
        self.y_context = Context(y_device_id)
        max_len = 0
        for sub_line in data:
            cur_len = len(sub_line)
            if cur_len > max_len:
                max_len = cur_len
        print max_len
        self.x = Connector(Matrix.empty(batch_size, max_len - 1, 'int',
                                        x_device_id))
        self._y = Matrix.empty(batch_size, max_len - 1, 'int', y_device_id)
        self.y = List([Connector(self._y[:, i])
                       for i in xrange(max_len - 1)],
                      self.x.ncols)
        self.lengths = Matrix.empty(self.x.nrows, 1, 'int', x_device_id)
        self._mask = Matrix.empty(self.x.nrows, self.x.ncols, 'float',
                                  x_device_id)
        self.mask = List([Connector(self._mask[:, i])
                          for i in xrange(max_len - 1)],
                         self.x.ncols)
        self.blocking_contexts = None

    def fprop(self):
        self.x_context.wait(*self.blocking_contexts)
        self.y_context.wait(*self.blocking_contexts)
        data = next(self.data_iterator)
        lengths_npa = np.array([[len(e) - 1] for e in data], np.int32,
                               order='F')
        # inputs are each sequence without its last character ...
        x_npa = np.zeros((len(data), int(np.max(lengths_npa))), np.int32, 'F')
        for k, e in enumerate(data):
            x_npa[k, :len(e) - 1] = e[:-1]
        self.x.assign_npa(self.x_context, x_npa)
        # ... targets are the same sequence shifted one step ahead
        y_npa = np.zeros((len(data), int(np.max(lengths_npa))), np.int32, 'F')
        for k, e in enumerate(data):
            y_npa[k, :len(e) - 1] = e[1:]
        self._y.assign_npa(self.y_context, y_npa)
        for e in self.y:
            e.last_modification_context = self.y_context
        self.lengths.assign_npa(self.x_context, lengths_npa)
        self._mask.mask_column_numbers_row_wise(self.x_context, self.lengths)
        for e in self.mask:
            e.last_modification_context = self.x_context
        self.x.fprop()
        self.y.fprop()
        self.mask.fprop()
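# Usage sketch (assumption): one character-level batch step. `lines` is a
# collection of character sequences and `char_to_idx` a char-to-id mapping,
# mirroring the constructor arguments above; after fprop(), `x` holds
# chars[:-1] and `y` holds chars[1:] for next-character prediction.
def _data_block_usage_sketch(lines, char_to_idx):
    data_block = DataBlock(lines, char_to_idx, batch_size=32,
                           x_device_id=0, y_device_id=0)
    data_block.blocking_contexts = []
    data_block.fprop()
    # data_block.x now holds the inputs, data_block.y the shifted targets
    # and data_block.mask the per-column validity mask
    return data_block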
class MnistMiniBatchesGenerator(object):
    def __init__(self, train_x, train_y, valid_x, valid_y, batch_size,
                 device_id):
        self.context = Context(device_id)
        device_id = self.context.device_id
        self.train_x = Matrix.from_npa(train_x.T.astype(np.float32),
                                       device_id=device_id)
        self.valid_x = Matrix.from_npa(valid_x.T.astype(np.float32),
                                       device_id=device_id)
        self.train_y = Matrix.from_npa(train_y[:, np.newaxis], 'int',
                                       device_id=device_id)
        self.valid_y = Matrix.from_npa(valid_y[:, np.newaxis], 'int',
                                       device_id=device_id)
        self.batch_size = batch_size

        x = Matrix.empty(self.batch_size, self.train_x.nrows,
                         device_id=device_id)
        y = Matrix.empty(self.batch_size, 1, 'int', device_id)
        self.x = Connector(x)
        self.y = Connector(y)

        self.train_indices = np.arange(int(self.train_x.ncols),
                                       dtype=np.int32)
        self.valid_indices = np.arange(int(self.valid_x.ncols),
                                       dtype=np.int32)
        self.indices = Matrix.empty(self.batch_size, 1, 'int', device_id)
        self.rng = np.random.RandomState(42)
        self.rng.shuffle(self.train_indices)
        self.train_i = 0
        self.valid_i = 0
        self.training_mode = True
        self.blocking_contexts = None

    def set_training_mode(self):
        self.training_mode = True

    def set_testing_mode(self):
        self.training_mode = False

    def fprop(self):
        indices = self.train_indices if self.training_mode else \
                  self.valid_indices
        i = self.train_i if self.training_mode else self.valid_i
        x = self.train_x if self.training_mode else self.valid_x
        y = self.train_y if self.training_mode else self.valid_y
        indices = indices[self.batch_size * i:self.batch_size * (i + 1)]
        indices = np.asfortranarray(indices[:, np.newaxis])
        if self.training_mode:
            self.train_i += 1
        else:
            self.valid_i += 1
        if indices.size:
            self.indices.assign_npa(self.context, indices)
            self.x.nrows = indices.size
            self.y.nrows = indices.size
            self.context.wait(*self.blocking_contexts)
            x.slice_columns_and_transpose(self.context, self.indices, self.x)
            y.slice_rows(self.context, self.indices, self.y)
            self.x.fprop()
            self.y.fprop()
        else:
            if self.training_mode:
                self.train_i = 0
                self.rng.shuffle(self.train_indices)
                self.fprop()
            else:
                self.valid_i = 0
                raise StopIteration()
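# Usage sketch (assumption): a full validation sweep. In testing mode,
# fprop() raises StopIteration once the validation indices are exhausted;
# in training mode the indices are reshuffled and iteration restarts, so
# training never raises.
def _mnist_generator_usage_sketch(train_x, train_y, valid_x, valid_y):
    data_block = MnistMiniBatchesGenerator(train_x, train_y,
                                           valid_x, valid_y,
                                           batch_size=128, device_id=0)
    data_block.blocking_contexts = []
    data_block.set_testing_mode()
    try:
        while True:
            data_block.fprop()
            # data_block.x and data_block.y hold the current batch
    except StopIteration:
        pass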
class LstmBlock(object):
    """
    A long short-term memory (LSTM) block.

    Parameters
    ----------
    W
        Input-to-hidden weights, used in ``x[t] * W``.
    R
        Recurrent hidden-to-hidden weights, used in ``h[t-1] * R``.
    b
        Bias added to the gate pre-activations.
    grad_clipping
        If non-zero, gradients w.r.t. the gate pre-activations are clipped
        into ``[-grad_clipping, grad_clipping]``.
    x
        Input at the current time step.
    mask
        Optional sequence mask for variable-length batches (may be None).
    prev_c
        Cell state from the previous time step.
    prev_h
        Hidden state from the previous time step.
    device_id : int
        Defines the device's id on which the computation will take place.
    """
    def __init__(self, W, R, b, grad_clipping, x, mask, prev_c, prev_h,
                 device_id=None):
        self.f_context = Context(device_id)
        device_id = self.f_context.device_id
        if W.bpropagable:
            self.W, self.dL_dW = W.register_usage(device_id, device_id)
            self.W_b_context = Context(device_id)
        else:
            self.W = W.register_usage(device_id)
        if R.bpropagable:
            self.R, self.dL_dR = R.register_usage(device_id, device_id)
            self.R_b_context = Context(device_id)
        else:
            self.R = R.register_usage(device_id)
        if b.bpropagable:
            self.b, self.dL_db = b.register_usage(device_id, device_id)
            self.b_b_context = Context(device_id)
        else:
            self.b = b.register_usage(device_id)
        self.grad_clipping = grad_clipping
        if x.bpropagable:
            self.x, self.dL_dx = x.register_usage(device_id, device_id)
            self.x_b_context = Context(device_id)
        else:
            self.x = x.register_usage(device_id)
        if mask:
            self.mask = mask.register_usage(device_id)
        if prev_c.bpropagable:
            self.prev_c, self.dL_dprev_c = prev_c.register_usage(device_id,
                                                                 device_id)
            self.prev_c_b_context = Context(device_id)
        else:
            self.prev_c = prev_c.register_usage(device_id)
        if prev_h.bpropagable:
            self.prev_h, self.dL_dprev_h = prev_h.register_usage(device_id,
                                                                 device_id)
            self.prev_h_b_context = Context(device_id)
        else:
            self.prev_h = prev_h.register_usage(device_id)
        self.learning = W.bpropagable or R.bpropagable or x.bpropagable or \
                        prev_c.bpropagable or prev_h.bpropagable
        if self.learning:
            self.b_context = Context(device_id)

        dim = self.R.nrows
        batch_size = self.x.nrows

        self.zifo = Matrix.empty(batch_size, 4 * dim, device_id=device_id)
        self.z = self.zifo[:, 0*dim:1*dim]
        self.i = self.zifo[:, 1*dim:2*dim]
        self.f = self.zifo[:, 2*dim:3*dim]
        self.o = self.zifo[:, 3*dim:4*dim]
        self.c = Matrix.empty_like(self.prev_c, device_id)
        self.c = Connector(self.c, device_id if self.learning else None)
        self.tanh_c = Matrix.empty_like(self.c, device_id)
        self.h = Matrix.empty_like(self.c, device_id)
        self.h = Connector(self.h, device_id if self.learning else None)

        if self.learning:
            self._dzifo_dpre_zifo = Matrix.empty_like(self.zifo)
            self.dz_dpre_z = self._dzifo_dpre_zifo[:, 0*dim:1*dim]
            self.di_dpre_i = self._dzifo_dpre_zifo[:, 1*dim:2*dim]
            self.df_dpre_f = self._dzifo_dpre_zifo[:, 2*dim:3*dim]
            self.do_dpre_o = self._dzifo_dpre_zifo[:, 3*dim:4*dim]
            self.dL_dpre_zifo = self._dzifo_dpre_zifo
            self.dL_dpre_z = self.dz_dpre_z
            self.dL_dpre_i = self.di_dpre_i
            self.dL_dpre_f = self.df_dpre_f
            self.dL_dpre_o = self.do_dpre_o
            self._dtanh_c_dc = Matrix.empty_like(self.c)

    @property
    def dzifo_dpre_zifo(self):
        if self.learning:
            return self._dzifo_dpre_zifo

    @property
    def dtanh_c_dc(self):
        if self.learning:
            return self._dtanh_c_dc

    def fprop(self):
        # zifo = tanh_sigm(x[t] * W + h[t-1] * R + b)
        self.zifo.assign_dot(self.f_context, self.x, self.W)
        self.zifo.add_dot(self.f_context, self.prev_h, self.R)
        self.zifo.add(self.f_context, self.b)
        self.zifo.tanh_sigm(self.f_context, self.zifo,
                            self.dzifo_dpre_zifo, axis=1)
        # c[t] = i[t] .* z[t] + f[t] .* c[t-1]
        # h[t] = o[t] .* tanh(c[t])
        self.c.assign_sum_hprod(self.f_context, self.i, self.z, self.f,
                                self.prev_c)
        self.c.tanh(self.f_context, self.tanh_c, self.dtanh_c_dc)
        self.h.assign_hprod(self.f_context, self.o, self.tanh_c)
        if hasattr(self, 'mask'):
            # s[t] = mask .* s[t] + (1 - mask) .* s[t-1]
            self.c.assign_masked_addition(self.f_context, self.mask,
                                          self.c, self.prev_c)
            self.h.assign_masked_addition(self.f_context, self.mask,
                                          self.h, self.prev_h)
        self.c.fprop()
        self.h.fprop()

    def bprop(self):
        dL_dc = self.c.backward_matrix
        dL_dh = self.h.backward_matrix
        if hasattr(self, 'mask'):
            # dL/ds[t-1] = (1 - mask) .* dL/ds[t]
            # dL/ds[t] = mask .* dL/ds[t]
            if hasattr(self, 'dL_dprev_c'):
                self.dL_dprev_c.add_hprod_one_minus_mask(
                    self.prev_c_b_context, self.mask, dL_dc)
            dL_dc.hprod(self.prev_c_b_context, self.mask)
            if hasattr(self, 'dL_dprev_h'):
                self.dL_dprev_h.add_hprod_one_minus_mask(
                    self.prev_h_b_context, self.mask, dL_dh)
            dL_dh.hprod(self.prev_h_b_context, self.mask)

        # dL/dc[t] = dL[t+1]/dc[t] + dL/dh[t] .* o[t] .* dtanh(c[t])/dc[t]
        dL_dc.add_hprod(self.b_context, dL_dh, self.o, self.dtanh_c_dc)

        # self.dzifo_dpre_zifo was calculated in self.f_context, so we have
        # to wait for it explicitly in self.b_context, because it does not
        # carry a proper last_modification_context
        self.b_context.wait(self.f_context)
        # dL/dpre_o[t] = dL/dh[t] .* tanh(c[t]) .* do[t]/dpre_o[t]
        # dL/dpre_f[t] = dL/dc[t] .* c[t-1] .* df[t]/dpre_f[t]
        # dL/dpre_i[t] = dL/dc[t] .* z[t] .* di[t]/dpre_i[t]
        # dL/dpre_z[t] = dL/dc[t] .* i[t] .* dz[t]/dpre_z[t]
        self.dL_dpre_o.assign_hprod(self.b_context, dL_dh, self.tanh_c,
                                    self.do_dpre_o)
        self.dL_dpre_f.assign_hprod(self.b_context, dL_dc, self.prev_c,
                                    self.df_dpre_f)
        self.dL_dpre_i.assign_hprod(self.b_context, dL_dc, self.z,
                                    self.di_dpre_i)
        self.dL_dpre_z.assign_hprod(self.b_context, dL_dc, self.i,
                                    self.dz_dpre_z)

        if self.grad_clipping:
            self.dL_dpre_zifo.clip(self.b_context, -self.grad_clipping,
                                   self.grad_clipping)
        else:
            self.dL_dpre_zifo.last_modification_context = self.b_context

        if hasattr(self, 'dL_dW'):
            # dL_dW += x[t].T * dL/dpre_zifo[t]
            self.dL_dW.add_dot(self.W_b_context, self.x,
                               self.dL_dpre_zifo, 'T')
        if hasattr(self, 'dL_dR'):
            # dL_dR += h[t-1].T * dL/dpre_zifo[t]
            self.dL_dR.add_dot(self.R_b_context, self.prev_h,
                               self.dL_dpre_zifo, 'T')
        if hasattr(self, 'dL_db'):
            # dL_db += sum(dL/dpre_zifo[t], axis=0)
            self.dL_db.add_repeat_derivative(self.b_b_context,
                                             self.dL_dpre_zifo,
                                             self.dL_dpre_zifo.nrows, axis=0)
        if hasattr(self, 'dL_dx'):
            # dL/dx[t] = dL/dpre_zifo[t] * W.T
            self.dL_dx.add_dot(self.x_b_context, self.dL_dpre_zifo, self.W,
                               'N', 'T')
        if hasattr(self, 'dL_dprev_c'):
            # dL/dc[t-1] = f[t] .* dL/dc[t]
            self.dL_dprev_c.add_hprod(self.prev_c_b_context, self.f, dL_dc)
        if hasattr(self, 'dL_dprev_h'):
            # dL/dh[t-1] = dL/dpre_zifo[t] * R.T
            self.dL_dprev_h.add_dot(self.prev_h_b_context,
                                    self.dL_dpre_zifo, self.R, 'N', 'T')
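# Unrolling sketch (assumption): chains LstmBlock instances over time by
# feeding each block's `c`/`h` Connectors into the next one. `W`, `R`, `b`
# are shared Connector-wrapped parameters, `x_steps`/`mask_steps` are
# per-step Connectors (e.g. from a data block above) and `c0`/`h0` are
# initial-state Connectors; all of these names are placeholders, not part
# of the original source.
def _lstm_unrolling_sketch(W, R, b, x_steps, mask_steps, c0, h0):
    blocks = []
    prev_c, prev_h = c0, h0
    for x_t, mask_t in zip(x_steps, mask_steps):
        block = LstmBlock(W, R, b, grad_clipping=1.0, x=x_t, mask=mask_t,
                          prev_c=prev_c, prev_h=prev_h)
        blocks.append(block)
        prev_c, prev_h = block.c, block.h
    # forward pass in time order, backward pass in reverse
    for block in blocks:
        block.fprop()
    for block in reversed(blocks):
        block.bprop()
    return blocks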