def __call__(self, es):
  """
  Runs the stacked bidirectional LSTM layers over the given sequence and returns an
  ExpressionSequence in which each position concatenates the top layer's forward and
  backward outputs.

  :param es: an ExpressionSequence
  """
  mask = es.mask
  # first layer
  forward_es = self.forward_layers[0](es)
  rev_backward_es = self.backward_layers[0](ReversedExpressionSequence(es))
  # higher layers read both the forward and the (re-reversed) backward outputs of the layer below
  for layer_i in range(1, len(self.forward_layers)):
    new_forward_es = self.forward_layers[layer_i]([forward_es, ReversedExpressionSequence(rev_backward_es)])
    rev_backward_es = ExpressionSequence(
      self.backward_layers[layer_i]([ReversedExpressionSequence(forward_es), rev_backward_es]).as_list(),
      mask=mask)
    forward_es = new_forward_es
  # per layer, the final state concatenates the forward and backward final hidden and cell states
  self._final_states = [
    FinalTransducerState(dy.concatenate([self.forward_layers[layer_i].get_final_states()[0].main_expr(),
                                         self.backward_layers[layer_i].get_final_states()[0].main_expr()]),
                         dy.concatenate([self.forward_layers[layer_i].get_final_states()[0].cell_expr(),
                                         self.backward_layers[layer_i].get_final_states()[0].cell_expr()]))
    for layer_i in range(len(self.forward_layers))]
  return ExpressionSequence(
    expr_list=[dy.concatenate([forward_es[i], rev_backward_es[-i - 1]]) for i in range(len(forward_es))],
    mask=mask)
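# A minimal pure-Python sketch (no DyNet needed, not part of the transducer itself) of the
# index alignment used in the return statement above: the backward layer runs over the
# reversed sequence, so its j-th output corresponds to original position len-1-j, and
# indexing with [-i-1] pairs forward output i with the backward output for the same position.
def _alignment_sketch():
  seq = ["x0", "x1", "x2", "x3"]
  forward_out = [f"fwd({tok})" for tok in seq]                  # original time order
  rev_backward_out = [f"bwd({tok})" for tok in reversed(seq)]   # reversed time order
  paired = [(forward_out[i], rev_backward_out[-i - 1]) for i in range(len(seq))]
  assert paired[0] == ("fwd(x0)", "bwd(x0)")                    # both halves refer to position 0
  assert paired[-1] == ("fwd(x3)", "bwd(x3)")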
def transduce(self, es):
  """
  Runs a single forward and a single backward LSTM layer, feeds the concatenated outputs
  through the residual network, and returns its output. The final states are the
  concatenated BiLSTM final state followed by the residual network's final states.

  :param es: an ExpressionSequence
  """
  forward_e = self.forward_layer(es)
  backward_e = self.backward_layer(ReversedExpressionSequence(es))
  self._final_states = [
    FinalTransducerState(dy.concatenate([self.forward_layer.get_final_states()[0].main_expr(),
                                         self.backward_layer.get_final_states()[0].main_expr()]),
                         dy.concatenate([self.forward_layer.get_final_states()[0].cell_expr(),
                                         self.backward_layer.get_final_states()[0].cell_expr()]))]
  output = self.residual_network.transduce(
    ExpressionSequence(expr_list=[dy.concatenate([f, b])
                                  for f, b in zip(forward_e, ReversedExpressionSequence(backward_e))]))
  self._final_states += self.residual_network.get_final_states()
  return output
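# Hedged sketch (assumes the dynet package is installed; not part of the transducer itself):
# concatenating a forward and a backward state along the feature axis, as done for the
# FinalTransducerState above, turns two H-dimensional vectors into one 2H-dimensional vector.
def _final_state_concat_sketch():
  import dynet as dy
  dy.renew_cg()
  h_fwd = dy.inputVector([0.1, 0.2, 0.3])   # stand-in for the forward main_expr(), H=3
  h_bwd = dy.inputVector([0.4, 0.5, 0.6])   # stand-in for the backward main_expr(), H=3
  combined = dy.concatenate([h_fwd, h_bwd])
  assert combined.dim()[0] == (6,)          # 2H = 6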
def __call__(self, es): """ returns the list of output Expressions obtained by adding the given inputs to the current state, one by one, to both the forward and backward RNNs, and concatenating. :param es: an ExpressionSequence """ es_list = [es] for layer_i, (fb, bb) in enumerate(self.builder_layers): reduce_factor = self._reduce_factor_for_layer(layer_i) if self.downsampling_method=="concat" and len(es_list[0]) % reduce_factor != 0: raise ValueError("For 'concat' subsampling, sequence lengths must be multiples of the total reduce factor. Configure batcher accordingly.") fs = fb(es_list) bs = bb([ReversedExpressionSequence(es_item) for es_item in es_list]) if layer_i < len(self.builder_layers) - 1: if self.downsampling_method=="skip": es_list = [ExpressionSequence(expr_list=fs[::reduce_factor]), ExpressionSequence(expr_list=bs[::reduce_factor][::-1])] elif self.downsampling_method=="concat": es_len = len(es_list[0]) es_list_fwd = [] es_list_bwd = [] for i in range(0, es_len, reduce_factor): for j in range(reduce_factor): if i==0: es_list_fwd.append([]) es_list_bwd.append([]) es_list_fwd[j].append(fs[i+j]) es_list_bwd[j].append(bs[len(es_list[0])-reduce_factor+j-i]) es_list = [ExpressionSequence(expr_list=es_list_fwd[j]) for j in range(reduce_factor)] + [ExpressionSequence(expr_list=es_list_bwd[j]) for j in range(reduce_factor)] else: raise RuntimeError("unknown downsampling_method %s" % self.downsampling_method) else: # concat final outputs ret_es = ExpressionSequence(expr_list=[dy.concatenate([f, b]) for f, b in zip(fs, ReversedExpressionSequence(bs))]) self._final_states = [FinalTransducerState(dy.concatenate([fb.get_final_states()[0].main_expr(), bb.get_final_states()[0].main_expr()]), dy.concatenate([fb.get_final_states()[0].cell_expr(), bb.get_final_states()[0].cell_expr()])) \ for (fb, bb) in self.builder_layers] return ret_es
def __call__(self, es): """ returns the list of output Expressions obtained by adding the given inputs to the current state, one by one, to both the forward and backward RNNs, and concatenating. :param es: an ExpressionSequence """ es_list = [es] zero_pad = None batch_size = es_list[0][0].dim()[1] for layer_i, (fb, bb) in enumerate(self.builder_layers): reduce_factor = self._reduce_factor_for_layer(layer_i) while self.downsampling_method == "concat" and len( es_list[0]) % reduce_factor != 0: for es_i in range(len(es_list)): expr_list = es_list[es_i].as_list() if zero_pad is None or zero_pad.dim( )[0][0] != expr_list[0].dim()[0][0]: zero_pad = dy.zeros(dim=expr_list[0].dim()[0][0], batch_size=batch_size) expr_list.append(zero_pad) es_list[es_i] = ExpressionSequence(expr_list=expr_list) fs = fb(es_list) bs = bb( [ReversedExpressionSequence(es_item) for es_item in es_list]) if layer_i < len(self.builder_layers) - 1: if self.downsampling_method == "skip": es_list = [ ExpressionSequence(expr_list=fs[::reduce_factor]), ExpressionSequence(expr_list=bs[::reduce_factor][::-1]) ] elif self.downsampling_method == "concat": es_len = len(es_list[0]) es_list_fwd = [] es_list_bwd = [] for i in range(0, es_len, reduce_factor): for j in range(reduce_factor): if i == 0: es_list_fwd.append([]) es_list_bwd.append([]) es_list_fwd[j].append(fs[i + j]) es_list_bwd[j].append(bs[len(es_list[0]) - reduce_factor + j - i]) es_list = [ ExpressionSequence(expr_list=es_list_fwd[j]) for j in range(reduce_factor) ] + [ ExpressionSequence(expr_list=es_list_bwd[j]) for j in range(reduce_factor) ] else: raise RuntimeError("unknown downsampling_method %s" % self.downsampling_method) else: # concat final outputs ret_es = ExpressionSequence(expr_list=[ dy.concatenate([f, b]) for f, b in zip(fs, ReversedExpressionSequence(bs)) ]) self._final_states = [FinalTransducerState(dy.concatenate([fb.get_final_states()[0].main_expr(), bb.get_final_states()[0].main_expr()]), dy.concatenate([fb.get_final_states()[0].cell_expr(), bb.get_final_states()[0].cell_expr()])) \ for (fb, bb) in self.builder_layers] return ret_es
def transduce(self, es: ExpressionSequence) -> ExpressionSequence:
  """
  returns the list of output Expressions obtained by adding the given inputs
  to the current state, one by one, to both the forward and backward RNNs,
  and concatenating.

  Args:
    es: an ExpressionSequence
  """
  es_list = [es]
  for layer_i, (fb, bb) in enumerate(self.builder_layers):
    reduce_factor = self._reduce_factor_for_layer(layer_i)
    if es_list[0].mask is None:
      mask_out = None
    else:
      mask_out = es_list[0].mask.lin_subsampled(reduce_factor)
    if self.downsampling_method == "concat" and len(es_list[0]) % reduce_factor != 0:
      raise ValueError(f"For 'concat' subsampling, sequence lengths must be multiples of the total reduce factor, "
                       f"but got sequence length={len(es_list[0])} for reduce_factor={reduce_factor}. "
                       f"Set Batcher's pad_src_to_multiple argument accordingly.")
    fs = fb.transduce(es_list)
    bs = bb.transduce([ReversedExpressionSequence(es_item) for es_item in es_list])
    if layer_i < len(self.builder_layers) - 1:
      if self.downsampling_method == "skip":
        es_list = [ExpressionSequence(expr_list=fs[::reduce_factor], mask=mask_out),
                   ExpressionSequence(expr_list=bs[::reduce_factor][::-1], mask=mask_out)]
      elif self.downsampling_method == "concat":
        es_len = len(es_list[0])
        es_list_fwd = []
        es_list_bwd = []
        for i in range(0, es_len, reduce_factor):
          for j in range(reduce_factor):
            if i == 0:
              es_list_fwd.append([])
              es_list_bwd.append([])
            es_list_fwd[j].append(fs[i + j])
            es_list_bwd[j].append(bs[len(es_list[0]) - reduce_factor + j - i])
        es_list = [ExpressionSequence(expr_list=es_list_fwd[j], mask=mask_out) for j in range(reduce_factor)] + \
                  [ExpressionSequence(expr_list=es_list_bwd[j], mask=mask_out) for j in range(reduce_factor)]
      else:
        raise RuntimeError(f"unknown downsampling_method {self.downsampling_method}")
    else:
      # concat final outputs
      ret_es = ExpressionSequence(
        expr_list=[dy.concatenate([f, b]) for f, b in zip(fs, ReversedExpressionSequence(bs))],
        mask=mask_out)
  self._final_states = [
    FinalTransducerState(dy.concatenate([fb.get_final_states()[0].main_expr(),
                                         bb.get_final_states()[0].main_expr()]),
                         dy.concatenate([fb.get_final_states()[0].cell_expr(),
                                         bb.get_final_states()[0].cell_expr()]))
    for (fb, bb) in self.builder_layers]
  return ret_es
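# A pure-Python sketch (not part of the transducer itself) of the remedy suggested by the
# ValueError above: the batcher-side pad_src_to_multiple option referenced in the message
# would need to round source lengths up to the nearest multiple of the total reduce factor
# (assumed here to be the product of the per-layer reduce factors; the Batcher itself is
# not shown in this file).
def _pad_to_multiple(seq_len: int, multiple: int) -> int:
  return ((seq_len + multiple - 1) // multiple) * multiple

# e.g. _pad_to_multiple(7, 4) == 8 and _pad_to_multiple(8, 4) == 8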