Example #1
def adadelta(ips,cost,fupdate,names,parameters,gradients,lr,norm_lim,rho=0.95,eps=1e-6):
    zipped_grads = [theano.shared(p.get_value() * numpy.float32(0.), name='%s_grad'%k) for k, p in zip(names, parameters)]
    running_up2 = [theano.shared(p.get_value() * numpy.float32(0.), name='%s_rup2'%k) for k, p in zip(names, parameters)]
    running_grads2 = [theano.shared(p.get_value() * numpy.float32(0.), name='%s_rgrad2'%k) for k, p in zip(names, parameters)]

    zgup = [(zg, g) for zg, g in zip(zipped_grads, gradients)]
    rg2up = [(rg2, rho * rg2 + (1. - rho) * (g ** 2)) for rg2, g in zip(running_grads2, gradients)]
    
    update_map = fupdate if fupdate else OrderedUpdates()
    for kk, vv in zgup: update_map[kk] = vv
    for kk, vv in rg2up: update_map[kk] = vv
    
    f_grad_shared = theano.function(ips, cost, updates=update_map, on_unused_input='ignore')

    updir = [-T.sqrt(ru2 + eps) / T.sqrt(rg2 + eps) * zg for zg, ru2, rg2 in zip(zipped_grads, running_up2, running_grads2)]
    ru2up = [(ru2, rho * ru2 + (1. - rho) * (ud ** 2)) for ru2, ud in zip(running_up2, updir)]
    param_up = [(p, p + ud) for p, ud in zip(parameters, updir)]
    
    if norm_lim > 0:
        param_up = clipGradient(param_up, norm_lim, names)
        
    #update_map = fupdate if fupdate else OrderedUpdates()
    #for kk, vv in ru2up: update_map[kk] = vv
    #for kk, vv in param_up: update_map[kk] = vv

    f_param_update = theano.function([lr], [], updates=ru2up+param_up, on_unused_input='ignore')

    return f_grad_shared, f_param_update
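The two compiled functions follow the usual Theano two-stage pattern: f_grad_shared evaluates the cost and refreshes the shared gradient buffers, and f_param_update then applies the ADADELTA step. Below is a minimal usage sketch, assuming the adadelta function above (and the OrderedUpdates import it relies on) is available in the current module; the toy regression model, random batches, and loop are illustrative placeholders, not part of the original example.

# Hedged usage sketch for the adadelta example above (assumptions noted in the lead-in).
import numpy
import theano
import theano.tensor as T

x = T.matrix('x')
y = T.vector('y')
W = theano.shared(numpy.zeros(3, dtype=theano.config.floatX), name='W')
cost = T.mean((T.dot(x, W) - y) ** 2)
lr = T.scalar('lr')
names, parameters = ['W'], [W]
gradients = T.grad(cost, parameters)

# norm_lim=0 skips the clipGradient step, which is not shown in this example.
f_grad_shared, f_param_update = adadelta([x, y], cost, None, names,
                                         parameters, gradients, lr, norm_lim=0)
for _ in range(10):
    batch_x = numpy.random.randn(8, 3).astype(theano.config.floatX)
    batch_y = numpy.random.randn(8).astype(theano.config.floatX)
    print(f_grad_shared(batch_x, batch_y))  # stores gradients, returns the cost
    f_param_update(1.0)                     # the lr value is ignored by ADADELTA itself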
Example #2
    def test_updates_add(self):

        up1 = OrderedUpdates()
        up2 = OrderedUpdates()

        a = theano.shared('a')
        b = theano.shared('b')

        assert not up1 + up2

        up1[a] = 5

        # test that addition works
        assert up1
        assert up1 + up2
        assert not up2

        assert len(up1 + up2) == 1
        assert (up1 + up2)[a] == 5

        up2[b] = 7
        assert up1
        assert up1 + up2
        assert up2

        assert len(up1 + up2) == 2
        assert (up1 + up2)[a] == 5
        assert (up1 + up2)[b] == 7

        assert a in (up1 + up2)
        assert b in (up1 + up2)

        # this works even though there is a collision
        # because values all match
        assert len(up1 + up1 + up1) == 1

        up2[a] = 8  # a gets different value in up1 and up2
        try:
            up1 + up2
            assert 0
        except KeyError:
            pass

        # reassigning to a key works fine right?
        up2[a] = 10
Example #3
    def test_updates_setitem(self):

        up = OrderedUpdates()

        # keys have to be SharedVariables
        self.assertRaises(TypeError, up.__setitem__, 5, 7)
        self.assertRaises(TypeError, up.__setitem__, T.vector(), 7)

        up[theano.shared(88)] = 7
Example #4
    def test_updates_setitem(self):

        up = OrderedUpdates()

        # keys have to be SharedVariables
        with pytest.raises(TypeError):
            up.__setitem__(5, 7)
        with pytest.raises(TypeError):
            up.__setitem__(tt.vector(), 7)

        up[theano.shared(88)] = 7
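The tests above pin down the OrderedUpdates contract: keys must be shared variables, and '+' merges two update sets in insertion order, raising KeyError when the same key maps to different values. Below is a small stand-alone sketch of the same container used with theano.function; the counter variable is an illustrative placeholder, not part of the original tests.

# Hedged mini-example of OrderedUpdates outside a test class.
import theano
from theano.updates import OrderedUpdates

counter = theano.shared(0, name='counter')
ups = OrderedUpdates()
ups[counter] = counter + 1                       # keys must be shared variables
step = theano.function([], counter, updates=ups)
for _ in range(3):
    print(step())                                # 0, 1, 2: returns the old value, then updates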
Example #5
def sgd(ips,cost,fupdate,names,parameters,gradients,lr,norm_lim):
    gshared = [theano.shared(p.get_value() * 0., name='%s_grad'%k) for k, p in zip(names, parameters)]
    gsup = [(gs, g) for gs, g in zip(gshared, gradients)]
    
    update_map = fupdate if fupdate else OrderedUpdates()
    for kk, vv in gsup: update_map[kk] = vv

    f_grad_shared = theano.function(ips, cost, updates=update_map, on_unused_input='ignore')

    pup = [(p, p - lr * g) for p, g in zip(parameters, gshared)]
    
    if norm_lim > 0:
        pup = clipGradient(pup, norm_lim, names)
        
    #update_map = fupdate if fupdate else OrderedUpdates()
    #for kk, vv in pup: update_map[kk] = vv
    
    f_param_update = theano.function([lr], [], updates=pup, on_unused_input='ignore')

    return f_grad_shared, f_param_update
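clipGradient itself is not defined in any of these examples. The sketch below is a guess at what such a helper commonly looks like in Theano text-CNN training code, consistent with how it is called above (a list of (param, update) pairs, a squared norm limit, and the parameter names); the column-wise rescaling and the 'Words' embedding exclusion are assumptions, not the original implementation.

# Hedged sketch of a possible clipGradient helper (assumed, see lead-in).
import theano.tensor as T

def clipGradient(param_updates, norm_lim, names):
    clipped = []
    for (p, p_new), name in zip(param_updates, names):
        # Rescale columns of 2-D parameters whose L2 norm exceeds sqrt(norm_lim);
        # embedding matrices (named 'Words' here, by assumption) are left untouched.
        if p.ndim == 2 and 'Words' not in name:
            col_norms = T.sqrt(T.sum(T.sqr(p_new), axis=0))
            desired = T.clip(col_norms, 0, T.sqrt(norm_lim))
            clipped.append((p, p_new * (desired / (1e-7 + col_norms))))
        else:
            clipped.append((p, p_new))
    return clipped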
Example #6
    def set_objective(self, loss, params, inputs=None, updts=None, grads=None,
                      compilation_mode=None, **kwargs):
        '''
            Changes the objective function to be optimized
            @param loss theano graph representing the loss to be optimized
            @param params theano shared variables representing the parameters
                          to be optimized
            @param inputs theano variables representing the inputs required to
                          compute the loss, other than params
            @param updts dictionary or list of theano updates to be applied
                         after every evaluation of the loss function
            @param grads gradients of the loss function. If not provided, will
                         be computed here
        '''
        if inputs is None:
            inputs = []

        if updts is not None:
            updts = OrderedUpdates(updts)

        if grads is None:
            utils.print_with_stamp('Building computation graph for gradients',
                                   self.name)
            grads = theano.grad(loss, params)

        utils.print_with_stamp('Compiling function for loss', self.name)
        self.loss_fn = theano.function(
            inputs, loss, updates=updts, allow_input_downcast=True,
            mode=compilation_mode)
        utils.print_with_stamp('Compiling function for loss+gradients',
                               self.name)
        self.grads_fn = theano.function(
            inputs, [loss, ]+grads, updates=updts, allow_input_downcast=True,
            mode=compilation_mode)

        self.n_evals = 0
        self.start_time = 0
        self.iter_time = 0
        self.params = params
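A minimal sketch of what set_objective compiles when updts and grads are left as None, written outside the optimizer class; the quadratic loss, single parameter, and printed values are illustrative placeholders, and the utils.print_with_stamp / self.name bookkeeping is omitted because it is not shown here.

# Hedged stand-alone version of the loss_fn / grads_fn pair compiled above.
import numpy as np
import theano
import theano.tensor as T

x = T.vector('x')
w = theano.shared(np.ones(3, dtype=theano.config.floatX), name='w')
loss = T.sum((w - x) ** 2)
grads = theano.grad(loss, [w])

loss_fn = theano.function([x], loss, allow_input_downcast=True)
grads_fn = theano.function([x], [loss] + grads, allow_input_downcast=True)

print(loss_fn(np.zeros(3)))    # 3.0
print(grads_fn(np.zeros(3)))   # [3.0, array([2., 2., 2.])]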
Example #7
def finish_scan(fn_outputs, local_vars):

    n_fixed_steps = local_vars["n_fixed_steps"]
    return_steps = local_vars["return_steps"]
    non_seqs = local_vars["non_seqs"]
    dummy_args = local_vars["dummy_args"]
    args = local_vars["args"]
    outs_info = local_vars["outs_info"]
    n_outs = local_vars["n_outs"]
    mit_sot_inner_outputs = local_vars["mit_sot_inner_outputs"]
    sit_sot_inner_outputs = local_vars["sit_sot_inner_outputs"]
    sit_sot_scan_inputs = local_vars["sit_sot_scan_inputs"]
    sit_sot_inner_inputs = local_vars["sit_sot_inner_inputs"]
    actual_n_steps = local_vars["actual_n_steps"]
    sit_sot_rightOrder = local_vars["sit_sot_rightOrder"]
    strict = local_vars["strict"]
    non_sequences = local_vars["non_sequences"]
    inner_seqs = local_vars["inner_seqs"]
    mit_mot_inner_inputs = local_vars["mit_mot_inner_inputs"]
    mit_sot_inner_inputs = local_vars["mit_sot_inner_inputs"]
    mit_mot_inner_outputs = local_vars["mit_mot_inner_outputs"]
    mit_sot_tap_array = local_vars["mit_sot_tap_array"]
    allow_gc = local_vars["allow_gc"]
    n_seqs = local_vars["n_seqs"]
    n_mit_mot_outs = local_vars["n_mit_mot_outs"]
    mit_mot_out_slices = local_vars["mit_mot_out_slices"]
    truncate_gradient = local_vars["truncate_gradient"]
    name = local_vars["name"]
    mode = local_vars["mode"]
    profile = local_vars["profile"]
    scan_seqs = local_vars["scan_seqs"]
    mit_mot_scan_inputs = local_vars["mit_mot_scan_inputs"]
    mit_sot_scan_inputs = local_vars["mit_sot_scan_inputs"]
    n_mit_mot = local_vars["n_mit_mot"]
    mit_sot_return_steps = local_vars["mit_sot_return_steps"]
    n_mit_sot = local_vars["n_mit_sot"]
    sit_sot_return_steps = local_vars["sit_sot_return_steps"]
    mit_sot_rightOrder = local_vars["mit_sot_rightOrder"]

    condition, outputs, updates = scan_utils.get_updates_and_outputs(
        fn_outputs)
    if condition is not None:
        as_while = True
    else:
        as_while = False
    ##
    # Step 3. Check if we actually need scan and remove it if we don't
    ##

    if n_fixed_steps in [1, -1]:
        # We do not need to use the scan op anymore, so we can just return
        # the outputs and updates we have
        if condition is not None:
            _logger.warning(('When the number of steps is fixed and equal '
                             'to 1, the provided stopping condition, ',
                             str(condition), ' is ignored'))

        for pos, inner_out in enumerate(outputs):
            # We need to see if we need to pad our sequences with an
            # unbroadcastable dimension; case example: we return an
            # output for which we want all intermediate results. If n_steps is 1
            # then, if we return the output as given by the inner function,
            # it will represent only a slice and it will have one
            # dimension less.
            if (isinstance(inner_out.type, tensor.TensorType)
                    and return_steps.get(pos, 0) != 1):
                outputs[pos] = tensor.unbroadcast(
                    tensor.shape_padleft(inner_out), 0)
        if len(outputs) == 1:
            outputs = outputs[0]

        return (outputs, updates)

    ##
    # Step 4. Compile the dummy function
    ##

    # We can now compile a dummy function just to see what shared variables
    # we have and what their update rules are (note that the user has
    # the option not to pass the shared variables to scan, so we need to
    # pick them up manually and add them to scan).
    # Make the compilation as fast as possible by not applying any
    # optimization or conversion to C [ note this region is not important
    # for performance, so we can be as suboptimal as we wish ]

    # extract still-missing inputs (there might still be some) and add them
    # as non-sequences at the end of our args
    fake_nonseqs = [x.type() for x in non_seqs]
    fake_outputs = scan_utils.clone(outputs,
                                    replace=OrderedDict(
                                        izip(non_seqs, fake_nonseqs)))
    all_inputs = ifilter(
        lambda x: (isinstance(x, gof.Variable) and not isinstance(
            x, SharedVariable) and not isinstance(x, gof.Constant)),
        gof.graph.inputs(fake_outputs))
    extra_inputs = [x for x in all_inputs if x not in args + fake_nonseqs]
    non_seqs += extra_inputs
    # Note we do not use all_inputs directly since the order of variables
    # in args is quite important
    dummy_args += extra_inputs

    dummy_outs = outputs
    if condition is not None:
        dummy_outs.append(condition)
    dummy_f = function(dummy_args,
                       dummy_outs,
                       updates=updates,
                       mode=compile.mode.Mode(linker='py', optimizer=None),
                       on_unused_input='ignore',
                       profile=False)

    ##
    # Step 5. Re-arrange inputs of scan into a stricter order
    ##

    # Step 5.0 Check the outputs of the dummy function to see if they
    # match with user provided data

    # if the number of outputs to the function does not match the number of
    # assumed outputs until now (provided by the user) there can be
    # only one explanation: No information is provided for any of the
    # outputs (i.e. we are dealing with a map)
    tmp_dummy_f_outs = len(dummy_f.maker.outputs)
    if as_while:
        tmp_dummy_f_outs -= 1
    if not (tmp_dummy_f_outs == n_outs or outs_info == []):
        raise ValueError('Please provide None as outputs_info for '
                         'any output that does not feed back into '
                         'scan (i.e. it behaves like a map) ')

    if outs_info == []:
        n_outs = len(dummy_f.maker.outputs)
        if as_while:
            n_outs = n_outs - 1
        outs_info = [OrderedDict() for x in xrange(n_outs)]

    # Step 5.1 Outputs with taps different from -1

    for i, out in enumerate(outs_info):
        if 'taps' in out and out['taps'] != [-1]:
            mit_sot_inner_outputs.append(outputs[i])

    # Step 5.2 Outputs with tap equal to -1
    for i, out in enumerate(outs_info):
        if 'taps' in out and out['taps'] == [-1]:
            sit_sot_inner_outputs.append(outputs[i])

    # Step 5.3 Outputs that correspond to update rules of shared variables
    givens = OrderedDict()
    n_shared_outs = 0
    shared_scan_inputs = []
    shared_inner_inputs = []
    shared_inner_outputs = []
    sit_sot_shared = []
    for input in dummy_f.maker.expanded_inputs:
        if isinstance(input.variable, SharedVariable) and input.update:
            new_var = safe_new(input.variable)
            if getattr(input.variable, 'name', None) is not None:
                new_var.name = input.variable.name + '_copy'
            if isinstance(new_var.type, ops.expandable_types):
                sit_sot_inner_inputs.append(new_var)
                sit_sot_scan_inputs.append(
                    scan_utils.expand_empty(
                        tensor.unbroadcast(
                            tensor.shape_padleft(input.variable), 0),
                        actual_n_steps))
                tensor_update = tensor.as_tensor_variable(input.update)
                sit_sot_inner_outputs.append(tensor_update)
                # Note that `pos` here is a negative index. The sign of `pos` is
                # used as a flag to indicate whether this output should be part
                # of the update rules or part of the standard outputs of scan.
                # If `pos` is positive, it corresponds to the standard outputs
                # of scan and refers to the output of index `pos`. If `pos` is
                # negative, it corresponds to the update rules of scan and
                # refers to the update rule of index -1 - `pos`.
                sit_sot_rightOrder.append(-1 - len(sit_sot_shared))
                sit_sot_shared.append(input.variable)
                givens[input.variable] = new_var

            else:
                shared_inner_inputs.append(new_var)
                shared_scan_inputs.append(input.variable)
                shared_inner_outputs.append(input.update)
                givens[input.variable] = new_var
                n_shared_outs += 1
    n_sit_sot = len(sit_sot_inner_inputs)
    # Step 5.4 Outputs with no taps used in the input
    n_nit_sot = 0
    nit_sot_inner_outputs = []
    nit_sot_return_steps = OrderedDict()
    nit_sot_rightOrder = []
    for i, out in enumerate(outs_info):
        if not 'taps' in out:
            nit_sot_inner_outputs.append(outputs[i])
            if i in return_steps:
                nit_sot_return_steps[n_nit_sot] = return_steps[i]
            nit_sot_rightOrder.append(i)
            n_nit_sot += 1

    # Step 5.5 all other arguments including extra inputs
    other_scan_args = []
    other_inner_args = []

    other_scan_args += [
        arg for arg in non_seqs if (not isinstance(arg, SharedVariable)
                                    and not isinstance(arg, tensor.Constant))
    ]

    # Step 5.6 all shared variables with no update rules
    other_inner_args += [
        safe_new(arg, '_copy') for arg in non_seqs
        if (not isinstance(arg, SharedVariable)
            and not isinstance(arg, tensor.Constant))
    ]

    givens.update(OrderedDict(izip(other_scan_args, other_inner_args)))

    if strict:
        non_seqs_set = set(non_sequences if non_sequences is not None else [])

        other_shared_scan_args = [
            arg.variable for arg in dummy_f.maker.expanded_inputs
            if (isinstance(arg.variable, SharedVariable) and not arg.update
                and arg.variable in non_seqs_set)
        ]
        other_shared_inner_args = [
            safe_new(arg.variable, '_copy')
            for arg in dummy_f.maker.expanded_inputs
            if (isinstance(arg.variable, SharedVariable) and not arg.update
                and arg.variable in non_seqs_set)
        ]
    else:
        other_shared_scan_args = [
            arg.variable for arg in dummy_f.maker.expanded_inputs
            if (isinstance(arg.variable, SharedVariable) and not arg.update)
        ]
        other_shared_inner_args = [
            safe_new(arg.variable, '_copy')
            for arg in dummy_f.maker.expanded_inputs
            if (isinstance(arg.variable, SharedVariable) and not arg.update)
        ]
    givens.update(
        OrderedDict(izip(other_shared_scan_args, other_shared_inner_args)))

    ##
    # Step 6. Re-order the outputs and clone them replacing things
    # using the givens
    ##
    inner_inputs = (inner_seqs + mit_mot_inner_inputs + mit_sot_inner_inputs +
                    sit_sot_inner_inputs + shared_inner_inputs +
                    other_shared_inner_args + other_inner_args)

    inner_outs = (mit_mot_inner_outputs + mit_sot_inner_outputs +
                  sit_sot_inner_outputs + nit_sot_inner_outputs +
                  shared_inner_outputs)
    if condition is not None:
        inner_outs.append(condition)
    # Cuda and Gpuarray are imported here, instead of at the top of
    # the file, because that would force on the user some dependencies that we
    # might not want to impose. Currently we are working on removing the
    # dependencies on sandbox code completely.
    from theano.sandbox import cuda, gpuarray
    if cuda.cuda_available or gpuarray.pygpu_activated:
        # very often we end up in this situation when we want to
        # replace w with w_copy, where w is a GPU variable
        # and w_copy is a TensorType. This is caused because shared
        # variables are put on the GPU right away,
        new_givens = OrderedDict()

        for w, w_copy in iteritems(givens):
            if ((isinstance(w.type, cuda.CudaNdarrayType)
                 or isinstance(w.type, gpuarray.GpuArrayType))
                    and isinstance(w_copy.type, tensor.TensorType)):
                for o in inner_outs:
                    new_givens = traverse(o, w, w_copy, new_givens)
            else:
                new_givens[w] = w_copy
    else:
        new_givens = givens

    new_outs = scan_utils.clone(inner_outs, replace=new_givens)

    ##
    # Step 7. Create the Scan Op
    ##

    tap_array = mit_sot_tap_array + [[-1] for x in xrange(n_sit_sot)]
    if allow_gc is None:
        allow_gc = config.scan.allow_gc
    info = OrderedDict()

    info['tap_array'] = tap_array
    info['n_seqs'] = n_seqs
    info['n_mit_mot'] = n_mit_mot
    info['n_mit_mot_outs'] = n_mit_mot_outs
    info['mit_mot_out_slices'] = mit_mot_out_slices
    info['n_mit_sot'] = n_mit_sot
    info['n_sit_sot'] = n_sit_sot
    info['n_shared_outs'] = n_shared_outs
    info['n_nit_sot'] = n_nit_sot
    info['truncate_gradient'] = truncate_gradient
    info['name'] = name
    info['mode'] = mode
    info['destroy_map'] = OrderedDict()
    info['gpu'] = False
    info['as_while'] = as_while
    info['profile'] = profile
    info['allow_gc'] = allow_gc
    info['strict'] = strict

    local_op = scan_op.Scan(inner_inputs, new_outs, info)

    ##
    # Step 8. Compute the outputs using the scan op
    ##
    _scan_inputs = (scan_seqs + mit_mot_scan_inputs + mit_sot_scan_inputs +
                    sit_sot_scan_inputs + shared_scan_inputs +
                    [actual_n_steps for x in xrange(n_nit_sot)] +
                    other_shared_scan_args + other_scan_args)

    scan_inputs = []
    for arg in [actual_n_steps] + _scan_inputs:
        try:
            arg = tensor.as_tensor_variable(arg)
        except TypeError:
            # This happens e.g. for random states, but it is a good way
            # to make sure no input is a CUDA ndarray
            pass
        scan_inputs += [arg]
    scan_outs = local_op(*scan_inputs)
    if type(scan_outs) not in (list, tuple):
        scan_outs = [scan_outs]
    ##
    # Step 9. Figure out which outs are update rules for shared variables
    # and so on ...
    ##

    update_map = OrderedUpdates()

    def remove_dimensions(outs, steps_return, offsets=None):
        out_ls = []
        for idx, out in enumerate(outs):
            if idx in steps_return:
                if steps_return[idx] > 1:
                    out_ls.append(out[-steps_return[idx]:])
                else:
                    out_ls.append(out[-1])
            else:
                if offsets is None:
                    out_ls.append(out)
                else:
                    out_ls.append(out[offsets[idx]:])
        return out_ls

    offset = n_mit_mot
    offsets = [abs(numpy.min(x)) for x in mit_sot_tap_array]
    mit_sot_outs = remove_dimensions(scan_outs[offset:offset + n_mit_sot],
                                     mit_sot_return_steps, offsets)

    offset += n_mit_sot
    offsets = [1 for x in xrange(n_sit_sot)]
    sit_sot_outs = remove_dimensions(scan_outs[offset:offset + n_sit_sot],
                                     sit_sot_return_steps, offsets)

    offset += n_sit_sot
    nit_sot_outs = remove_dimensions(scan_outs[offset:offset + n_nit_sot],
                                     nit_sot_return_steps)

    offset += n_nit_sot
    for idx, update_rule in enumerate(scan_outs[offset:offset +
                                                n_shared_outs]):
        update_map[shared_scan_inputs[idx]] = update_rule

    _scan_out_list = (mit_sot_outs + sit_sot_outs + nit_sot_outs)
    # Step 10. I need to reorder the outputs to be in the order expected by
    # the user
    rightOrder = (mit_sot_rightOrder + sit_sot_rightOrder + nit_sot_rightOrder)
    scan_out_list = [None] * len(rightOrder)
    for idx, pos in enumerate(rightOrder):
        if pos >= 0:
            scan_out_list[pos] = _scan_out_list[idx]
        else:
            # Note that `pos` here is a negative index. The sign of `pos` is
            # used as a flag to indicate whether this output should be part
            # of the update rules or part of the standard outputs of scan.
            # If `pos` is positive, it corresponds to the standard outputs
            # of scan and refers to the output of index `pos`. If `pos` is
            # negative, it corresponds to the update rules of scan and
            # refers to the update rule of index -1 - `pos`.
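            # Added illustration (not from the original source): with the
            # encoding pos = -1 - idx used when building sit_sot_rightOrder,
            # abs(pos) - 1 == -1 - pos recovers idx for any negative pos,
            # e.g. pos = -1 -> idx = 0 and pos = -3 -> idx = 2.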
            update_map[sit_sot_shared[abs(pos) - 1]] = _scan_out_list[idx][-1]
    scan_out_list = [x for x in scan_out_list if x is not None]
    return (scan_out_list, update_map)
Example #8
def scan(fn,
         sequences=None,
         states=None,
         params=None,
         n_steps=None,
         mode=None,
         name=None,
         profile=False,
         allow_gc=None):
    """
    Similar to Theano's official scan, this function gives the user more
    control over the scan op, avoiding certain difficulties that arose from
    missing optimizations.

    :param fn: lambda function that describes one step of scan (see the
        official Theano scan function)
    :param sequences: similar to the official Theano scan. This version
        of scan does not support taps for the sequences (it can only be a
        list of tensors). Scan assumes that sequences have the right length
        and does not check for this.
    :param states: similar to outputs_info of the official scan function.
        There is one crucial difference though, namely that the `initial`
        key in the dictionary has been replaced by the 'membuf' key. This
        reflects a change of meaning. Instead of passing to scan just
        the missing initial steps, one now has to pass a memory buffer in
        which scan will try to store its output. In this memory buffer the
        first entries should be set to the initial values of the
        corresponding states.
        Providing a memory buffer that has fewer entries than the number of
        steps means scan will only use that amount of memory. The user has
        to match the memory buffer size with the number of steps, otherwise
        scan will produce wrong results. Also, if gradients are to be
        computed through the scan, the memory buffer should have the same
        length as the number of steps.
        For states that do not require an initial state, one has to provide a
        dictionary with a single key 'steps' that says how many intermediate
        results to store. See the examples below for more insight.
    :param n_steps: This parameter is mandatory and it will represent the
        number of steps scan will do (scan will not check sequences or any
        other source of information to figure out how many steps it needs
        to do).
    :param mode: Same as for the official scan
    :param name: Same as for the official scan
    :param profile: Same as for the official scan

    Note:
     - there is no truncate / go_backwards anymore!
     - the outputs returned by scan contain the initial states as well (i.e.
     if I loop over k steps, with my smallest tap for an output being -3, and
     keep all intermediate results, my output will be of length k+3)

     Examples:
         (a) if you do not want to store any intermediate results (just the
         last one)

         # The memory buffer can be the initial state, just that we need to
         # add one extra dimension in front of it
         state = TT.unbroadcast(TT.shape_padleft(x0),0)
         out,_ = scan(lambda x:x+1, states = state, n_steps = 5)
         # Once we got our result we need to remove the extra dimension
         out = out[0]

        (b) if you want to keep every intermediate result

        state = TT.alloc(TT.constant(0), 6, x0.shape[0])
        state = TT.set_subtensor(state[0], x0)
        out,_ = scan(lambda x:x+1, states = state, n_steps = 5)
        out = out[1:]

    """
    def wrap_into_list(x):
        '''
        Wrap the input into a list if it is not already a list
        '''
        if x is None:
            return []
        elif not isinstance(x, (list, tuple)):
            return [x]
        else:
            return list(x)

    seqs = wrap_into_list(sequences)
    outs_info = wrap_into_list(states)
    if allow_gc is None:
        allow_gc = config.scan.allow_gc

    # Make sure we get rid of numpy arrays or ints or anything like that
    # passed as inputs to scan
    non_seqs = []
    for elem in wrap_into_list(params):
        if not isinstance(elem, gof.Variable):
            non_seqs.append(tensor.as_tensor_variable(elem))
        else:
            non_seqs.append(elem)

    # If we provided a known number of steps ( before compilation)
    # and if that number is 1 or -1, then we can skip the Scan Op,
    # and just apply the inner function once
    # To do that we check here to see the nature of n_steps
    n_fixed_steps = None

    if isinstance(n_steps, (float, int)):
        n_fixed_steps = int(n_steps)
    else:
        try:
            n_fixed_steps = opt.get_scalar_constant_value(n_steps)
        except tensor.basic.NotScalarConstantError:
            n_fixed_steps = None

    # Check n_steps is an int
    if (hasattr(n_steps, 'dtype') and
        str(n_steps.dtype)[:3] not in ('uin', 'int')):
        raise ValueError(' n_steps must be an int. dtype provided '
                         'is %s' % n_steps.dtype)

    # compute number of sequences and number of outputs
    n_seqs = len(seqs)
    n_outs = len(outs_info)

    return_steps = OrderedDict()
    # wrap outputs info in a dictionary if they are not already in one
    for i in xrange(n_outs):
        if outs_info[i] is not None:
            if not isinstance(outs_info[i], dict):
                # by default any output has a tap value of -1
                outs_info[i] = dict(membuf=outs_info[i], taps=[-1])
            elif (not outs_info[i].get('membuf', None) and
                    outs_info[i].get('taps', None)):
                # ^ no initial state but taps provided
                raise ValueError(('If you are using slices of an output '
                                  'you need to provide a memory buffer for '
                                  'the state '), outs_info[i])
            elif (outs_info[i].get('membuf', None) and
                  not outs_info[i].get('taps', None)):
                # ^ initial state but taps not provided
                if 'taps' in outs_info[i]:
                    # ^ explicitly provided a None for taps
                    _logger.warning(
                            'Output %s (index %d) has a memory '
                            'buffer but taps is explicitly set to None ',
                            getattr(outs_info[i]['membuf'], 'name', 'None'),
                            i)
                outs_info[i]['taps'] = [-1]
        else:
            # if a None is provided as the output info we replace it
            # with an dict(steps=n_steps) to simplify handling
            outs_info[i] = dict(steps=n_steps)

    ##
    # Step 2. Generate inputs and outputs of the inner functions
    # for compiling a dummy function (Iteration #1)
    ##

    # create theano inputs for the recursive function
    # note : this is a first batch of possible inputs that will
    #        be compiled in a dummy function; we used this dummy
    #        function to detect shared variables and their updates
    #        and to construct a new and complete list of inputs and
    #        outputs

    n_seqs = 0
    scan_seqs = []     # Variables passed as inputs to the scan op
    inner_seqs = []    # Variables passed as inputs to the inner function
    inner_slices = []  # Actual slices if scan is removed from the picture
    # go through sequences picking up time slices as needed
    for i, seq in enumerate(seqs):
        if isinstance(seq, dict):
            seq = seq['input']
        actual_slice = seq[0]
        _seq_val = tensor.as_tensor_variable(seq)
        _seq_val_slice = _seq_val[0]

        nw_slice = _seq_val_slice.type()
        # Try to transfer test_value to the new variable
        if config.compute_test_value != 'off':
            try:
                nw_slice.tag.test_value = gof.Op._get_test_value(
                    _seq_val_slice)
            except AttributeError as e:
                if config.compute_test_value != 'ignore':
                    # No need to print a warning or raise an error now,
                    # it will be done when fn will be called.
                    _logger.info(('Cannot compute test value for '
                        'the inner function of scan, input value '
                        'missing %s'), e)

        if seq.name:
            nw_slice.name = seq.name + '[t]'
        scan_seqs.append(_seq_val)
        inner_seqs.append(nw_slice)
        inner_slices.append(actual_slice)

        n_seqs += 1

    actual_n_steps = tensor.as_tensor(n_steps)

    # Conventions :
    #   mit_mot = multiple input taps, multiple output taps ( only provided
    #             by the gradient function )
    #   mit_sot = multiple input taps, single output tap (t + 0)
    #   sit_sot = single input tap, single output tap (t + 0)
    #   nit_sot = no input tap, single output tap (t + 0)

    # MIT_MOT -- not provided by the user only by the grad function
    n_mit_mot = 0
    n_mit_mot_outs = 0
    mit_mot_scan_inputs = []
    mit_mot_inner_inputs = []
    mit_mot_inner_outputs = []
    mit_mot_out_slices = []
    mit_mot_rightOrder = []

    # SIT_SOT -- provided by the user
    n_mit_sot = 0
    mit_sot_scan_inputs = []
    mit_sot_inner_inputs = []
    mit_sot_inner_slices = []
    mit_sot_inner_outputs = []
    mit_sot_return_steps = OrderedDict()
    mit_sot_tap_array = []
    mit_sot_rightOrder = []

    n_sit_sot = 0
    sit_sot_scan_inputs = []
    sit_sot_inner_inputs = []
    sit_sot_inner_slices = []
    sit_sot_inner_outputs = []
    sit_sot_return_steps = OrderedDict()
    sit_sot_rightOrder = []
    nit_sot_steps = []
    # go through outputs picking up time slices as needed
    for i, init_out in enumerate(outs_info):
        # Note that our convention dictates that if an output uses
        # just the previous time step, as an initial state we will only
        # provide a tensor of the same dimension as one time step; this
        # makes code much cleaner for those who do not use taps. Otherwise
        # they would always have to shape_padleft the initial state,
        # which is ugly.

        # Note, 'taps' might not be in the dictionary
        if 'taps' in init_out and init_out['taps'] == [-1]:

            actual_arg = init_out['membuf']
            arg = safe_new(init_out['membuf'][0])
            if isinstance(arg, tensor.Constant):
                # safe new returns a clone of the constants, but that is not
                # what we need for initial states
                arg = arg.type()

            # Try to transfer test_value to the new variable
            if config.compute_test_value != 'off':
                try:
                    arg.tag.test_value = gof.Op._get_test_value(actual_arg)
                except AttributeError as e:
                    if config.compute_test_value != 'ignore':
                        # No need to print a warning or raise an error now,
                        # it will be done when fn will be called.
                        _logger.info(('Cannot compute test value for the '
                            'inner function of scan, input value missing %s'),
                                     e)

            if getattr(init_out['membuf'], 'name', None) is not None:
                arg.name = init_out['membuf'].name + '[t-1]'

            # We need now to allocate space for storing the output and copy
            # the initial state over. We do this using the expand function
            # defined in scan utils
            sit_sot_scan_inputs.append(actual_arg)
            sit_sot_inner_slices.append(actual_arg[0])
            if i in return_steps:
                sit_sot_return_steps[n_sit_sot] = return_steps[i]
            sit_sot_inner_inputs.append(arg)
            sit_sot_rightOrder.append(i)
            n_sit_sot += 1

        elif init_out.get('taps', None):

            if numpy.any(numpy.array(init_out.get('taps', [])) > 0):
                # Make sure we do not have requests for future values of a
                # sequence; we cannot provide such values
                raise ValueError('Can not use future taps of outputs',
                                    init_out)
            # go through the taps
            mintap = abs(numpy.min(init_out['taps']))
            mit_sot_tap_array.append(init_out['taps'])
            idx_offset = abs(numpy.min(init_out['taps']))
            # Sequence
            mit_sot_scan_inputs.append(init_out['membuf'])

            if i in return_steps:
                mit_sot_return_steps[n_mit_sot] = return_steps[i]
            mit_sot_rightOrder.append(i)
            n_mit_sot += 1
            for k in init_out['taps']:
                # create a new slice
                actual_nw_slice = init_out['membuf'][k + mintap]
                _init_out_var = tensor.as_tensor_variable(init_out['membuf'])
                _init_out_var_slice = _init_out_var[k + mintap]
                nw_slice = _init_out_var_slice.type()

                # Try to transfer test_value to the new variable
                if config.compute_test_value != 'off':
                    try:
                        nw_slice.tag.test_value = gof.Op._get_test_value(
                            _init_out_var_slice)
                    except AttributeError as e:
                        if config.compute_test_value != 'ignore':
                            # No need to print a warning or raise an error now,
                            # it will be done when fn will be called.
                            _logger.info(('Cannot compute test value for '
                                'the inner function of scan, input value '
                                'missing. %s'), e)

                # give it a name for debugging and pretty printing
                if getattr(init_out['membuf'], 'name', None) is not None:
                    if k > 0:
                        nw_slice.name = (init_out['membuf'].name +
                                            '[t+%d]' % k)
                    elif k == 0:
                        nw_slice.name = init_out['membuf'].name + '[t]'
                    else:
                        nw_slice.name = (init_out['membuf'].name +
                                            '[t%d]' % k)
                mit_sot_inner_inputs.append(nw_slice)
                mit_sot_inner_slices.append(actual_nw_slice)
        else:
            pass

    # Re-order args
    max_mit_sot = numpy.max([-1] + mit_sot_rightOrder) + 1
    max_sit_sot = numpy.max([-1] + sit_sot_rightOrder) + 1
    n_elems = numpy.max([max_mit_sot, max_sit_sot])
    _ordered_args = [[] for x in xrange(n_elems)]
    offset = 0
    for idx in xrange(n_mit_sot):
        n_inputs = len(mit_sot_tap_array[idx])
        if n_fixed_steps == 1:
            _ordered_args[mit_sot_rightOrder[idx]] = \
                            mit_sot_inner_slices[offset:offset + n_inputs]
        else:
            _ordered_args[mit_sot_rightOrder[idx]] = \
                            mit_sot_inner_inputs[offset:offset + n_inputs]
        offset += n_inputs

    for idx in xrange(n_sit_sot):
        if n_fixed_steps == 1:
            _ordered_args[sit_sot_rightOrder[idx]] = \
                                        [sit_sot_inner_slices[idx]]
        else:
            _ordered_args[sit_sot_rightOrder[idx]] = \
                                        [sit_sot_inner_inputs[idx]]

    ordered_args = []
    for ls in _ordered_args:
        ordered_args += ls
    if n_fixed_steps == 1:
        args = (inner_slices +
                ordered_args +
                non_seqs)

    else:
        args = (inner_seqs +
                ordered_args +
                non_seqs)

    # add only the non-shared variables and non-constants to the arguments of
    # the dummy function [ a function should not get shared variables or
    # constants as input ]
    dummy_args = [arg for arg in args
                  if (not isinstance(arg, SharedVariable) and
                      not isinstance(arg, tensor.Constant))]
    # when we apply the lambda expression we get a mixture of update rules
    # and outputs that needs to be separated
    lambda_result = fn(*args)
    condition, outputs, updates = scan_utils.get_updates_and_outputs(
                                                                lambda_result)
    if condition is not None:
        as_while = True
    else:
        as_while = False
    ##
    # Step 3. Check if we actually need scan and remove it if we don't
    ##

    if n_fixed_steps == 1:
        # We do not need to use the scan op anymore, so we can just return
        # the outputs and updates we have
        if condition is not None:
            _logger.warning(('When the number of steps is fixed and equal '
                    'to 1, the provided stopping condition, ',
                    str(condition), ' is ignored'))

        for pos, inner_out in enumerate(outputs):
            # We need to see if we need to pad our sequences with an
            # unbroadcastable dimension; case example: we return an
            # output for which we want all intermediate results. If n_steps is 1
            # then, if we return the output as given by the inner function,
            # it will represent only a slice and it will have one
            # dimension less.
            if (isinstance(inner_out.type, tensor.TensorType) and
                return_steps.get(pos, 0) != 1):
                outputs[pos] = tensor.unbroadcast(
                    tensor.shape_padleft(inner_out), 0)
        if len(outputs) == 1:
            outputs = outputs[0]

        return (outputs, updates)

    ##
    # Step 4. Compile the dummy function
    ##

    # We can now compile a dummy function just to see what shared variables
    # we have and what their update rules are (note that the user has
    # the option not to pass the shared variables to scan, so we need to
    # pick them up manually and add them to scan).
    # Make the compilation as fast as possible by not applying any
    # optimization or conversion to C [ note this region is not important
    # for performance, so we can be as suboptimal as we wish ]

    # extract still-missing inputs (there might still be some) and add them
    # as non-sequences at the end of our args
    fake_nonseqs = [x.type() for x in non_seqs]
    fake_outputs = scan_utils.clone(outputs + updates.values(),
                                    replace=dict(zip(non_seqs,
                                                     fake_nonseqs)))
    all_inputs = itertools.ifilter(
        lambda x: (isinstance(x, gof.Variable) and
                   not isinstance(x, SharedVariable) and
                   not isinstance(x, gof.Constant)),
        gof.graph.inputs(fake_outputs))
    extra_inputs = filter(lambda x: x not in args + fake_nonseqs,
                                    all_inputs)
    non_seqs += extra_inputs
    # Note we do not use all_inputs directly since the order of variables
    # in args is quite important
    dummy_args += extra_inputs

    dummy_outs = outputs
    if condition is not None:
        dummy_outs.append(condition)

    # If we use a regular dict here, the results are non-deterministic
    if not isinstance(updates, (list, tuple)):
        if isinstance(updates, dict) and \
            not isinstance(updates, OrderedDict):
                warnings.warn("Using non-deterministic dictionary.")

    dummy_f = function(dummy_args,
                       dummy_outs,
                       updates=updates,
                       mode=compile.mode.Mode(linker='py',
                                              optimizer=None),
                      on_unused_input='ignore')

    ##
    # Step 5. Re-arrange inputs of scan into a stricter order
    ##

    # Step 5.0 Check the outputs of the dummy function to see if they
    # match with user provided data

    # if the number of outputs to the function does not match the number of
    # assumed outputs until now (provided by the user) there can be
    # only one explanation: No information is provided for any of the
    # outputs (i.e. we are dealing with a map)
    tmp_dummy_f_outs = len(dummy_f.maker.outputs)
    if as_while:
        tmp_dummy_f_outs -= 1
    if not (tmp_dummy_f_outs == n_outs or outs_info == []):
        raise ValueError('Please provide None as output_info for '
                         'any output that does not feed back into '
                         'scan (i.e. it behaves like a map) ')

    if outs_info == []:
        n_outs = len(dummy_f.maker.outputs)
        if as_while:
            n_outs = n_outs - 1
        outs_info = [dict(steps=n_steps) for x in xrange(n_outs)]

    # Step 5.1 Outputs with taps different from -1

    for i, out in enumerate(outs_info):
        if 'taps' in out and out['taps'] != [-1]:
            mit_sot_inner_outputs.append(outputs[i])

    # Step 5.2 Outputs with tap equal to -1
    for i, out in enumerate(outs_info):
        if 'taps' in out and out['taps'] == [-1]:
            sit_sot_inner_outputs.append(outputs[i])

    # Step 5.3 Outputs that correspond to update rules of shared variables
    givens = OrderedDict()
    n_shared_outs = 0
    shared_scan_inputs = []
    shared_inner_inputs = []
    shared_inner_outputs = []
    for input in dummy_f.maker.expanded_inputs:
        if isinstance(input.variable, SharedVariable) and input.update:
            new_var = safe_new(input.variable)
            if getattr(input.variable, 'name', None) is not None:
                new_var.name = input.variable.name + '_copy'
            shared_inner_inputs.append(new_var)
            shared_scan_inputs.append(input.variable)
            shared_inner_outputs.append(input.update)
            givens[input.variable] = new_var
            n_shared_outs += 1

    # Step 5.4 Outputs with no taps used in the input
    n_nit_sot = 0
    nit_sot_inner_outputs = []
    nit_sot_return_steps = OrderedDict()
    nit_sot_rightOrder = []
    for i, out in enumerate(outs_info):
        if not 'taps' in out:
            nit_sot_inner_outputs.append(outputs[i])
            if i in return_steps:
                nit_sot_return_steps[n_nit_sot] = return_steps[i]
            nit_sot_rightOrder.append(i)
            nit_sot_steps.append(out['steps'])
            n_nit_sot += 1

    # Step 5.5 all other arguments including extra inputs
    other_scan_args = []
    other_inner_args = []

    other_scan_args += [arg for arg in non_seqs
                        if (not isinstance(arg, SharedVariable) and
                            not isinstance(arg, tensor.Constant))]

    # Step 5.6 all shared variables with no update rules
    other_inner_args += [safe_new(arg, '_copy') for arg in non_seqs
                         if (not isinstance(arg, SharedVariable) and
                             not isinstance(arg, tensor.Constant))]

    givens.update(dict(zip(other_scan_args, other_inner_args)))
    other_shared_scan_args = [arg.variable for arg
                        in dummy_f.maker.expanded_inputs
                        if (isinstance(arg.variable, SharedVariable) and
                            not arg.update)]
    other_shared_inner_args = [safe_new(arg.variable, '_copy') for arg
                        in dummy_f.maker.expanded_inputs
                        if (isinstance(arg.variable, SharedVariable) and
                            not arg.update)]
    givens.update(dict(zip(other_shared_scan_args,
                           other_shared_inner_args)))

    ##
    # Step 6. Re-order the outputs and clone them replacing things
    # using the givens
    ##
    inner_inputs = (inner_seqs +
                    mit_mot_inner_inputs +
                    mit_sot_inner_inputs +
                    sit_sot_inner_inputs +
                    shared_inner_inputs +
                    other_shared_inner_args +
                    other_inner_args)

    inner_outs = (mit_mot_inner_outputs +
                  mit_sot_inner_outputs +
                  sit_sot_inner_outputs +
                  nit_sot_inner_outputs +
                  shared_inner_outputs)
    if condition is not None:
        inner_outs.append(condition)
    new_givens = OrderedDict()
    for w, w_copy in givens.iteritems():
        new_givens[w] = w.type.filter_variable(w_copy)

    new_outs = scan_utils.clone(inner_outs, replace=new_givens)

    ##
    # Step 7. Create the Scan Op
    ##

    tap_array = mit_sot_tap_array + [[-1] for x in xrange(n_sit_sot)]
    info = OrderedDict()

    info['tap_array'] = tap_array
    info['n_seqs'] = n_seqs
    info['n_mit_mot'] = n_mit_mot
    info['n_mit_mot_outs'] = n_mit_mot_outs
    info['mit_mot_out_slices'] = mit_mot_out_slices
    info['n_mit_sot'] = n_mit_sot
    info['n_sit_sot'] = n_sit_sot
    info['n_shared_outs'] = n_shared_outs
    info['n_nit_sot'] = n_nit_sot
    info['truncate_gradient'] = -1
    info['name'] = name
    info['mode'] = mode
    info['destroy_map'] = OrderedDict()
    info['inplace'] = False
    info['gpu'] = False
    info['as_while'] = as_while
    info['profile'] = profile
    info['_scan_savemem_visited'] = True
    info['allow_gc'] = allow_gc

    local_op = scan_op.Scan(inner_inputs, new_outs, info)

    ##
    # Step 8. Compute the outputs using the scan op
    ##
    _scan_inputs = (scan_seqs +
                    mit_mot_scan_inputs +
                    mit_sot_scan_inputs +
                    sit_sot_scan_inputs +
                    shared_scan_inputs +
                    nit_sot_steps +
                    other_shared_scan_args +
                    other_scan_args)

    scan_inputs = []
    for arg in [actual_n_steps] + _scan_inputs:
        if not isinstance(arg, gof.Variable):
            arg = tensor.as_tensor_variable(arg)
        scan_inputs += [arg]
    scan_outs = local_op(*scan_inputs)
    if type(scan_outs) not in (list, tuple):
        scan_outs = [scan_outs]
    ##
    # Step 9. Figure out which outs are update rules for shared variables
    # and so on ...
    ##

    update_map = OrderedUpdates()

    offset = n_mit_mot
    offsets = [abs(numpy.min(x)) for x in mit_sot_tap_array]
    mit_sot_outs = scan_outs[offset:offset + n_mit_sot]

    offset += n_mit_sot
    offsets = [1 for x in xrange(n_sit_sot)]
    sit_sot_outs = scan_outs[offset:offset + n_sit_sot]

    offset += n_sit_sot
    nit_sot_outs = scan_outs[offset:offset + n_nit_sot]

    offset += n_nit_sot
    for idx, update_rule in enumerate(
                scan_outs[offset:offset + n_shared_outs]):
        update_map[shared_scan_inputs[idx]] = update_rule

    _scan_out_list = (mit_sot_outs +
                      sit_sot_outs +
                      nit_sot_outs)
    # Step 10. I need to reorder the outputs to be in the order expected by
    # the user
    rightOrder = (mit_sot_rightOrder +
                  sit_sot_rightOrder +
                  nit_sot_rightOrder)
    scan_out_list = [None] * len(rightOrder)
    for idx, pos in enumerate(rightOrder):
        scan_out_list[pos] = _scan_out_list[idx]
    if len(scan_out_list) == 1:
        scan_out_list = scan_out_list[0]
    elif len(scan_out_list) == 0:
        scan_out_list = None

    assert isinstance(update_map, OrderedDict)
    return (scan_out_list, update_map)
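Below is a hedged expansion of example (b) from the docstring above into a self-contained run, assuming this experimental scan variant and its (Python 2-era) Theano imports are available in the current module; x0, the buffer size of 6, and the 5-step loop mirror the docstring and are otherwise placeholders.

# Hedged usage sketch of the membuf-based scan (see lead-in for assumptions).
import numpy as np
import theano
import theano.tensor as TT

x0 = theano.shared(np.zeros(4, dtype=theano.config.floatX), name='x0')
state = TT.alloc(np.asarray(0., dtype=theano.config.floatX), 6, x0.shape[0])
state = TT.set_subtensor(state[0], x0)          # first buffer row holds the initial state

out, updates = scan(lambda x: x + 1, states=state, n_steps=5)
f = theano.function([], out[1:], updates=updates)
print(f())                                      # drops the initial row; each row is prev + 1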
Example #9
    def set_objective(self,
                      loss,
                      params,
                      inputs=None,
                      updts=None,
                      grads=None,
                      polyak_averaging=None,
                      clip=None,
                      trust_input=True,
                      compilation_mode=None,
                      **kwargs):
        '''
            Changes the objective function to be optimized
            @param loss theano graph representing the loss to be optimized
            @param params theano shared variables representing the parameters
                          to be optimized
            @param inputs theano variables representing the inputs required to
                          compute the loss, other than params
            @param updts dictionary or list of theano updates to be applied
                         after every evaluation of the loss function
            @param grads gradients of the loss function. If not provided, will
                         be computed here
            @param kwargs arguments to pass to the lasagne.updates function
        '''
        if inputs is None:
            inputs = []

        if updts is not None:
            updts = OrderedUpdates(updts)

        if grads is None:
            utils.print_with_stamp('Building computation graph for gradients',
                                   self.name)
            grads = theano.grad(loss, params)
            if clip is not None:
                utils.print_with_stamp(
                    "Clipping gradients to norm %s" % (str(clip)), self.name)
                grads = lasagne.updates.total_norm_constraint(grads, clip)
            else:
                utils.print_with_stamp("No gradient clipping", self.name)

        utils.print_with_stamp("Computing parameter update rules", self.name)
        min_method_updt = LASAGNE_MIN_METHODS[self.min_method]
        grad_updates = min_method_updt(grads, params, **kwargs)

        outputs = [loss] + grads
        grad_updates = grad_updates + updts
        if polyak_averaging and polyak_averaging > 0.0:
            # create copy of params
            params_avg = [
                theano.shared(p.get_value(borrow=False,
                                          return_internal_type=True),
                              broadcastable=p.broadcastable,
                              name=p.name + '_copy') for p in params
            ]

            # prepare updates for polyak averaging
            t = theano.shared(np.array(1, dtype=floatX))
            b = polyak_averaging

            replace_dict = OrderedDict()
            for p, pp in zip(params, params_avg):
                grad_updates[pp] = ((b - b**t) * pp +
                                    (1 - b) * grad_updates[p]) / (1 - b**t)
                replace_dict[p] = pp
            grad_updates[t] = t + 1

            outputs[0] = theano.clone(loss, replace=replace_dict, strict=True)
            self.params_avg = params_avg

        else:
            if hasattr(self, 'params_avg'):
                delattr(self, 'params_avg')

        utils.print_with_stamp('Compiling function for loss', self.name)
        # converts inputs to shared variables to avoid repeated gpu transfers
        self.shared_inpts = [
            theano.shared(np.empty([1] * inp.ndim, dtype=inp.dtype),
                          name=inp.name) for inp in inputs
        ]

        givens_dict = dict(zip(inputs, self.shared_inpts))
        self.loss_fn = theano.function([],
                                       loss,
                                       updates=updts,
                                       on_unused_input='ignore',
                                       allow_input_downcast=True,
                                       givens=givens_dict,
                                       mode=compilation_mode)
        self.loss_fn.trust_input = trust_input

        utils.print_with_stamp("Compiling parameter updates", self.name)

        self.update_params_fn = theano.function([],
                                                outputs,
                                                updates=grad_updates,
                                                on_unused_input='ignore',
                                                allow_input_downcast=True,
                                                givens=givens_dict,
                                                mode=compilation_mode)
        self.update_params_fn.trust_input = trust_input

        self.n_evals = 0
        self.start_time = 0
        self.iter_time = 0
        self.params = params
        self.optimizer_state = [s for s in grad_updates.keys()]
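The polyak_averaging branch above maintains the bias-corrected exponential moving average of each parameter directly, through the recursion pp_t = ((b - b**t) * pp_{t-1} + (1 - b) * p_t) / (1 - b**t). Below is a small numpy check (added here, with arbitrary placeholder values) that this recursion matches the usual m_t / (1 - b**t) form, where m_t = b * m_{t-1} + (1 - b) * p_t.

# Sanity check of the bias-corrected moving-average recursion (illustrative values).
import numpy as np

b, params = 0.9, [0.5, 1.0, -2.0, 3.0]
pp, m = 0.0, 0.0
for t, p in enumerate(params, start=1):
    pp = ((b - b**t) * pp + (1 - b) * p) / (1 - b**t)   # recursive form, as in the code
    m = b * m + (1 - b) * p                             # uncorrected EMA
    assert np.isclose(pp, m / (1 - b**t))               # direct bias-corrected form
print(pp)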
Example #10
    def get_output_for(self,
                       inputs,
                       accumulate_updates="warn",
                       recurrence_flags={},
                       **kwargs):
        """
        Returns the history of agent interaction with the environment for a given number of turns.
        
        parameters:
            inputs - [state init]  + [input_nonsequences] + [input_sequences]
                Each part is a list of theano expressions for layers in the order they were
                provided when creating this layer.
            recurrence_flags - a dict of flags to be passed to the one-step agent (anything that lasagne supports),
                e.g. {"deterministic": True}
        returns:
            [state_sequences] + [output sequences] - a list of all states and all outputs sequences
            Shape of each such sequence is [batch, tick, shape_of_one_state_or_output...]
        """
        #aliases
        n_states = len(self.state_variables)
        n_state_inits = len(self.state_init)
        n_input_nonseq = len(self.input_nonsequences)
        n_input_seq = len(self.input_sequences)
        n_outputs = len(self.tracked_outputs)

        #slice inputs

        if self.mask_input is not None:
            mask, inputs = inputs[0], inputs[1:]

        initial_states_provided, nonsequences, sequences = unpack_list(
            inputs, [n_state_inits, n_input_nonseq, n_input_seq])

        # infer batch size
        if self.batch_size is not None:
            batch_size = self.batch_size
        elif len(inputs) != 0:
            batch_size = inputs[0].shape[0]
        else:
            raise ValueError(
                "Need to set batch_size explicitly for recurrence")

        # reshape sequences from [batch, time, ...] to [time,batch,...] to fit scan
        sequences = [seq.swapaxes(1, 0) for seq in sequences]

        #here we create outputs_info for scan
        ## initial states that are given as input
        initial_states_provided = OrderedDict(
            list(zip(self.state_init, initial_states_provided)))

        def get_initial_state(state_out_layer, batch_size=batch_size):
            """Pick dedicated initial state or create zeros of appropriate shape and dtype"""
            # if we have a dedicated init, use it
            if state_out_layer in initial_states_provided:
                initial_state = initial_states_provided[state_out_layer]
            # otherwise initialize with zeros
            else:
                dtype = get_layer_dtype(state_out_layer)
                initial_state = T.zeros(
                    (batch_size, ) + tuple(state_out_layer.output_shape[1:]),
                    dtype=dtype)

                #cast to non-broadcastable tensortype
                t_state = T.TensorType(dtype, (False, ) * initial_state.ndim)
                initial_state = t_state.convert_variable(initial_state)
                assert initial_state is not None  #if None, conversion failed. report ASAP

            return initial_state

        initial_states = list(map(get_initial_state, self.state_variables))

        #dummy values for initial outputs. They play no role in the computation, but if nonsequences are present
        # AND scan is not unrolled, the step function will not receive prev outputs as parameters, while
        # with unroll_scan these parameters are present. We forcibly initialize outputs to prevent
        # complications during parameter parsing in the step function below.
        initial_output_fillers = list(
            map(get_initial_state, self.tracked_outputs))

        outputs_info = initial_states + initial_output_fillers

        # recurrent step function
        def step(*args):

            sequence_slices, prev_states, prev_outputs, nonsequences = \
                unpack_list(args, [n_input_seq, n_states, n_outputs, n_input_nonseq])
            # make dicts of prev_states and inputs
            prev_states_dict = OrderedDict(
                zip(list(self.state_variables.keys()), prev_states))

            input_layers = list(
                chain(self.input_nonsequences.keys(),
                      self.input_sequences.keys()))
            assert len(input_layers) == len(nonsequences + sequence_slices)

            inputs_dict = OrderedDict(
                zip(input_layers, nonsequences + sequence_slices))

            # call one step recurrence
            new_states, new_outputs = self.get_one_step(
                prev_states_dict, inputs_dict, **recurrence_flags)

            #make sure output variable is of exactly the same type as corresponding input

            get_type = lambda tensor: T.TensorType(
                tensor.dtype,
                tensor.broadcastable,
                sparse_grad=getattr(tensor.type, "sparse_grad", False))

            new_states = [
                get_type(prev_state).convert_variable(
                    state.astype(prev_state.dtype))
                for (prev_state, state) in zip(prev_states, new_states)
            ]
            assert None not in new_states, "Some state variables have a different dtype/shape than their initial values."

            new_outputs = [
                get_type(prev_out).convert_variable(out.astype(prev_out.dtype))
                for (prev_out, out) in zip(prev_outputs, new_outputs)
            ]
            assert None not in new_outputs, "Some of the tracked outputs have a shape/dtype changing over time. Please report this."

            return new_states + new_outputs

        ###handling mask_input###

        #a step function that utilizes a mask
        def step_masked(mask_t, *args):
            #unpack arrays
            sequence_slices, prev_states, prev_outputs, nonsequences = \
                unpack_list(args, [n_input_seq, n_states, n_outputs, n_input_nonseq])

            #get regular step
            new_states_and_outputs = step(*args)
            old_states_and_outputs = prev_states + prev_outputs

            #if mask_t, return new ones, else return old ones
            def apply_mask(mask_t, new_state, old_state):
                assert new_state.ndim == old_state.ndim
                ndim = new_state.ndim
                #append dims to mask
                pattern = list(range(
                    mask_t.ndim)) + ['x'] * (ndim - mask_t.ndim)
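                # e.g. a mask_t of shape [batch] and a state of shape [batch, units]
                # give pattern [0, 'x'], broadcasting the mask over the unit axis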

                return T.switch(mask_t.dimshuffle(pattern), new_state,
                                old_state)

            next_states_and_outputs = [
                apply_mask(mask_t, new_state,
                           old_state) for new_state, old_state in zip(
                               new_states_and_outputs, old_states_and_outputs)
            ]

            return next_states_and_outputs

        if self.mask_input is not None:
            sequences = [mask.swapaxes(1, 0)] + sequences
            step_function = step_masked
        else:
            step_function = step

        #scan itself
        if self.unroll_scan:
            # call scan itself
            history = unroll_scan(step_function,
                                  sequences=sequences,
                                  outputs_info=outputs_info,
                                  non_sequences=nonsequences,
                                  n_steps=self.n_steps)
            #if explicitly asked to reset updates, do so
            if accumulate_updates == False:
                self.updates = OrderedUpdates()

        else:
            history, updates = theano.scan(step_function,
                                           sequences=sequences,
                                           outputs_info=outputs_info,
                                           non_sequences=nonsequences,
                                           n_steps=self.n_steps)

            if accumulate_updates in (True, 'warn'):
                self.updates += updates
            else:  #replace updates
                self.updates = updates

            #check if user received last updates
            if not self._updates_received and accumulate_updates == 'warn':
                warn(
                    "You called get_output from recurrence several times without gathering the updates.\n"
                    "(A) If you wanted to get two outputs from recurrence, use NOT\n"
                    ">>>out1 = get_output(rec[layer1])\n"
                    ">>>out2 = get_output(rec[layer2])\n"
                    "but instead:\n"
                    ">>>out1,out2 = get_output((rec[layer1],rec[layer2])) #or rec[layer1,layer2].\n"
                    "(B) If you want to run recurrence several times and accumulate updates from all runs,"
                    "use get_output(...,accumulate_updates=True) to silence the warning.\n"
                    "(C) If you want to get rid of old updates, use get_output(...,accumulate_updates=False)\n"
                )

            if len(self.updates) != 0:
                self._updates_received = False
                warn(
                    "Recurrent loop without unroll_scan got nonempty random state updates list. That happened"
                    " because there is some source of randomness (e.g. dropout) inside recurrent step graph."
                    " To compile such graph, one must either call .get_automatic_updates() right after .get_output"
                    " and pass these updates to a function when compiling theano.function.",
                    verbosity_level=2)

        # reordering from [time,batch,...] to [batch,time,...]
        history = [(var.swapaxes(1, 0) if var.ndim > 1 else var)
                   for var in check_list(history)]

        assert len(history) == n_states + n_outputs

        state_seqs, output_seqs = unpack_list(history, [n_states, n_outputs])

        # handle delayed_states
        # selectively shift state sequences by 1 tick into the past, padding with their initialisations
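        # e.g. a 3-tick state sequence [s1, s2, s3] becomes [s_init, s1, s2]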
        for i in range(len(state_seqs)):
            if list(self.state_variables.keys())[i] in self.delayed_states:
                state_seq = state_seqs[i]
                state_init = initial_states[i]
                state_seq = T.concatenate(
                    [insert_dim(state_init, 1), state_seq[:, :-1]], axis=1)
                state_seqs[i] = state_seq

        #keys corresponding to output sequences. Note that we do not use self.keys() to correctly
        # handle cases where some variable is present in both state_variables and tracked_outputs
        output_keys = list(self.state_variables.keys()) + list(
            self.tracked_outputs)
        output_values = state_seqs + output_seqs
        assert len(output_keys) == len(output_values)
        return OrderedDict(zip(output_keys, output_values))
Example #11
def scan(fn,
         sequences=None,
         outputs_info=None,
         non_sequences=None,
         n_steps=None,
         truncate_gradient=-1,
         go_backwards=False,
         mode=None,
         name=None,
         profile=False,
         allow_gc=None,
         strict=False):
    """
    This function constructs and applies a Scan op to the provided
    arguments.

    :param fn:
        ``fn`` is a function that describes the operations involved in one
        step of ``scan``. ``fn`` should construct variables describing the
        output of one iteration step. It should expect as input theano
        variables representing all the slices of the input sequences
        and previous values of the outputs, as well as all other arguments
        given to scan as ``non_sequences``. The order in which scan passes
        these variables to ``fn``  is the following :

        * all time slices of the first sequence
        * all time slices of the second sequence
        * ...
        * all time slices of the last sequence
        * all past slices of the first output
        * all past slices of the second output
        * ...
        * all past slices of the last output
        * all other arguments (the list given as `non_sequences` to
            scan)

        The order of the sequences is the same as the one in the list
        `sequences` given to scan. The order of the outputs is the same
        as the order of ``outputs_info``. For any sequence or output the
        order of the time slices is the same as the one in which they have
        been given as taps. For example if one writes the following :

        .. code-block:: python

            scan(fn, sequences = [ dict(input= Sequence1, taps = [-3,2,-1])
                                 , Sequence2
                                 , dict(input =  Sequence3, taps = 3) ]
                   , outputs_info = [ dict(initial =  Output1, taps = [-3,-5])
                                    , dict(initial = Output2, taps = None)
                                    , Output3 ]
                   , non_sequences = [ Argument1, Argument2])

        ``fn`` should expect the following arguments in this given order:

        #. ``Sequence1[t-3]``
        #. ``Sequence1[t+2]``
        #. ``Sequence1[t-1]``
        #. ``Sequence2[t]``
        #. ``Sequence3[t+3]``
        #. ``Output1[t-3]``
        #. ``Output1[t-5]``
        #. ``Output3[t-1]``
        #. ``Argument1``
        #. ``Argument2``

        The list of ``non_sequences`` can also contain shared variables
        used in the function, though ``scan`` is able to figure those
        out on its own so they can be skipped. For the clarity of the
        code we nonetheless recommend providing them to scan. To some extent
        ``scan`` can also figure out other ``non sequences`` (not shared)
        even if not passed to scan (but used by ``fn``). A simple example of
        this would be:

        .. code-block:: python

            import theano.tensor as TT
            W   = TT.matrix()
            W_2 = W**2
            def f(x):
                return TT.dot(x,W_2)

        The function is expected to return two things. One is a list of
        outputs ordered in the same order as ``outputs_info``, with the
        difference that there should be only one output variable per
        output initial state (even if no tap value is used). Secondly
        `fn` should return an update dictionary (that tells how to
        update any shared variable after each iteration step). The
        dictionary can optionally be given as a list of tuples. There is
        no constraint on the order of these two lists; ``fn`` can return
        either ``(outputs_list, update_dictionary)`` or
        ``(update_dictionary, outputs_list)`` or just one of the two (in
        case the other is empty).
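
        For example, a step function that returns one output together with an
        update rule for a shared variable (a minimal sketch, assuming the usual
        ``import theano`` and ``import theano.tensor as TT``) could look like:

        .. code-block:: python

            counter = theano.shared(0, name='counter')

            def fn(x_t, y_tm1):
                # one output plus an update dictionary for `counter`
                return y_tm1 + x_t, {counter: counter + 1}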

        To use ``scan`` as a while loop, the user needs to change the
        function ``fn`` such that also a stopping condition is returned.
        To do so, he/she needs to wrap the condition in an ``until`` class.
        The condition should be returned as a third element, for example:

        .. code-block:: python

            ...
            return [y1_t, y2_t], {x:x+1}, theano.scan_module.until(x < 50)

        Note that a number of steps (considered here as the maximum
        number of steps) is still required even though a condition is
        passed (and it is used to allocate memory if needed).

    :param sequences:
        ``sequences`` is the list of Theano variables or dictionaries
        describing the sequences ``scan`` has to iterate over. If a
        sequence is given as wrapped in a dictionary, then a set of optional
        information can be provided about the sequence. The dictionary
        should have the following keys:

        * ``input`` (*mandatory*) -- Theano variable representing the
          sequence.

        * ``taps`` -- Temporal taps of the sequence required by ``fn``.
          They are provided as a list of integers, where a value ``k``
          implies that at iteration step ``t`` scan will pass to ``fn``
          the slice ``t+k``. Default value is ``[0]``

        Any Theano variable in the list ``sequences`` is automatically
        wrapped into a dictionary where ``taps`` is set to ``[0]``
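
        For example, a sequence whose previous and current elements are both
        needed at every step could be passed as follows (a sketch; ``X`` is an
        assumed Theano matrix and ``TT`` stands for ``theano.tensor``):

        .. code-block:: python

            X = TT.matrix('X')
            sequences = [dict(input=X, taps=[-1, 0])]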


    :param outputs_info:
        ``outputs_info`` is the list of Theano variables or dictionaries
        describing the initial state of the outputs computed
        recurrently. When these initial states are given as a dictionary,
        optional information can be provided about the output corresponding
        to these initial states. The dictionary should have the following
        keys:

        * ``initial`` -- Theano variable that represents the initial
          state of a given output. In case the output is not computed
          recursively (think of a map) and does not require an initial
          state this field can be skipped. Given that (only) the previous
          time step of the output is used by ``fn``, the initial state
          **should have the same shape** as the output and **should not
          involve a downcast** of the data type of the output. If multiple
          time taps are used, the initial state should have one extra
          dimension that should cover all the possible taps. For example
          if we use ``-5``, ``-2`` and ``-1`` as past taps, at step 0,
          ``fn`` will require (by an abuse of notation) ``output[-5]``,
          ``output[-2]`` and ``output[-1]``. This will be given by
          the initial state, which in this case should have the shape
          (5,)+output.shape. If this variable containing the initial
          state is called ``init_y`` then ``init_y[0]`` *corresponds to*
          ``output[-5]``. ``init_y[1]`` *corresponds to* ``output[-4]``,
          ``init_y[2]`` corresponds to ``output[-3]``, ``init_y[3]``
          corresponds to ``output[-2]``, ``init_y[4]`` corresponds to
          ``output[-1]``. While this order might seem strange, it comes
          naturally from splitting an array at a given point. Assume that
          we have an array ``x``, and we choose ``k`` to be time step
          ``0``. Then our initial state would be ``x[:k]``, while the
          output will be ``x[k:]``. Looking at this split, elements in
          ``x[:k]`` are ordered exactly like those in ``init_y``.
        * ``taps`` -- Temporal taps of the output that will be passed to
          ``fn``. They are provided as a list of *negative* integers,
          where a value ``k`` implies that at iteration step ``t`` scan
          will pass to ``fn`` the slice ``t+k``.

        ``scan`` will follow this logic if partial information is given:

        * If an output is not wrapped in a dictionary, ``scan`` will wrap
          it in one assuming that you use only the last step of the output
          (i.e. it makes your tap value list equal to [-1]).
        * If you wrap an output in a dictionary and you do not provide any
          taps but you provide an initial state it will assume that you are
          using only a tap value of -1.
        * If you wrap an output in a dictionary but you do not provide any
          initial state, it assumes that you are not using any form of
          taps.
        * If you provide a ``None`` instead of a variable or an empty
          dictionary, ``scan`` assumes that you will not use any taps for
          this output (as, for example, in the case of a map)

        If ``outputs_info`` is an empty list or None, ``scan`` assumes
        that no tap is used for any of the outputs. If information is
        provided just for a subset of the outputs an exception is
        raised (because there is no convention on how scan should map
        the provided information to the outputs of ``fn``)
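
        For example, an output that uses its values at taps ``-5``, ``-2`` and
        ``-1`` needs an initial state covering five past steps (a sketch,
        assuming ``TT`` is ``theano.tensor`` and ``dim`` is the length of one
        output step):

        .. code-block:: python

            init_y = TT.zeros((5, dim))
            outputs_info = [dict(initial=init_y, taps=[-5, -2, -1])]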


    :param non_sequences:
        ``non_sequences`` is the list of arguments that are passed to
        ``fn`` at each step. One can opt to exclude variables
        used in ``fn`` from this list as long as they are part of the
        computational graph, though for clarity we encourage not to do so.


    :param n_steps:
        ``n_steps`` is the number of steps to iterate given as an int
        or Theano scalar. If any of the input sequences do not have
        enough elements, scan will raise an error. If the *value is 0* the
        outputs will have *0 rows*. If the value is negative, ``scan``
        will run backwards in time. If the ``go_backwards`` flag is already
        set and also ``n_steps`` is negative, ``scan`` will run forward
        in time. If n_steps is not provided, ``scan`` will figure
        out the number of steps it should run given its input sequences.


    :param truncate_gradient:
        ``truncate_gradient`` is the number of steps to use in truncated
        BPTT.  If you compute gradients through a scan op, they are
        computed using backpropagation through time. By providing a
        value different from -1, you choose to use truncated BPTT instead
        of classical BPTT, where you go back only ``truncate_gradient``
        steps in time.


    :param go_backwards:
        ``go_backwards`` is a flag indicating if ``scan`` should go
        backwards through the sequences. If you think of each sequence
        as indexed by time, making this flag True would mean that
        ``scan`` goes back in time, namely that for any sequence it
        starts from the end and goes towards 0.


    :param name:
        When profiling ``scan``, it is crucial to provide a name for any
        instance of ``scan``. The profiler will produce an overall
        profile of your code as well as profiles for the computation of
        one step of each instance of ``scan``. The ``name`` of the instance
        appears in those profiles and can greatly help to disambiguate
        information.

    :param mode:
        It is recommended to leave this argument to None, especially
        when profiling ``scan`` (otherwise the results are not going to
        be accurate). If you prefer the computations of one step of
        ``scan`` to be done differently than the entire function, you
        can use this parameter to describe how the computations in this
        loop are done (see ``theano.function`` for details about
        possible values and their meaning).

    :param profile:
        Flag or string. If true, or different from the empty string, a
        profile object will be created and attached to the inner graph of
        scan. In case ``profile`` is True, the profile object will have the
        name of the scan instance, otherwise it will have the passed string.
        The profile object collects (and prints) information only when running
        the inner graph with the new CVM linker (with default modes; with
        other linkers this argument is useless).

    :param allow_gc:
        Set the value of allow gc for the internal graph of scan.  If
        set to None, this will use the value of config.scan.allow_gc.

    :param strict:
        If true, all the shared variables used in ``fn`` must be provided as a
        part of ``non_sequences`` or ``sequences``. 

    :rtype: tuple
    :return: tuple of the form (outputs, updates); ``outputs`` is either a
             Theano variable or a list of Theano variables representing the
             outputs of ``scan`` (in the same order as in
             ``outputs_info``). ``updates`` is a subclass of dictionary
             specifying the update rules for all shared variables used in scan.
             This dictionary should be passed to ``theano.function`` when
             you compile your function. The change compared to a normal
             dictionary is that we validate that the keys are SharedVariables
             and that additions of such dictionaries are validated to be consistent.
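
    As a minimal usage sketch (assuming the usual ``import theano`` and
    ``import theano.tensor as TT``), a cumulative sum over a vector can be
    written as:

    .. code-block:: python

        x = TT.vector('x')
        results, updates = scan(fn=lambda x_t, acc_tm1: acc_tm1 + x_t,
                                sequences=x,
                                outputs_info=TT.zeros_like(x[0]))
        cumsum = theano.function([x], results, updates=updates)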
    """
    # General observation : this code is executed only once, at creation
    # of the computational graph, so we don't yet need to be smart about
    # anything (to speed things up)

    ##
    # Step 1. Wrap all inputs in dictionaries and add default values
    ##

    # check if inputs are just single variables instead of lists
    def wrap_into_list(x):
        '''
        Wrap the input into a list if it is not already a list
        '''
        if x is None:
            return []
        elif not isinstance(x, (list, tuple)):
            return [x]
        else:
            return list(x)

    seqs = wrap_into_list(sequences)
    outs_info = wrap_into_list(outputs_info)

    # Make sure we get rid of numpy arrays or ints or anything like that
    # passed as inputs to scan
    non_seqs = []
    for elem in wrap_into_list(non_sequences):
        if not isinstance(elem, gof.Variable):
            non_seqs.append(tensor.as_tensor_variable(elem))
        else:
            non_seqs.append(elem)

    # If we provided a known number of steps ( before compilation)
    # and if that number is 1 or -1, then we can skip the Scan Op,
    # and just apply the inner function once
    # To do that we check here to see the nature of n_steps
    n_fixed_steps = None

    if isinstance(n_steps, (float, int)):
        n_fixed_steps = int(n_steps)
    else:
        try:
            n_fixed_steps = opt.get_scalar_constant_value(n_steps)
        except tensor.basic.NotScalarConstantError:
            n_fixed_steps = None

    # Check n_steps is an int
    if (hasattr(n_steps, 'dtype') and
        str(n_steps.dtype)[:3] not in ('uin', 'int')):
        raise ValueError(' n_steps must be an int. dtype provided '
                         'is %s' % n_steps.dtype)

    # compute number of sequences and number of outputs
    n_seqs = len(seqs)
    n_outs = len(outs_info)

    return_steps = OrderedDict()
    # wrap sequences in a dictionary if they are not already dictionaries
    for i in xrange(n_seqs):
        if not isinstance(seqs[i], dict):
            seqs[i] = OrderedDict([('input', seqs[i]), ('taps', [0])])
        elif seqs[i].get('taps', None) is not None:
            seqs[i]['taps'] = wrap_into_list(seqs[i]['taps'])
        elif seqs[i].get('taps', None) is None:
            # seqs dictionary does not have the ``taps`` key
            seqs[i]['taps'] = [0]

    # wrap outputs info in a dictionary if they are not already in one
    for i in xrange(n_outs):
        if outs_info[i] is not None:
            if isinstance(outs_info[i], dict):
                # DEPRECATED :
                if outs_info[i].get('return_steps', None) is not None:
                    raise ValueError(
                            "Using `return_steps` has been deprecated. "
                            "Simply select the entries you need using a "
                            "subtensor. Scan will optimize memory "
                            "consumption, so do not worry about that.")
                # END

            if not isinstance(outs_info[i], dict):
                # by default any output has a tap value of -1
                outs_info[i] = OrderedDict([('initial', outs_info[i]), ('taps', [-1])])
            elif (outs_info[i].get('initial', None) is None and
                    outs_info[i].get('taps', None) is not None):
                # ^ no initial state but taps provided
                raise ValueError(('If you are using slices of an output '
                                  'you need to provide a initial state '
                                  'for it'), outs_info[i])
            elif (outs_info[i].get('initial', None) is not None and
                  outs_info[i].get('taps', None) is None):
                # ^ initial state but taps not provided
                if 'taps' in outs_info[i]:
                    # ^ explicitly provided a None for taps
                    _logger.warning('Output %s ( index %d) has a initial '
                            'state but taps is explicitly set to None ',
                             getattr(outs_info[i]['initial'], 'name', 'None'),
                             i)
                outs_info[i]['taps'] = [-1]
        else:
            # if a None is provided as the output info we replace it
            # with an empty OrderedDict() to simplify handling
            outs_info[i] = OrderedDict()

    ##
    # Step 2. Generate inputs and outputs of the inner functions
    # for compiling a dummy function (Iteration #1)
    ##

    # create theano inputs for the recursive function
    # note : this is a first batch of possible inputs that will
    #        be compiled in a dummy function; we used this dummy
    #        function to detect shared variables and their updates
    #        and to construct a new and complete list of inputs and
    #        outputs

    n_seqs = 0
    scan_seqs = []     # Variables passed as inputs to the scan op
    inner_seqs = []    # Variables passed as inputs to the inner function
    inner_slices = []  # Actual slices if scan is removed from the picture
    # go through sequences picking up time slices as needed
    for i, seq in enumerate(seqs):
        # Note that you can have something like no taps for
        # a sequence, though it is highly unlikely in practice
        if 'taps' in seq:
            # go through the indicated slice
            mintap = numpy.min(seq['taps'])
            maxtap = numpy.max(seq['taps'])
            for k in seq['taps']:
                # create one slice of the input
                # Later on, if we decide not to use scan because we are
                # going for just one step, it makes things easier if we
                # compute the correct outputs here. This way we can use
                # the output of the lambda expression directly to replace
                # the output of scan.

                # If not we need to use copies, that will be replaced at
                # each frame by the corresponding slice
                actual_slice = seq['input'][k - mintap]
                _seq_val = tensor.as_tensor_variable(seq['input'])
                _seq_val_slice = _seq_val[k - mintap]
                nw_slice = _seq_val_slice.type()

                # Try to transfer test_value to the new variable
                if config.compute_test_value != 'off':
                    try:
                        nw_slice.tag.test_value = gof.Op._get_test_value(
                            _seq_val_slice)
                    except AttributeError as e:
                        if config.compute_test_value != 'ignore':
                            # No need to print a warning or raise an error now,
                            # it will be done when fn will be called.
                            _logger.info(('Cannot compute test value for '
                                'the inner function of scan, input value '
                                'missing %s'), e)

                # Add names to slices for debugging and pretty printing ..
                # that is if the input already has a name
                if getattr(seq['input'], 'name', None) is not None:
                    if k > 0:
                        nw_name = seq['input'].name + '[t+%d]' % k
                    elif k == 0:
                        nw_name = seq['input'].name + '[t]'
                    else:
                        nw_name = seq['input'].name + '[t%d]' % k
                    nw_slice.name = nw_name

                # We cut the sequence such that seq[i] corresponds to
                # seq[i-k]
                if maxtap < 0:
                    offset = abs(maxtap)
                else:
                    offset = 0
                if maxtap == mintap and maxtap != 0:
                    if maxtap < 0:
                        nw_seq = seq['input'][:maxtap]
                    else:
                        nw_seq = seq['input'][maxtap:]
                elif maxtap - k != 0:
                    nw_seq = seq['input'][offset + k - mintap: -(maxtap - k)]
                else:
                    nw_seq = seq['input'][offset + k - mintap:]
                if go_backwards:
                    nw_seq = nw_seq[::-1]

                scan_seqs.append(nw_seq)
                inner_seqs.append(nw_slice)
                inner_slices.append(actual_slice)
                n_seqs += 1

    # Since we've added all sequences now we need to level them up based on
    # n_steps or their different shapes
    lengths_vec = []
    for seq in scan_seqs:
        lengths_vec.append(seq.shape[0])

    if not scan_utils.isNaN_or_Inf_or_None(n_steps):
        # ^ N_steps should also be considered
        lengths_vec.append(tensor.as_tensor(n_steps))

    if len(lengths_vec) == 0:
        # ^ No information about the number of steps
        raise ValueError(' No information about the number of steps '
                         'provided. Either provide a value for '
                         'n_steps argument of scan or provide an input '
                         'sequence')

    # If the user has provided the number of steps, do that regardless ( and
    # raise an error if the sequences are not long enough )
    if scan_utils.isNaN_or_Inf_or_None(n_steps):
        actual_n_steps = lengths_vec[0]
        for contestant in lengths_vec[1:]:
            actual_n_steps = tensor.minimum(actual_n_steps, contestant)
    else:
        actual_n_steps = tensor.as_tensor(n_steps)

    # Add names -- it helps a lot when debugging

    for (nw_seq, seq) in zip(scan_seqs, seqs):
        if getattr(seq['input'], 'name', None) is not None:
            nw_seq.name = seq['input'].name + '[%d:]' % k

    scan_seqs = [seq[:actual_n_steps] for seq in scan_seqs]
    # Conventions :
    #   mit_mot = multiple input taps, multiple output taps ( only provided
    #             by the gradient function )
    #   mit_sot = multiple input taps, single output tap (t + 0)
    #   sit_sot = single input tap, single output tap (t + 0)
    #   nit_sot = no input tap, single output tap (t + 0)
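    #   e.g. an output declared with taps=[-2, -1] is a mit_sot, one with
    #   taps=[-1] is a sit_sot, and a map-style output (no taps) is a nit_sot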

    # MIT_MOT -- not provided by the user only by the grad function
    n_mit_mot = 0
    n_mit_mot_outs = 0
    mit_mot_scan_inputs = []
    mit_mot_inner_inputs = []
    mit_mot_inner_outputs = []
    mit_mot_out_slices = []
    mit_mot_rightOrder = []

    # SIT_SOT -- provided by the user
    n_mit_sot = 0
    mit_sot_scan_inputs = []
    mit_sot_inner_inputs = []
    mit_sot_inner_slices = []
    mit_sot_inner_outputs = []
    mit_sot_return_steps = OrderedDict()
    mit_sot_tap_array = []
    mit_sot_rightOrder = []

    n_sit_sot = 0
    sit_sot_scan_inputs = []
    sit_sot_inner_inputs = []
    sit_sot_inner_slices = []
    sit_sot_inner_outputs = []
    sit_sot_return_steps = OrderedDict()
    sit_sot_rightOrder = []

    # go through outputs picking up time slices as needed
    for i, init_out in enumerate(outs_info):
        # Note that our convention dictates that if an output uses
        # just the previous time step, as an initial state we will only
        # provide a tensor of the same dimension as one time step; this
        # makes the code much cleaner for those who do not use taps. Otherwise
        # they would always have had to shape_padleft the initial state,
        # which is ugly.
        if init_out.get('taps', None) == [-1]:

            actual_arg = init_out['initial']
            if not isinstance(actual_arg, tensor.Variable):
                actual_arg = tensor.as_tensor_variable(actual_arg)
            arg = safe_new(actual_arg)
            if isinstance(arg, tensor.Constant):
                # safe_new returns a clone of the constants, but that is not
                # what we need for initial states
                arg = arg.type()

            # Try to transfer test_value to the new variable
            if config.compute_test_value != 'off':
                try:
                    arg.tag.test_value = gof.Op._get_test_value(actual_arg)
                except AttributeError as e:
                    if config.compute_test_value != 'ignore':
                        # No need to print a warning or raise an error now,
                        # it will be done when fn will be called.
                        _logger.info(('Cannot compute test value for the '
                            'inner function of scan, input value missing %s'),
                                     e)

            if getattr(init_out['initial'], 'name', None) is not None:
                arg.name = init_out['initial'].name + '[t-1]'

            # We need now to allocate space for storing the output and copy
            # the initial state over. We do this using the expand function
            # defined in scan utils
            sit_sot_scan_inputs.append(
                scan_utils.expand(
                    tensor.unbroadcast(
                        tensor.shape_padleft(actual_arg), 0),
                    actual_n_steps
                ))

            sit_sot_inner_slices.append(actual_arg)
            if i in return_steps:
                sit_sot_return_steps[n_sit_sot] = return_steps[i]
            sit_sot_inner_inputs.append(arg)
            sit_sot_rightOrder.append(i)
            n_sit_sot += 1

        elif init_out.get('taps', None):

            if numpy.any(numpy.array(init_out.get('taps', [])) > 0):
                # Make sure we do not have requests for future values of an
                # output; we cannot provide such values
                raise ValueError('Can not use future taps of outputs',
                                    init_out)
            # go through the taps
            mintap = abs(numpy.min(init_out['taps']))
            mit_sot_tap_array.append(init_out['taps'])
            idx_offset = abs(numpy.min(init_out['taps']))
            # Sequence
            mit_sot_scan_inputs.append(
                scan_utils.expand(init_out['initial'][:mintap],
                                 actual_n_steps))

            if i in return_steps:
                mit_sot_return_steps[n_mit_sot] = return_steps[i]
            mit_sot_rightOrder.append(i)
            n_mit_sot += 1
            for k in init_out['taps']:
                # create a new slice
                actual_nw_slice = init_out['initial'][k + mintap]
                _init_out_var = tensor.as_tensor_variable(init_out['initial'])
                _init_out_var_slice = _init_out_var[k + mintap]
                nw_slice = _init_out_var_slice.type()

                # Try to transfer test_value to the new variable
                if config.compute_test_value != 'off':
                    try:
                        nw_slice.tag.test_value = gof.Op._get_test_value(
                            _init_out_var_slice)
                    except AttributeError as e:
                        if config.compute_test_value != 'ignore':
                            # No need to print a warning or raise an error now,
                            # it will be done when fn will be called.
                            _logger.info(('Cannot compute test value for '
                                'the inner function of scan, input value '
                                'missing. %s'), e)

                # give it a name for debugging and pretty printing
                if getattr(init_out['initial'], 'name', None) is not None:
                    if k > 0:
                        nw_slice.name = (init_out['initial'].name +
                                            '[t+%d]' % k)
                    elif k == 0:
                        nw_slice.name = init_out['initial'].name + '[t]'
                    else:
                        nw_slice.name = (init_out['initial'].name +
                                            '[t%d]' % k)
                mit_sot_inner_inputs.append(nw_slice)
                mit_sot_inner_slices.append(actual_nw_slice)
        # NOTE: there is another case, in which we do not want to provide
        #      any previous value of the output to the inner function (i.e.
        #      a map); in that case we do not have to do anything ..

    # Re-order args
    max_mit_sot = numpy.max([-1] + mit_sot_rightOrder) + 1
    max_sit_sot = numpy.max([-1] + sit_sot_rightOrder) + 1
    n_elems = numpy.max([max_mit_sot, max_sit_sot])
    _ordered_args = [[] for x in xrange(n_elems)]
    offset = 0
    for idx in xrange(n_mit_sot):
        n_inputs = len(mit_sot_tap_array[idx])
        if n_fixed_steps in [1, -1]:
            _ordered_args[mit_sot_rightOrder[idx]] = \
                            mit_sot_inner_slices[offset:offset + n_inputs]
        else:
            _ordered_args[mit_sot_rightOrder[idx]] = \
                            mit_sot_inner_inputs[offset:offset + n_inputs]
        offset += n_inputs

    for idx in xrange(n_sit_sot):
        if n_fixed_steps in [1, -1]:
            _ordered_args[sit_sot_rightOrder[idx]] = \
                                        [sit_sot_inner_slices[idx]]
        else:
            _ordered_args[sit_sot_rightOrder[idx]] = \
                                        [sit_sot_inner_inputs[idx]]

    ordered_args = []
    for ls in _ordered_args:
        ordered_args += ls
    if n_fixed_steps in [1, -1]:
        args = (inner_slices +
                ordered_args +
                non_seqs)

    else:
        args = (inner_seqs +
                ordered_args +
                non_seqs)

    # add only the non-shared variables and non-constants to the arguments of
    # the dummy function [ a function should not get shared variables or
    # constants as input ]
    dummy_args = [arg for arg in args
                  if (not isinstance(arg, SharedVariable) and
                      not isinstance(arg, tensor.Constant))]
    # when we apply the lambda expression we get a mixture of update rules
    # and outputs that needs to be separated

    condition, outputs, updates = scan_utils.get_updates_and_outputs(fn(*args))
    if condition is not None:
        as_while = True
    else:
        as_while = False
    ##
    # Step 3. Check if we actually need scan and remove it if we don't
    ##

    if n_fixed_steps in [1, -1]:
        # We do not need to use the scan op anymore, so we can just return
        # the outputs and updates we have
        if condition is not None:
            _logger.warning('When the number of steps is fixed and equal '
                    'to 1, the provided stopping condition, %s, '
                    'is ignored', str(condition))

        for pos, inner_out in enumerate(outputs):
            # we need to see if we need to pad our sequences with an
            # unbroadcastable dimension; example: we return an
            # output for which we want all intermediate values. If n_steps is 1
            # and we return the output as given by the inner function,
            # this will represent only a slice and it will have one
            # dimension less.
            if (isinstance(inner_out.type, tensor.TensorType) and
                return_steps.get(pos, 0) != 1):
                outputs[pos] = tensor.unbroadcast(
                    tensor.shape_padleft(inner_out), 0)
        if len(outputs) == 1:
            outputs = outputs[0]

        return (outputs, updates)

    ##
    # Step 4. Compile the dummy function
    ##

    # We can now compile a dummy function just to see what shared variables
    # we have and what their update rules are (note that the user has
    # the option not to pass the shared variables to scan, so we need to
    # pick them up manually and add them to scan).
    # Make the compilation as fast as possible by not applying any
    # optimization or conversion to C [ note this region is not important
    # for performance so we can do things as suboptimally as we wish ]

    # extract still missing inputs (there might still be some) and add them
    # as non sequences at the end of our args
    fake_nonseqs = [x.type() for x in non_seqs]
    fake_outputs = scan_utils.clone(outputs,
                                    replace=OrderedDict(zip(non_seqs,
                                                     fake_nonseqs)))
    all_inputs = itertools.ifilter(
        lambda x: (isinstance(x, gof.Variable) and
                   not isinstance(x, SharedVariable) and
                   not isinstance(x, gof.Constant)),
        gof.graph.inputs(fake_outputs))
    extra_inputs = [x for x in all_inputs if x not in args + fake_nonseqs]
    non_seqs += extra_inputs
    # Note we do not use all_inputs directly since the order of variables
    # in args is quite important
    dummy_args += extra_inputs

    dummy_outs = outputs
    if condition is not None:
        dummy_outs.append(condition)
    dummy_f = function(dummy_args,
                       dummy_outs,
                       updates=updates,
                       mode=compile.mode.Mode(linker='py',
                                              optimizer=None),
                       on_unused_input='ignore',
                       profile=False)

    ##
    # Step 5. Re-arange inputs of scan into a more strict order
    ##

    # Step 5.0 Check the outputs of the dummy function to see if they
    # match with user provided data

    # if the number of outputs to the function does not match the number of
    # assumed outputs until now (provided by the user) there can be
    # only one explanation: No information is provided for any of the
    # outputs (i.e. we are dealing with a map)
    tmp_dummy_f_outs = len(dummy_f.maker.outputs)
    if as_while:
        tmp_dummy_f_outs -= 1
    if not (tmp_dummy_f_outs == n_outs or outs_info == []):
        raise ValueError('Please provide None as outputs_info for '
                         'any output that does not feed back into '
                         'scan (i.e. it behaves like a map) ')

    if outs_info == []:
        n_outs = len(dummy_f.maker.outputs)
        if as_while:
            n_outs = n_outs - 1
        outs_info = [OrderedDict() for x in xrange(n_outs)]

    # Step 5.1 Outputs with taps different then -1

    for i, out in enumerate(outs_info):
        if 'taps' in out and out['taps'] != [-1]:
            mit_sot_inner_outputs.append(outputs[i])

    # Step 5.2 Outputs with tap equal to -1
    for i, out in enumerate(outs_info):
        if 'taps' in out and out['taps'] == [-1]:
            sit_sot_inner_outputs.append(outputs[i])

    # Step 5.3 Outputs that correspond to update rules of shared variables
    givens = OrderedDict()
    n_shared_outs = 0
    shared_scan_inputs = []
    shared_inner_inputs = []
    shared_inner_outputs = []
    sit_sot_shared = []
    for input in dummy_f.maker.expanded_inputs:
        if isinstance(input.variable, SharedVariable) and input.update:
            new_var = safe_new(input.variable)
            if getattr(input.variable, 'name', None) is not None:
                new_var.name = input.variable.name + '_copy'
            if isinstance(new_var.type, ops.expandable_types):
                sit_sot_inner_inputs.append(new_var)
                sit_sot_scan_inputs.append(
                    scan_utils.expand(
                        tensor.unbroadcast(
                            tensor.shape_padleft(input.variable), 0),
                        actual_n_steps))
                tensor_update = tensor.as_tensor_variable(input.update)
                sit_sot_inner_outputs.append(tensor_update)
                # Note that a negative `pos` is not a negative index. The sign
                # of `pos` is used as a flag to indicate if this output should
                # be part of the update rules or part of the standard outputs
                # of scan. If `pos` is positive then it corresponds to the
                # standard outputs of scan and it refers to the output of index
                # `pos`. If `pos` is negative then it corresponds to the update
                # rules of scan and it refers to the update rule of index -1 - `pos`.
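                # e.g. pos == -3 refers to the update rule of index 2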
                sit_sot_rightOrder.append(-1 - len(sit_sot_shared))
                sit_sot_shared.append(input.variable)
                givens[input.variable] = new_var

            else:
                shared_inner_inputs.append(new_var)
                shared_scan_inputs.append(input.variable)
                shared_inner_outputs.append(input.update)
                givens[input.variable] = new_var
                n_shared_outs += 1
    n_sit_sot = len(sit_sot_inner_inputs)
    # Step 5.4 Outputs with no taps used in the input
    n_nit_sot = 0
    nit_sot_inner_outputs = []
    nit_sot_return_steps = OrderedDict()
    nit_sot_rightOrder = []
    for i, out in enumerate(outs_info):
        if not 'taps' in out:
            nit_sot_inner_outputs.append(outputs[i])
            if i in return_steps:
                nit_sot_return_steps[n_nit_sot] = return_steps[i]
            nit_sot_rightOrder.append(i)
            n_nit_sot += 1

    # Step 5.5 all other arguments including extra inputs
    other_scan_args = []
    other_inner_args = []

    other_scan_args += [arg for arg in non_seqs
                        if (not isinstance(arg, SharedVariable) and
                            not isinstance(arg, tensor.Constant))]

    # Step 5.6 all shared variables with no update rules
    other_inner_args += [safe_new(arg, '_copy') for arg in non_seqs
                         if (not isinstance(arg, SharedVariable) and
                             not isinstance(arg, tensor.Constant))]

    givens.update(OrderedDict(zip(other_scan_args, other_inner_args)))

    if strict:
        non_seqs_set = set(non_sequences if non_sequences is not None else [])

        other_shared_scan_args = [arg.variable for arg
                            in dummy_f.maker.expanded_inputs
                            if (isinstance(arg.variable, SharedVariable) and
                                not arg.update and
                                arg.variable in non_seqs_set)]
        other_shared_inner_args = [safe_new(arg.variable, '_copy') for arg
                            in dummy_f.maker.expanded_inputs
                            if (isinstance(arg.variable, SharedVariable) and
                                not arg.update and
                                arg.variable in non_seqs_set)]
    else:
        other_shared_scan_args = [arg.variable for arg
                            in dummy_f.maker.expanded_inputs
                            if (isinstance(arg.variable, SharedVariable) and
                                not arg.update)]
        other_shared_inner_args = [safe_new(arg.variable, '_copy') for arg
                            in dummy_f.maker.expanded_inputs
                            if (isinstance(arg.variable, SharedVariable) and
                                not arg.update)]
    givens.update(OrderedDict(zip(other_shared_scan_args,
                           other_shared_inner_args)))

    ##
    # Step 6. Re-order the outputs and clone them replacing things
    # using the givens
    ##
    inner_inputs = (inner_seqs +
                    mit_mot_inner_inputs +
                    mit_sot_inner_inputs +
                    sit_sot_inner_inputs +
                    shared_inner_inputs +
                    other_shared_inner_args +
                    other_inner_args)

    inner_outs = (mit_mot_inner_outputs +
                  mit_sot_inner_outputs +
                  sit_sot_inner_outputs +
                  nit_sot_inner_outputs +
                  shared_inner_outputs)
    if condition is not None:
        inner_outs.append(condition)
    # Cuda is imported here, instead of being imported at the top of the file,
    # because it would force on the user some dependencies that we might not
    # want. Currently we are working on removing the dependencies on sandbox
    # code completely.
    from theano.sandbox import cuda
    if cuda.cuda_available:
        # very often we end up in this situation when we want to
        # replace w with w_copy, where w is a CudaNdarray
        # and w_copy is a TensorType. This is caused by shared
        # variables being put on the GPU right away.
        new_givens = OrderedDict()

        for w, w_copy in givens.iteritems():
            if (isinstance(w.type, cuda.CudaNdarrayType)
                and isinstance(w_copy.type, tensor.TensorType)):
                for o in inner_outs:
                    new_givens = traverse(o, w, w_copy, new_givens)
            else:
                new_givens[w] = w_copy
    else:
        new_givens = givens

    new_outs = scan_utils.clone(inner_outs, replace=new_givens)

    ##
    # Step 7. Create the Scan Op
    ##

    tap_array = mit_sot_tap_array + [[-1] for x in xrange(n_sit_sot)]
    if allow_gc is None:
        allow_gc = config.scan.allow_gc
    info = OrderedDict()

    info['tap_array'] = tap_array
    info['n_seqs'] = n_seqs
    info['n_mit_mot'] = n_mit_mot
    info['n_mit_mot_outs'] = n_mit_mot_outs
    info['mit_mot_out_slices'] = mit_mot_out_slices
    info['n_mit_sot'] = n_mit_sot
    info['n_sit_sot'] = n_sit_sot
    info['n_shared_outs'] = n_shared_outs
    info['n_nit_sot'] = n_nit_sot
    info['truncate_gradient'] = truncate_gradient
    info['name'] = name
    info['mode'] = mode
    info['destroy_map'] = OrderedDict()
    info['gpu'] = False
    info['as_while'] = as_while
    info['profile'] = profile
    info['allow_gc'] = allow_gc
    info['strict'] = strict
    if strict:
        warnings.warn('In strict mode, all necessary shared variables '
                      'must be passed as a part of non_sequences', Warning)

    local_op = scan_op.Scan(inner_inputs, new_outs, info)

    ##
    # Step 8. Compute the outputs using the scan op
    ##
    _scan_inputs = (scan_seqs +
                    mit_mot_scan_inputs +
                    mit_sot_scan_inputs +
                    sit_sot_scan_inputs +
                    shared_scan_inputs +
                    [actual_n_steps for x in xrange(n_nit_sot)] +
                    other_shared_scan_args +
                    other_scan_args)

    scan_inputs = []
    for arg in [actual_n_steps] + _scan_inputs:
        try:
            arg = tensor.as_tensor_variable(arg)
        except TypeError:
            # This happens e.g. for random states, but it is a good way
            # to make sure no input is a cuda ndarray
            pass
        scan_inputs += [arg]
    scan_outs = local_op(*scan_inputs)
    if type(scan_outs) not in (list, tuple):
        scan_outs = [scan_outs]
    ##
    # Step 9. Figure out which outs are update rules for shared variables
    # and so on ...
    ##

    update_map = OrderedUpdates()

    def remove_dimensions(outs, steps_return, offsets=None):
        out_ls = []
        for idx, out in enumerate(outs):
            if idx in steps_return:
                if steps_return[idx] > 1:
                    out_ls.append(out[-steps_return[idx]:])
                else:
                    out_ls.append(out[-1])
            else:
                if offsets is None:
                    out_ls.append(out)
                else:
                    out_ls.append(out[offsets[idx]:])
        return out_ls

    offset = n_mit_mot
    offsets = [abs(numpy.min(x)) for x in mit_sot_tap_array]
    mit_sot_outs = remove_dimensions(
        scan_outs[offset:offset + n_mit_sot],
        mit_sot_return_steps,
        offsets)

    offset += n_mit_sot
    offsets = [1 for x in xrange(n_sit_sot)]
    sit_sot_outs = remove_dimensions(
        scan_outs[offset:offset + n_sit_sot],
        sit_sot_return_steps,
        offsets)

    offset += n_sit_sot
    nit_sot_outs = remove_dimensions(
        scan_outs[offset:offset + n_nit_sot],
        nit_sot_return_steps)

    offset += n_nit_sot
    for idx, update_rule in enumerate(
                scan_outs[offset:offset + n_shared_outs]):
        update_map[shared_scan_inputs[idx]] = update_rule

    _scan_out_list = (mit_sot_outs +
                      sit_sot_outs +
                      nit_sot_outs)
    # Step 10. I need to reorder the outputs to be in the order expected by
    # the user
    rightOrder = (mit_sot_rightOrder +
                  sit_sot_rightOrder +
                  nit_sot_rightOrder)
    scan_out_list = [None] * len(rightOrder)
    for idx, pos in enumerate(rightOrder):
        if pos >= 0:
            scan_out_list[pos] = _scan_out_list[idx]
        else:
            # Note that a negative `pos` is not a negative index. The sign of
            # `pos` is used as a flag to indicate if this output should be part
            # of the update rules or part of the standard outputs of scan.
            # If `pos` is positive then it corresponds to the standard outputs
            # of scan and it refers to the output of index `pos`. If `pos` is
            # negative then it corresponds to the update rules of scan and it
            # refers to the update rule of index -1 - `pos`.
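            # e.g. pos == -3 refers to update rule index 2, i.e.
            # sit_sot_shared[abs(pos) - 1] == sit_sot_shared[2]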
            update_map[sit_sot_shared[abs(pos) - 1]] = _scan_out_list[idx][-1]
    scan_out_list = [x for x in scan_out_list if x is not None]
    if len(scan_out_list) == 1:
        scan_out_list = scan_out_list[0]
    elif len(scan_out_list) == 0:
        scan_out_list = None
    return (scan_out_list, update_map)
Example #12
    def test_updates_init(self):
        self.assertRaises(TypeError, OrderedUpdates, dict(d=3))

        sv = theano.shared('asdf')
        OrderedUpdates({sv:3})
Example #14
def train(dim_word=100,  # word vector dimensionality
          dim=1000,  # the number of LSTM units
          encoder='gru',
          decoder='gru_cond',
          n_words_src=30000,
          n_words=30000,
          max_epochs=5000,
          finish_after=10000000,  # finish after this many updates
          dispFreq=100,
          decay_c=0.,  # L2 regularization penalty
          alpha_c=0.,  # alignment regularization
          clip_c=-1.,  # gradient clipping threshold
          lrate=1.,  # learning rate
          maxlen=100,  # maximum length of the description
          optimizer='rmsprop',
          batch_size=16,
          valid_batch_size=80,
          saveto='model.npz',
          saveFreq=1000,  # save the parameters after every saveFreq updates
          validFreq=2500,
          dev_bleu_freq=20000,
          datasets=('/data/lisatmp3/chokyun/europarl/europarl-v7.fr-en.en.tok',
                    '/data/lisatmp3/chokyun/europarl/europarl-v7.fr-en.fr.tok'),
          valid_datasets=('./data/dev/dev_en.tok',
                          './data/dev/dev_fr.tok'),
          small_train_datasets=('./data/train/small_en-fr.en','./data/train/small_en-fr.fr',
                                './data/train/small_en-fr.fr'),
          use_dropout=False,
          reload_=False,
          overwrite=False,
          preload='',

          # Options below are from v-yanfa
          dump_before_train=True,
          plot_graph=None,
          vocab_filenames=('./data/dic/filtered_dic_en-fr.en.pkl',
                           './data/dic/filtered_dic_en-fr.fr.pkl'),
          map_filename='./data/dic/mapFullVocab2Top1MVocab.pkl',
          lr_discount_freq=80000,

          # Options of deeper encoder and decoder
          n_encoder_layers=1,
          n_decoder_layers=1,
          encoder_many_bidirectional=True,

          attention_layer_id=0,
          unit='gru',
          residual_enc=None,
          residual_dec=None,
          use_zigzag=False,

          initializer='orthogonal',
          given_embedding=None,

          dist_type=None,
          dist_recover_lr_iter=False,

          unit_size=2,
          cond_unit_size=2,

          given_imm=False,
          dump_imm=False,
          shuffle_data=False,

          decoder_all_attention=False,
          average_context=False,
          task='en-fr',

          fine_tune_patience=8,
          nccl = False,
          src_vocab_map_file = None,
          tgt_vocab_map_file = None,

          trg_attention_layer_id=None,
          fix_dp_bug = False,
          temperature = 1.0,
          scale=1.0,
          gate_dropout=0.0,
          ):
    model_options = locals().copy()

    # Set distributed computing environment
    worker_id = 0
    if dist_type == 'mv':
        try:
            import multiverso as mv
        except ImportError:
            from . import multiverso_ as mv

        worker_id = mv.worker_id()
    elif dist_type == 'mpi_reduce':
        from mpi4py import MPI
        mpi_communicator = MPI.COMM_WORLD
        worker_id = mpi_communicator.Get_rank()
        workers_cnt = mpi_communicator.Get_size()

        if nccl:
            nccl_comm = init_nccl_env(mpi_communicator)

    print 'Use {}, worker id: {}'.format('multiverso' if dist_type == 'mv' else 'mpi' if dist_type == 'mpi_reduce' else 'none', worker_id)
    sys.stdout.flush()

    # Set logging file
    set_logging_file('log/complete/e{}d{}_res{}_att{}_worker{}_task{}_{}.txt'.format(
        n_encoder_layers, n_decoder_layers, residual_enc, attention_layer_id,
        worker_id, task, time.strftime('%m-%d-%H-%M-%S'),
    ))

    log('''\
Start Time = {}
'''.format(
        time.strftime('%c'),
    ))

    # Model options: load and save
    message('Top options:')
    pprint(model_options)
    pprint(model_options, stream=get_logging_file())
    message('Done')
    sys.stdout.flush()

    #load_options(model_options, reload_, preload, src_vocab_map_file and tgt_vocab_map_file)
    check_options(model_options)
    model_options['cost_normalization'] = 1
    ada_alpha = 0.95
    if dist_type == 'mpi_reduce':
        model_options['cost_normalization'] = workers_cnt

    message('Model options:')
    pprint(model_options)
    pprint(model_options, stream=get_logging_file())
    message()

    print 'Loading data'
    log('\n\n\nStart to prepare data\n@Current Time = {}'.format(time.time()))
    sys.stdout.flush()

    dataset_src, dataset_tgt = datasets[0], datasets[1]

    if shuffle_data:
        text_iterator_list = [None for _ in range(10)]
        text_iterator = None
    else:
        text_iterator_list = None

        text_iterator = TextIterator(
            dataset_src, dataset_tgt,
            vocab_filenames[0], vocab_filenames[1],
            batch_size,n_words_src, n_words,maxlen
        )

    valid_iterator = TextIterator(
        valid_datasets[0], valid_datasets[1],
        vocab_filenames[0], vocab_filenames[1],
        valid_batch_size, n_words_src, n_words
    )

    small_train_iterator = TextIterator(
        small_train_datasets[0], small_train_datasets[1],
        vocab_filenames[0], vocab_filenames[1],
        valid_batch_size, n_words_src, n_words
    )

    print 'Building model'
    model = NMTModel(model_options)

    params = model.initializer.init_params()

    # Reload parameters
    if reload_ and os.path.exists(preload):
        print 'Reloading model parameters'
        load_params(preload, params, src_map_file = src_vocab_map_file, tgt_map_file = tgt_vocab_map_file)
    sys.stdout.flush()

    # Given embedding
    if given_embedding is not None:
        print 'Loading given embedding...',
        load_embedding(params, given_embedding)
        print 'Done'

    print_params(params)

    model.init_tparams(params)

    # Build model, stochastic_mode = 0(soft), 1(stochastic), 2(hard)
    trng, use_noise, stochastic_mode, hyper_param,\
        x, x_mask, y, y_mask, \
        opt_ret, \
        cost, test_cost, x_emb, stochastic_updates,_ = model.build_model()
    inps = [x, x_mask, y, y_mask]

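    # Gather the per-layer stochastic (sampling) updates returned by build_model into one
    # ordered container, so they can be passed as `updates=` to every compiled theano.function below.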
    all_stochastic_updates = OrderedDictUpdates()
    for item1 in stochastic_updates:
        for item2 in item1:
            all_stochastic_updates.update(item2)

    print 'Building sampler'
    f_init, f_next = model.build_sampler(trng=trng, use_noise=use_noise, batch_mode=True, stochastic_mode=stochastic_mode, hyper_param=hyper_param)
    stochastic_mode.set_value(1)

    # before any regularizer
    print 'Building f_log_probs...',
    f_log_probs = theano.function(inps, cost, profile=profile, updates=all_stochastic_updates)
    print 'Done'
    sys.stdout.flush()
    test_cost = test_cost.mean() #FIXME: do not regularize test_cost here

    cost = cost.mean()

    cost = l2_regularization(cost, model.P, decay_c)

    cost = regularize_alpha_weights(cost, alpha_c, model_options, x_mask, y_mask, opt_ret)

    print 'Building f_cost...',
    f_cost = theano.function(inps, test_cost, profile=profile, updates=all_stochastic_updates)
    print 'Done'

    if plot_graph is not None:
        print 'Plotting post-compile graph...',
        theano.printing.pydotprint(
            f_cost,
            outfile='pictures/post_compile_{}'.format(plot_graph),
            var_with_name_simple=True,
        )
        print 'Done'

    print 'Computing gradient...',
    grads = tensor.grad(cost, wrt=itemlist(model.P))

    clip_shared = theano.shared(np.array(clip_c, dtype=fX), name='clip_shared')

    if dist_type != 'mpi_reduce': #build grads clip into computational graph
        grads, g2 = clip_grad_remove_nan(grads, clip_shared, model.P)
    else: #do the grads clip after gradients aggregation
        g2 = None

    # compile the optimizer, the actual computational graph is compiled here
    lr = tensor.scalar(name='lr')
    print 'Building optimizers...',

    given_imm_data = get_adadelta_imm_data(optimizer, given_imm, preload)

    if optimizer == 'adadelta':
        f_grad_shared, f_update, grads_shared, imm_shared = Optimizers[optimizer](
            lr, model.P, grads, inps, cost, g2=g2, given_imm_data=given_imm_data, alpha=ada_alpha, all_stochastic_updates=all_stochastic_updates)

    if optimizer == 'adam':
        f_grad_shared, f_update, grads_shared, imm_shared = Optimizers[optimizer](
            lr, model.P, grads, inps, cost, g2=g2, given_imm_data=given_imm_data,
            all_stochastic_updates=all_stochastic_updates)

    print 'Done'

    if dist_type == 'mpi_reduce':
        f_grads_clip = make_grads_clip_func(grads_shared = grads_shared, mt_tparams= model.P, clip_c_shared = clip_shared)

    print 'Optimization'
    log('Preparation Done\n@Current Time = {}'.format(time.time()))

    if dist_type == 'mv':
        mv.barrier()
    elif dist_type == 'mpi_reduce':
        #create receive buffers for mpi allreduce
        rec_grads = [np.zeros_like(p.get_value()) for p in model.P.itervalues()]

    estop = False
    history_errs = []
    best_bleu = -1.0
    best_valid_cost = 1e6
    best_p = None
    bad_counter = 0
    uidx = search_start_uidx(reload_, preload)

    epoch_n_batches = 0
    start_epoch = 0
    pass_batches = 0

    print 'worker', worker_id, 'uidx', uidx, 'l_rate', lrate, 'ada_alpha', ada_alpha, 'n_batches', epoch_n_batches, 'start_epoch', start_epoch, 'pass_batches', pass_batches

    start_uidx = uidx

    if dump_before_train:
        print 'Dumping before train...',
        saveto_uidx = '{}.iter{}.npz'.format(
            os.path.splitext(saveto)[0], uidx)
        np.savez(saveto_uidx, history_errs=history_errs,
                 uidx=uidx, **unzip(model.P))
        save_options(model_options, uidx, saveto)
        print 'Done'
        sys.stdout.flush()

    stochastic_mode.set_value(0)
    valid_cost = validation(valid_iterator, f_cost, use_noise)
    small_train_cost = validation(small_train_iterator, f_cost, use_noise)
    message('Soft Valid cost {:.5f} Small train cost {:.5f}'.format(valid_cost, small_train_cost))
    stochastic_mode.set_value(1)
    #new_bleu = translate_dev_get_bleu(model, f_init, f_next, trng, use_noise, 5, 1.0)
    #best_bleu = new_bleu
    #message('BLEU = {:.2f} at uidx {}'.format(new_bleu, uidx))
    sys.stdout.flush()
    
    commu_time_sum = 0.0
    cp_time_sum = 0.0
    reduce_time_sum = 0.0

    start_time = time.time()
    finetune_cnt = 0

    for eidx in xrange(start_epoch, max_epochs):
        if shuffle_data:
            text_iterator = load_shuffle_text_iterator(
                eidx, worker_id, text_iterator_list,
                datasets, vocab_filenames, batch_size, maxlen, n_words_src, n_words
            )
        n_samples = 0
        if dist_type == 'mpi_reduce':
            mpi_communicator.Barrier()

        for i, (x, y) in enumerate(text_iterator):
            if eidx == start_epoch and i < pass_batches: #ignore the first several batches when reload
                continue
            n_samples += len(x)
            uidx += 1

            use_noise.set_value(1.)

            x, x_mask, y, y_mask = prepare_data(x, y, maxlen=maxlen)

            if x is None:
                print 'Minibatch with zero sample under length ', maxlen
                uidx -= 1
                continue

            effective_uidx = uidx - start_uidx
            ud_start = time.time()

            # compute cost, grads
            if dist_type != 'mpi_reduce':
                cost, g2_value = f_grad_shared(x, x_mask, y, y_mask)
            else:
                cost = f_grad_shared(x, x_mask, y, y_mask)

            if dist_type == 'mpi_reduce':
                reduce_start = time.time()
                commu_time = 0
                gpucpu_cp_time = 0
                if not nccl:
                    commu_time, gpucpu_cp_time = all_reduce_params(grads_shared, rec_grads)
                else:
                    commu_time, gpucpu_cp_time = all_reduce_params_nccl(nccl_comm, grads_shared)
                reduce_time = time.time() - reduce_start
                commu_time_sum += commu_time
                reduce_time_sum += reduce_time
                cp_time_sum += gpucpu_cp_time

                g2_value = f_grads_clip()
                print '@Worker = {}, Reduce time = {:.5f}, Commu time = {:.5f}, Copy time = {:.5f}'.format(worker_id, reduce_time, commu_time, gpucpu_cp_time)

            curr_lr = lrate if not dist_type or dist_recover_lr_iter < effective_uidx else lrate * 0.05 + effective_uidx * lrate / dist_recover_lr_iter * 0.95
            if curr_lr < lrate:
                print 'Curr lr {:.3f}'.format(curr_lr)

            # do the update on parameters
            f_update(curr_lr)

            ud = time.time() - ud_start

            if np.isnan(g2_value) or np.isinf(g2_value):
                message('gradient NaN detected')
                sys.stdout.flush()
                
            if np.isnan(cost) or np.isinf(cost):
                message('cost NaN detected')
                model.save_model(saveto, history_errs, uidx)
                save_minibatch(x, y, saveto, uidx, vocab_filenames)
                sys.stdout.flush()

                return 1., 1., 1.

            # discount learning rate
            # FIXME: Do NOT enable this and fine-tune at the same time
            if lr_discount_freq > 0 and np.mod(effective_uidx, lr_discount_freq) == 0:
                lrate *= 0.5
                message('Discount learning rate to {} at iteration {}'.format(lrate, uidx))

            # sync batch
            if dist_type == 'mv' and np.mod(uidx, dispFreq) == 0:
                comm_start = time.time()
                model.sync_tparams()
                message('@Comm time = {:.5f}'.format(time.time() - comm_start))

            # verbose
            if np.mod(effective_uidx, dispFreq) == 0:
                message('Worker {} Epoch {} Update {} Cost {:.5f} G2 {:.5f} UD {:.5f} Time {:.5f} s'.format(
                    worker_id, eidx, uidx, float(cost), float(g2_value), ud, time.time() - start_time,
                ))
                sys.stdout.flush()

            if np.mod(effective_uidx, saveFreq) == 0 and worker_id == 0:
                # save with uidx
                if not overwrite:
                    print 'Saving the model at iteration {}...'.format(uidx),
                    model.save_model(saveto, history_errs, uidx)
                    print 'Done'
                    sys.stdout.flush()

                # save immediate data in adadelta
                saveto_imm_path = '{}_latest.npz'.format(os.path.splitext(saveto)[0])
                dump_adadelta_imm_data(optimizer, imm_shared, dump_imm, saveto_imm_path)

            if np.mod(effective_uidx, validFreq) == 0:
                stochastic_mode.set_value(0)
                valid_cost = validation(valid_iterator, f_cost, use_noise)
                small_train_cost = validation(small_train_iterator, f_cost, use_noise)
                message('Soft Valid cost {:.5f} Small train cost {:.5f}'.format(valid_cost, small_train_cost))
                #new_bleu = translate_dev_get_bleu(model, f_init, f_next, trng, use_noise, 5, 1.0)
                #message('BLEU = {:.2f} at uidx {}'.format(new_bleu, uidx))
                sys.stdout.flush()

                #if new_bleu > best_bleu:
                #    print 'Saving the model at iteration {}...'.format(uidx),
                #    model.save_model(saveto, history_errs, uidx)
                #    print 'Done'
                #    best_bleu = new_bleu
                #    sys.stdout.flush()
                    
                stochastic_mode.set_value(1)


                # Fine-tune based on dev cost
                if fine_tune_patience > 0:
                    if valid_cost < best_valid_cost:
                        bad_counter = 0
                        best_valid_cost = valid_cost
                        #dump the best model so far, including the immediate file
                        if worker_id == 0:
                            message('Dump the best model so far at uidx {}'.format(uidx))
                            model.save_model(saveto, history_errs)
                            #dump_adadelta_imm_data(optimizer, imm_shared, dump_imm, saveto)
                    else:
                        bad_counter += 1
                        if bad_counter >= fine_tune_patience:
                            print 'Fine tune:',
                            if finetune_cnt % 2 == 0:
                                lrate = np.float32(lrate * 0.5)
                                message('Discount learning rate to {} at iteration {}'.format(lrate, uidx))
                                if lrate <= 0.025:
                                    message('Learning rate decayed to {:.5f}, task completed'.format(lrate))
                                    return 1., 1., 1.
                            else:
                                clip_shared.set_value(np.float32(clip_shared.get_value() * 0.25))
                                message('Discount clip value to {} at iteration {}'.format(clip_shared.get_value(), uidx))
                            finetune_cnt += 1
                            bad_counter = 0


            # finish after this many updates
            if uidx >= finish_after:
                print 'Finishing after {} iterations!'.format(uidx)
                estop = True
                break

        print 'Seen {} samples'.format(n_samples)

        if estop:
            break

    if best_p is not None:
        zipp(best_p, model.P)

    use_noise.set_value(0.)

    return 0.
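One detail of the loop above worth spelling out is the distributed learning-rate warm-up in the curr_lr expression: when training is distributed and dist_recover_lr_iter has not yet been reached, the effective learning rate ramps linearly from 5% of lrate up to the full lrate. A small stand-alone sketch of that schedule (the function and argument names here are illustrative, not taken from the code above):

def warmup_lr(base_lr, step, recover_iters, distributed=True):
    """Mirror of the schedule in the loop above: in distributed mode the learning
    rate ramps linearly from 5% of base_lr to base_lr over `recover_iters` updates,
    then stays at base_lr."""
    if not distributed or step > recover_iters:
        return base_lr
    return base_lr * 0.05 + step * base_lr / recover_iters * 0.95

# at step 0 -> 0.05 * base_lr; at step == recover_iters -> base_lr
assert abs(warmup_lr(1.0, 0, 1000) - 0.05) < 1e-9
assert abs(warmup_lr(1.0, 1000, 1000) - 1.0) < 1e-9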
Example #15
    def test_updates_init(self):
        with pytest.raises(TypeError):
            OrderedUpdates(dict(d=3))

        sv = theano.shared("asdf")
        OrderedUpdates({sv: 3})
Example #16
    def get_output_for(self, inputs, accumulate_updates="warn", recurrence_flags={}, **kwargs):
        """
        Returns the history of agent interaction with the environment for the given number of turns.

        Parameters:
            inputs - [state init] + [input_nonsequences] + [input_sequences]
                Each part is a list of theano expressions for layers, in the order they were
                provided when creating this layer.
            accumulate_updates - whether scan updates from this call are added to previously
                accumulated updates (True), replace them (False), or are added with a warning ('warn', default).
            recurrence_flags - a dict of flags passed to the one-step agent (anything that lasagne supports),
                e.g. {"deterministic": True}
        Returns:
            [state_sequences] + [output_sequences] - a list of all state and output sequences.
            The shape of each such sequence is [batch, tick, shape_of_one_state_or_output...].
        """
        #aliases
        n_states = len(self.state_variables)
        n_state_inits = len(self.state_init)
        n_input_nonseq = len(self.input_nonsequences)
        n_input_seq = len(self.input_sequences)
        n_outputs = len(self.tracked_outputs)

        #slice inputs

        if self.mask_input is not None:
            mask, inputs = inputs[0], inputs[1:]

        initial_states_provided, nonsequences, sequences = unpack_list(inputs, [n_state_inits, n_input_nonseq, n_input_seq])

        # infer batch size
        if self.batch_size is not None:
            batch_size = self.batch_size
        elif len(inputs) != 0:
            batch_size = inputs[0].shape[0]
        else:
            raise ValueError("Need to set batch_size explicitly for recurrence")


        #here we create outputs_info for scan, basically initial values for states and outputs
        ## initial states that are given as input
        initial_states_provided = OrderedDict(list(zip(self.state_init, initial_states_provided)))

        def get_initial_state(layer, batch_size=batch_size):
            """Pick dedicated initial state or create zeros of appropriate shape and dtype
            :param layer: layer for new hidden state (key of self.state_variables)
            :param batch_size: symbolic batch_size
            """
            # if we have a dedicated init, use it
            if layer in initial_states_provided:
                initial_state = initial_states_provided[layer]
            # otherwise initialize with zeros
            else:
                assert None not in layer.output_shape[1:],\
                    "Some of your state layers ({}) has undefined shape along non-batch dimension. (shape: {}) " \
                    "Therefore, it's initial value can't be inferred. Please set explicit initial value via state_init" \
                    "".format(layer.name or layer, layer.output_shape)

                dtype = get_layer_dtype(layer)
                initial_state = T.zeros((batch_size,) + tuple(layer.output_shape[1:]), dtype=dtype)
                #disable broadcasting along all axes (lasagne outputs are non-broadcastable)
                initial_state = T.unbroadcast(initial_state, *range(initial_state.ndim))

            return initial_state

        initial_states = list(map(get_initial_state, self.state_variables))

        # Dummy initial values for tracked_outputs.
        # We need to provide them so step_masked can fall back to them; unroll_scan also requires them.
        # Initial shapes for outputs are inferred by calling get_one_step and taking the shapes of its outputs.
        # Theano optimizes shape computation without computing the get_one_step outputs themselves;
        # the resulting graph looks like (var1.shape[0], var1.shape[2]*3, 10), so this operation is zero-cost.
        state_feed_dict = dict(zip(self.state_variables.keys(),initial_states))
        input_feed_dict = dict(zip(list(chain(self.input_nonsequences.keys(), self.input_sequences.keys())),
                                   list(chain(nonsequences,[seq[:,0] for seq in sequences]))))
        initial_output_fillers = self.get_one_step(state_feed_dict,input_feed_dict,**recurrence_flags)[1]
        # disable broadcasting of zeros_like(v) along all axes (since lasagne outputs are non-broadcastable)
        initial_output_fillers = [T.unbroadcast(T.zeros_like(v),*range(v.ndim))
                                  for v in initial_output_fillers]
        #/end of that nonsense

        #stack all initializers together
        outputs_info = initial_states + initial_output_fillers

        # reshape sequences from [batch, time, ...] to [time,batch,...] to fit scan
        sequences = [seq.swapaxes(1, 0) for seq in sequences]


        # recurrent step function
        def step(*args):

            sequence_slices, prev_states, prev_outputs, nonsequences = \
                unpack_list(args, [n_input_seq, n_states, n_outputs, n_input_nonseq])
            # make dicts of prev_states and inputs
            prev_states_dict = OrderedDict(zip(list(self.state_variables.keys()), prev_states))

            input_layers = list(chain(self.input_nonsequences.keys(), self.input_sequences.keys()))
            assert len(input_layers) == len(nonsequences + sequence_slices)

            inputs_dict = OrderedDict(zip(input_layers, nonsequences + sequence_slices))

            # call one step recurrence
            new_states, new_outputs = self.get_one_step(prev_states_dict, inputs_dict, **recurrence_flags)


            #make sure new state variables are of exactly the same type as their initial value
            state_names = [layer.name or str(layer) for layer in list(self.state_variables.keys())]
            for i in range(len(state_names)):
                try:
                    if self.force_cast_types:
                        new_states[i] = new_states[i].astype(prev_states[i].dtype)
                    new_states[i] = cast_to_type(new_states[i],get_type(prev_states[i]))
                except Exception:
                    raise ValueError("Could not convert new state {}, of type {}, to its previous/initial state type "
                                     "{}. Cast the type manually or set force_cast_types=True on creation."
                                     "".format(state_names[i],get_type(new_states[i]),get_type(prev_states[i])))

            #make sure output variables are of exactly the same type as their initial value
            output_names = [layer.name or str(layer) for layer in self.tracked_outputs]
            for i in range(len(output_names)):
                try:
                    if self.force_cast_types:
                        new_outputs[i] = new_outputs[i].astype(prev_outputs[i].dtype)
                    new_outputs[i] = cast_to_type(new_outputs[i],get_type(prev_outputs[i]))
                except Exception:
                    raise ValueError("Could not convert output of {}, of type {}, to its previous/initial state type "
                                     "{}. Cast the type manually or set force_cast_types=True on creation."
                                     "".format(output_names[i],get_type(new_outputs[i]),get_type(prev_outputs[i])))

            return new_states + new_outputs

        ###handling mask_input###

        #a step function that utilizes a mask
        def step_masked(mask_t,*args):
            #unpack arrays
            sequence_slices, prev_states, prev_outputs, nonsequences = \
                unpack_list(args, [n_input_seq, n_states, n_outputs, n_input_nonseq])

            #get regular step
            new_states_and_outputs = step(*args)
            old_states_and_outputs = prev_states+prev_outputs

            #if mask_t, return new ones, else return old ones
            def apply_mask(mask_t,new_state,old_state):
                assert new_state.ndim == old_state.ndim
                ndim = new_state.ndim
                #append dims to mask
                pattern = list(range(mask_t.ndim)) + ['x'] * (ndim - mask_t.ndim)

                return T.switch(mask_t.dimshuffle(pattern),
                                new_state, old_state)


            next_states_and_outputs = [apply_mask(mask_t,new_state,old_state)
                                       for new_state,old_state in zip(new_states_and_outputs,
                                                                      old_states_and_outputs)]

            return next_states_and_outputs


        if self.mask_input is not None:
            sequences = [mask.swapaxes(1, 0)]+sequences
            step_function = step_masked
        else:
            step_function = step


        #scan itself
        if self.unroll_scan:
            # call scan itself
            history = unroll_scan(step_function,
                                  sequences=sequences,
                                  outputs_info=outputs_info,
                                  non_sequences=nonsequences,
                                  n_steps=self.n_steps
                                  )
            #if explicitly asked to reset updates, do so
            if accumulate_updates == False:
                self.updates = OrderedUpdates()

        else:
            history,updates = theano.scan(step_function,
                                  sequences=sequences,
                                  outputs_info=outputs_info,
                                  non_sequences=nonsequences,
                                  n_steps=self.n_steps
                                  )

            if accumulate_updates in (True, 'warn'):
                self.updates += updates
            else:  # replace updates
                self.updates = updates

            #check if user received last updates
            if not self._updates_received and accumulate_updates == 'warn':
                warn("You called get_output from recurrence several times without gathering the updates.\n"
                     "(A) If you wanted to get two outputs from recurrence, do NOT use\n"
                     ">>>out1 = get_output(rec[layer1])\n"
                     ">>>out2 = get_output(rec[layer2])\n"
                     "but instead:\n"
                     ">>>out1,out2 = get_output((rec[layer1],rec[layer2])) #or rec[layer1,layer2].\n"
                     "(B) If you want to run recurrence several times and accumulate updates from all runs,"
                     "use get_output(...,accumulate_updates=True) to silence the warning.\n"
                     "(C) If you want to get rid of old updates, use get_output(...,accumulate_updates=False)\n"
                     )

            if len(self.updates) != 0:
                self._updates_received = False
                warn("Recurrent loop without unroll_scan got a nonempty random state updates list. This happens"
                     " when there is a source of randomness (e.g. dropout) inside the recurrent step graph."
                     " To compile such a graph, call .get_automatic_updates() right after .get_output"
                     " and pass these updates to theano.function when compiling.", verbosity_level=2)




        # reordering from [time,batch,...] to [batch,time,...]
        history = [(var.swapaxes(1, 0) if var.ndim > 1 else var) for var in check_list(history)]

        assert len(history) == n_states+n_outputs

        state_seqs, output_seqs = unpack_list(history, [n_states, n_outputs])

        # handle delayed_states
        # selectively shift state sequences by 1 tick into the past, padding with their initialisations
        for i in range(len(state_seqs)):
            if list(self.state_variables.keys())[i] in self.delayed_states:
                state_seq = state_seqs[i]
                state_init = initial_states[i]
                state_seq = T.concatenate([insert_dim(state_init, 1), state_seq[:, :-1]], axis=1)
                state_seqs[i] = state_seq


        #keys corresponding to output sequences. Note that we do not use self.keys() to correctly
        # handle cases where some variable is present in both state_variables and tracked_outputs
        output_keys = list(self.state_variables.keys()) + list(self.tracked_outputs)
        output_values = state_seqs + output_seqs
        assert len(output_keys) == len(output_values)
        return OrderedDict(zip(output_keys,output_values))
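The apply_mask helper inside step_masked relies on a dimshuffle pattern that pads the [batch] mask with broadcastable 'x' axes so it can be switched against states of arbitrary rank. A minimal stand-alone Theano sketch of the same trick (variable names here are illustrative, not taken from the layer above):

import numpy as np
import theano
import theano.tensor as T

floatX = theano.config.floatX

mask = T.vector('mask')        # [batch]
new_state = T.matrix('new')    # [batch, features]
old_state = T.matrix('old')

# pad the mask with broadcastable axes ('x') until it matches the state's ndim,
# then select per sample: new value where the mask is nonzero, old value otherwise
pattern = list(range(mask.ndim)) + ['x'] * (new_state.ndim - mask.ndim)
masked = T.switch(mask.dimshuffle(pattern), new_state, old_state)

f = theano.function([mask, new_state, old_state], masked)

m = np.array([1, 0], dtype=floatX)
new = np.ones((2, 3), dtype=floatX)
old = np.zeros((2, 3), dtype=floatX)
print(f(m, new, old))          # first row taken from `new`, second row from `old`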