Example #1
0
 def __init__(self):
     # Register this solver's options with the base class, then reset
     # every option to its declared default.
     super(ADMMSolver, self).__init__(self.register_options())
     self.set_options_to_default()
     # Per-solve diagnostics.  These are populated by the solve()
     # method and may be read by scripting users after it returns;
     # they are reset at the start of every solve() call.
     self.iterations = None
     self.dual_residual_history = OrderedDict()
     self.primal_residual_history = OrderedDict()
     self.objective_history = OrderedDict()
Example #2
0
 def close(self):
     """Shut the manager down, discarding all local state."""
     # Warn (but do not fail) if the caller abandons pending work.
     pending_results = len(self.results)
     if pending_results:
         print("WARNING: %s is closing with %s local "
               "results waiting to be processed." %
               (type(self).__name__, pending_results))
     pending_paused = len(self._paused_task_dict)
     if pending_paused:
         print("WARNING: %s is closing with %s paused "
               "tasks waiting to be queued." %
               (type(self).__name__, pending_paused))
     # Reset the result cache and pause bookkeeping.
     self.results = OrderedDict()
     self._paused = False
     self._paused_task_dict = {}
     # Closing each client releases its dispatcher proxy.
     for dispatcher_client in self._dispatcher_name_to_client.values():
         dispatcher_client.close()
     self._dispatcher_name_to_client = {}
     self._dispatcher_proxies = {}
Example #3
0
    def __init__(self, host=None, port=None, verbose=0):
        # Dispatcher connection parameters.
        self.host = host
        self.port = port
        self._verbose = verbose
        # Pause support: while paused, queued tasks are held locally
        # rather than being sent to a dispatcher.
        self._paused = False
        self._paused_task_dict = {}
        # Dispatcher client/proxy bookkeeping, keyed by dispatcher name.
        self._dispatcher_name_to_client = {}
        self._dispatcher_proxies = {}
        # Id of the action handle most recently handed back to a caller.
        self._last_extracted_ah_id = None
        super(PyroAsynchronousActionManager, self).__init__()
        # Cache of results pulled down from the dispatch server.  To cut
        # communication overhead we grab every result available in one
        # round trip and cache them here, returning them one at a time
        # through the standard _perform_wait_any interface.  Entries are
        # bare tasks; the queue name they came from is irrelevant here.
        self.results = OrderedDict()
Example #4
0
class SolverManager_Serial(AsynchronousSolverManager):
    """Solver manager that executes each queued solve synchronously, in-process."""

    def clear(self):
        """
        Clear manager state
        """
        super(SolverManager_Serial, self).clear()
        # Completed results keyed by action-handle id, in completion order.
        self.results = OrderedDict()

    def _perform_queue(self, ah, *args, **kwds):
        """
        Perform the queue operation.  This method returns the ActionHandle,
        and the ActionHandle status indicates whether the queue was successful.
        """

        # Accept the solver via 'solver' (preferred) or the legacy 'opt'
        # keyword; exactly one must be supplied.
        opt = kwds.pop('solver', kwds.pop('opt', None))
        if opt is None:
            raise ActionManagerError(
                "No solver passed to %s, use keyword option 'solver'"
                % (type(self).__name__) )

        # A string names a solver plugin (constructed and released here);
        # anything else is assumed to be an already-constructed solver.
        time_start = time.time()
        if isinstance(opt, string_types):
            with pyomo.opt.SolverFactory(opt) as _opt:
                results = _opt.solve(*args, **kwds)
        else:
            results = opt.solve(*args, **kwds)
        results.pyomo_solve_time = time.time()-time_start

        # The solve already ran (this manager is serial), so stash the
        # result and mark the handle done immediately.
        self.results[ah.id] = results
        ah.status = ActionStatus.done
        self.event_handle[ah.id].update(ah)

        return ah

    def _perform_wait_any(self):
        """
        Perform the wait_any operation.  This method returns an
        ActionHandle with the results of waiting.  If None is returned
        then the ActionManager assumes that it can call this method again.
        Note that an ActionHandle can be returned with a dummy value,
        to indicate an error.
        """
        if len(self.results) > 0:
            # Peek at the oldest completed result: pop it and immediately
            # re-insert it (this rotates the entry to the end of the
            # OrderedDict), leaving it available for get_results().
            ah_id, result = self.results.popitem(last=False)
            self.results[ah_id] = result
            return self.event_handle[ah_id]
        return ActionHandle(error=True,
                            explanation=("No queued evaluations available in "
                                         "the 'serial' solver manager, which "
                                         "executes solvers synchronously"))
Example #5
0
 def _repn_(self, option):
     if not option.schema and not self._active and not self._required:
         return ignore
     if option.schema and len(self) == 0:
         self.add()
         self.add()
     if option.num_solutions is None:
         num = len(self)
     else:
         num = min(option.num_solutions, len(self))
     i=0
     tmp = []
     for item in self._list:
         tmp.append( item._repn_(option) )
         i=i+1
         if i == num:
             break
     return [OrderedDict([('number of solutions',len(self)), ('number of solutions displayed',num)])]+ tmp
Example #6
0
def _process_load(cmd, _model, _data, _default, options=None):
    #print("LOAD %s" % cmd)
    from pyomo.core import Set

    _cmd_len = len(cmd)
    _options = {}
    _options['filename'] = cmd[1]
    i = 2
    while cmd[i] != ':':
        _options[cmd[i]] = cmd[i + 2]
        i += 3
    i += 1
    _Index = (None, [])
    if type(cmd[i]) is tuple:
        _Index = (None, cmd[i])
        i += 1
    elif i + 1 < _cmd_len and cmd[i + 1] == '=':
        _Index = (cmd[i], cmd[i + 2])
        i += 3
    _smap = OrderedDict()
    while i < _cmd_len:
        if i + 2 < _cmd_len and cmd[i + 1] == '=':
            _smap[cmd[i + 2]] = cmd[i]
            i += 3
        else:
            _smap[cmd[i]] = cmd[i]
            i += 1

    if len(cmd) < 2:
        raise IOError("The 'load' command must specify a filename")

    options = Options(**_options)
    for key in options:
        if not key in [
                'range', 'filename', 'format', 'using', 'driver', 'query',
                'table', 'user', 'password', 'database'
        ]:
            raise ValueError("Unknown load option '%s'" % key)

    global Filename
    Filename = options.filename

    global Lineno
    Lineno = 0
    #
    # TODO: process mapping info
    #
    if options.using is None:
        tmp = options.filename.split(".")[-1]
        data = DataManagerFactory(tmp)
        if (data is None) or \
           isinstance(data, UnknownDataManager):
            raise ApplicationError("Data manager '%s' is not available." % tmp)
    else:
        try:
            data = DataManagerFactory(options.using)
        except:
            data = None
        if (data is None) or \
           isinstance(data, UnknownDataManager):
            raise ApplicationError("Data manager '%s' is not available." %
                                   options.using)
    set_name = None
    #
    # Create symbol map
    #
    symb_map = _smap
    if len(symb_map) == 0:
        raise IOError(
            "Must specify at least one set or parameter name that will be loaded"
        )
    #
    # Process index data
    #
    _index = None
    index_name = _Index[0]
    _select = None
    #
    # Set the 'set name' based on the format
    #
    _set = None
    if options.format == 'set' or options.format == 'set_array':
        if len(_smap) != 1:
            raise IOError(
                "A single set name must be specified when using format '%s'" %
                options.format)
        set_name = list(_smap.keys())[0]
        _set = set_name
    #
    # Set the 'param name' based on the format
    #
    _param = None
    if options.format == 'transposed_array' or options.format == 'array' or options.format == 'param':
        if len(_smap) != 1:
            raise IOError(
                "A single parameter name must be specified when using format '%s'"
                % options.format)
    if options.format in ('transposed_array', 'array', 'param', None):
        if _Index[0] is None:
            _index = None
        else:
            _index = _Index[0]
        _param = []
        _select = list(_Index[1])
        for key in _smap:
            _param.append(_smap[key])
            _select.append(key)
    if options.format in ('transposed_array', 'array'):
        _select = None

    #print "YYY", _param, options
    if not _param is None and len(
            _param) == 1 and not _model is None and isinstance(
                getattr(_model, _param[0]), Set):
        _select = None
        _set = _param[0]
        _param = None
        _index = None

    #print "SELECT", _param, _select
    #
    data.initialize(model=options.model,
                    filename=options.filename,
                    index=_index,
                    index_name=index_name,
                    param_name=symb_map,
                    set=_set,
                    param=_param,
                    format=options.format,
                    range=options.range,
                    query=options.query,
                    using=options.using,
                    table=options.table,
                    select=_select,
                    user=options.user,
                    password=options.password,
                    database=options.database)
    #
    data.open()
    try:
        data.read()
    except Exception:
        data.close()
        raise
    data.close()
    data.process(_model, _data, _default)
Example #7
0
def _process_table(cmd, _model, _data, _default, options=None):
    """Process a 'table' data command.

    ``cmd[1]`` holds the table declaration tokens (options, index-set
    declarations, parameter declarations, and an optional label list
    introduced by ':'); ``cmd[2]`` holds the flattened row-major table
    data.  Each declared set and parameter is converted into an
    equivalent 'set' / 'param' command and forwarded to _process_set /
    _process_param.

    Raises:
        IOError: malformed table declaration or column references.
        ValueError: an unrecognized table option was supplied.
    """
    _options = {}
    _set = OrderedDict()
    _param = OrderedDict()
    _labels = []

    _cmd = cmd[1]
    _cmd_len = len(_cmd)
    name = None
    i = 0
    while i < _cmd_len:
        try:
            #
            # This should not be error prone, so we treat errors
            # with a general exception
            #

            #
            # Processing labels
            #
            if _cmd[i] == ':':
                i += 1
                while i < _cmd_len:
                    _labels.append(_cmd[i])
                    i += 1
                continue
            #
            # Processing options
            #
            name = _cmd[i]
            if i + 1 == _cmd_len:
                # A trailing bare name declares a single value column.
                _param[name] = []
                _labels = ['Z']
                i += 1
                continue
            if _cmd[i + 1] == '=':
                # "name = [..]" declares an index set; "name = value" is
                # a table option.
                if type(_cmd[i + 2]) is list:
                    _set[name] = _cmd[i + 2]
                else:
                    _options[name] = _cmd[i + 2]
                i += 3
                continue
            # This should be a parameter declaration
            if not type(_cmd[i + 1]) is tuple:
                raise IOError
            if i + 2 < _cmd_len and _cmd[i + 2] == '=':
                _param[name] = (_cmd[i + 1], _cmd[i + 3][0])
                i += 4
            else:
                _param[name] = _cmd[i + 1]
                i += 2
        except Exception:
            # Narrowed from a bare 'except:' so KeyboardInterrupt /
            # SystemExit are no longer converted into parse errors.
            raise IOError("Error parsing table options: %s" % name)

    options = Options(**_options)
    for key in options:
        if not key in ['columns']:
            raise ValueError("Unknown table option '%s'" % key)
    #
    # Determine the number of columns in the table.
    #
    ncolumns = options.columns
    if ncolumns is None:
        ncolumns = len(_labels)
        if ncolumns == 0:
            # Python 3 fix: dict.keys() is a view and is not indexable --
            # the original '_set[_set.keys()[0]]' raised TypeError here.
            if not (len(_set) == 1 and len(_set[next(iter(_set))]) == 0):
                raise IOError(
                    "Must specify either the 'columns' option or column headers"
                )
            else:
                ncolumns = 1
    else:
        ncolumns = int(ncolumns)
    #
    data = cmd[2]
    Ldata = len(cmd[2])
    #
    # Map column labels (or 1-based column numbers when no labels were
    # given) to 0-based offsets within a row.
    #
    cmap = {}
    if len(_labels) == 0:
        for i in range(ncolumns):
            cmap[i + 1] = i
        for label in _param:
            ndx = cmap[_param[label][1]]
            if ndx < 0 or ndx >= ncolumns:
                raise IOError("Bad column value %s for data %s" %
                              (str(ndx), label))
            cmap[label] = ndx
            _param[label] = _param[label][0]
    else:
        i = 0
        for label in _labels:
            cmap[label] = i
            i += 1
    #
    # Emit a 'set' data command for each declared index set.
    #
    for sname in _set:
        # Creating set sname
        cols = _set[sname]
        tmp = []
        for col in cols:
            if not col in cmap:
                raise IOError(
                    "Unexpected table column '%s' for index set '%s'" %
                    (col, sname))
            tmp.append(cmap[col])
        if not sname in cmap:
            cmap[sname] = tmp
        cols = list(flatten_tuple(tmp))
        #
        _cmd = ['set', sname, ':=']
        i = 0
        while i < Ldata:
            row = []
            for col in cols:
                row.append(data[i + col])
            if len(row) > 1:
                _cmd.append(tuple(row))
            else:
                _cmd.append(row[0])
            i += ncolumns
        _process_set(_cmd, _model, _data)
    #
    # Emit a 'param' data command for each declared parameter.
    #
    _i = 0
    if ncolumns == 0:
        raise IOError
    for vname in _param:
        _i += 1
        # create value vname
        cols = _param[vname]
        tmp = []
        for col in cols:
            if not col in cmap:
                raise IOError(
                    "Unexpected table column '%s' for table value '%s'" %
                    (col, vname))
            tmp.append(cmap[col])
        cols = list(flatten_tuple(tmp))
        # The value column: explicitly labeled if present in cmap,
        # otherwise inferred from the parameter's position among the
        # trailing columns.
        if vname in cmap:
            cols.append(cmap[vname])
        else:
            cols.append(ncolumns - 1 - (len(_param) - _i))
        #
        _cmd = ['param', vname, ':=']
        i = 0
        while i < Ldata:
            for col in cols:
                _cmd.append(data[i + col])
            i += ncolumns
        _process_param(_cmd, _model, _data, None, ncolumns=len(cols))
Example #8
0
 def clear(self):
     """Reset this manager to a freshly-constructed state."""
     # Let the base class reset its bookkeeping first, then discard any
     # cached solver results.
     super(SolverManager_Serial, self).clear()
     self.results = OrderedDict()
Example #9
0
def Reference(reference, ctype=_NotSpecified):
    """Creates a component that references other components

    ``Reference`` generates a *reference component*; that is, an indexed
    component that does not contain data, but instead references data
    stored in other components as defined by a component slice.  The
    ctype parameter sets the :py:meth:`Component.type` of the resulting
    indexed component.  If the ctype parameter is not set and all data
    identified by the slice (at construction time) share a common
    :py:meth:`Component.type`, then that type is assumed.  If either the
    ctype parameter is ``None`` or the data has more than one ctype, the
    resulting indexed component will have a ctype of
    :py:class:`IndexedComponent`.

    If the indices associated with wildcards in the component slice all
    refer to the same :py:class:`Set` objects for all data identifed by
    the slice, then the resulting indexed component will be indexed by
    the product of those sets.  However, if all data do not share common
    set objects, or only a subset of indices in a multidimentional set
    appear as wildcards, then the resulting indexed component will be
    indexed by a :py:class:`SetOf` containing a
    :py:class:`_ReferenceSet` for the slice.

    Parameters
    ----------
    reference : :py:class:`IndexedComponent_slice`
        component slice that defines the data to include in the
        Reference component

    ctype : :py:class:`type` [optional]
        the type used to create the resulting indexed component.  If not
        specified, the data's ctype will be used (if all data share a
        common ctype).  If multiple data ctypes are found or type is
        ``None``, then :py:class:`IndexedComponent` will be used.

    Examples
    --------

    .. doctest::

        >>> from pyomo.environ import *
        >>> m = ConcreteModel()
        >>> @m.Block([1,2],[3,4])
        ... def b(b,i,j):
        ...     b.x = Var(bounds=(i,j))
        ...
        >>> m.r1 = Reference(m.b[:,:].x)
        >>> m.r1.pprint()
        r1 : Size=4, Index=r1_index, ReferenceTo=b[:, :].x
            Key    : Lower : Value : Upper : Fixed : Stale : Domain
            (1, 3) :     1 :  None :     3 : False :  True :  Reals
            (1, 4) :     1 :  None :     4 : False :  True :  Reals
            (2, 3) :     2 :  None :     3 : False :  True :  Reals
            (2, 4) :     2 :  None :     4 : False :  True :  Reals

    Reference components may also refer to subsets of the original data:

    .. doctest::

        >>> m.r2 = Reference(m.b[:,3].x)
        >>> m.r2.pprint()
        r2 : Size=2, Index=b_index_0, ReferenceTo=b[:, 3].x
            Key : Lower : Value : Upper : Fixed : Stale : Domain
              1 :     1 :  None :     3 : False :  True :  Reals
              2 :     2 :  None :     3 : False :  True :  Reals

    Reference components may have wildcards at multiple levels of the
    model hierarchy:

    .. doctest::

        >>> m = ConcreteModel()
        >>> @m.Block([1,2])
        ... def b(b,i):
        ...     b.x = Var([3,4], bounds=(i,None))
        ...
        >>> m.r3 = Reference(m.b[:].x[:])
        >>> m.r3.pprint()
        r3 : Size=4, Index=r3_index, ReferenceTo=b[:].x[:]
            Key    : Lower : Value : Upper : Fixed : Stale : Domain
            (1, 3) :     1 :  None :  None : False :  True :  Reals
            (1, 4) :     1 :  None :  None : False :  True :  Reals
            (2, 3) :     2 :  None :  None : False :  True :  Reals
            (2, 4) :     2 :  None :  None : False :  True :  Reals

    The resulting reference component may be used just like any other
    component.  Changes to the stored data will be reflected in the
    original objects:

    .. doctest::

        >>> m.r3[1,4] = 10
        >>> m.b[1].x.pprint()
        x : Size=2, Index=b[1].x_index
            Key : Lower : Value : Upper : Fixed : Stale : Domain
              3 :     1 :  None :  None : False :  True :  Reals
              4 :     1 :    10 :  None : False : False :  Reals

    """
    # Remember the caller's original argument: 'reference' itself may be
    # rewritten into a slice below.
    referent = reference
    # Normalize each accepted argument form into (_data, _iter, slice_idx,
    # index).  slice_idx is [] when common wildcard sets may still exist,
    # and None when the index has already been fixed.
    if isinstance(reference, IndexedComponent_slice):
        _data = _ReferenceDict(reference)
        _iter = iter(reference)
        slice_idx = []
        index = None
    elif isinstance(reference, Component):
        # A whole component: convert it into the "slice everything" form.
        reference = reference[...]
        _data = _ReferenceDict(reference)
        _iter = iter(reference)
        slice_idx = []
        index = None
    elif isinstance(reference, ComponentData):
        # Create a dummy IndexedComponent container with a "normal"
        # Scalar interface.  This relies on the assumption that the
        # Component uses a standard storage model.
        _idx = next(iter(UnindexedComponent_set))
        _parent = reference.parent_component()
        comp = _parent.__class__(SetOf(UnindexedComponent_set))
        comp.construct()
        comp._data[_idx] = reference
        #
        # HACK: Set the _parent to match the ComponentData's container's
        # parent so that block.clone() infers the correct block scope
        # for this "hidden" component
        #
        # TODO: When Block supports proper "hidden" / "anonymous"
        # components, switch this HACK over to that API
        comp._parent = _parent._parent
        #
        reference = comp[...]
        _data = _ReferenceDict(reference)
        _iter = iter(reference)
        slice_idx = []
        index = None
    elif isinstance(reference, Mapping):
        # A mapping supplies both the index (its keys) and the data.
        _data = _ReferenceDict_mapping(dict(reference))
        _iter = _data.values()
        slice_idx = None
        index = SetOf(_data)
    elif isinstance(reference, Sequence):
        # A sequence is treated as a mapping from position to element.
        _data = _ReferenceDict_mapping(OrderedDict(enumerate(reference)))
        _iter = _data.values()
        slice_idx = None
        index = OrderedSetOf(_data)
    else:
        raise TypeError(
            "First argument to Reference constructors must be a "
            "component, component slice, Sequence, or Mapping (received %s)" %
            (type(reference).__name__, ))

    if ctype is _NotSpecified:
        ctypes = set()
    else:
        # If the caller specified a ctype, then we will prepopulate the
        # list to improve our chances of avoiding a scan of the entire
        # Reference (by simulating multiple ctypes having been found, we
        # can break out as soon as we know that there are not common
        # subsets).
        ctypes = set((1, 2))

    # Scan the referenced data to discover the common ctype (if any) and
    # any common wildcard indexing sets.
    for obj in _iter:
        ctypes.add(obj.ctype)
        if not isinstance(obj, ComponentData):
            # This object is not a ComponentData (likely it is a pure
            # IndexedComponent container).  As the Reference will treat
            # it as if it *were* a ComponentData, we will skip ctype
            # identification and return a base IndexedComponent, thereby
            # preventing strange exceptions in the writers and with
            # things like pprint().  Of course, all of this logic is
            # skipped if the User knows better and forced a ctype on us.
            ctypes.add(0)
        # Note that we want to walk the entire slice, unless we can
        # prove that BOTH there aren't common indexing sets (i.e., index
        # is None) AND there is more than one ctype.
        if slice_idx is not None:
            # As long as we haven't ruled out the possibility of common
            # wildcard sets, then we will use _identify_wildcard_sets to
            # identify the wilcards for this obj and check compatibility
            # of the wildcards with any previously-identified wildcards.
            slice_idx = _identify_wildcard_sets(_iter._iter_stack, slice_idx)
        elif len(ctypes) > 1:
            break

    if index is None:
        # No pre-determined index: either fall back to a _ReferenceSet
        # over the slice, or build the cross product of the common
        # wildcard sets found above.
        if not slice_idx:
            index = SetOf(_ReferenceSet(reference))
        else:
            wildcards = sum((sorted(lvl.items())
                             for lvl in slice_idx if lvl is not None), [])
            # Wildcards is a list of (coordinate, set) tuples.  Coordinate
            # is that within the subsets list, and set is a wildcard set.
            index = wildcards[0][1]
            # index is the first wildcard set.
            if not isinstance(index, _SetDataBase):
                index = SetOf(index)
            for lvl, idx in wildcards[1:]:
                if not isinstance(idx, _SetDataBase):
                    idx = SetOf(idx)
                index = index * idx
            # index is now either a single Set, or a SetProduct of the
            # wildcard sets.
    # Resolve the final ctype: the single discovered ctype if unambiguous,
    # otherwise the generic IndexedComponent.
    if ctype is _NotSpecified:
        if len(ctypes) == 1:
            ctype = ctypes.pop()
        else:
            ctype = IndexedComponent
    elif ctype is None:
        ctype = IndexedComponent

    # NOTE(review): relies on the resolved ctype's constructor accepting a
    # 'ctype' keyword (as IndexedComponent does) -- confirm for all ctypes.
    obj = ctype(index, ctype=ctype)
    obj._constructed = True
    obj._data = _data
    obj.referent = referent
    return obj
Example #10
0
 def clear(self):
     """Reset this manager, discarding all cached results."""
     # Base-class bookkeeping is reset first; then start over with an
     # empty result cache.
     super(PyroAsynchronousActionManager, self).clear()
     self.results = OrderedDict()
Example #11
0
class PyroAsynchronousActionManager(AsynchronousActionManager):
    """Asynchronous action manager that queues tasks on Pyro dispatch servers.

    Tasks are sent to dispatcher(s) through pyu_pyro clients; results are
    downloaded in bulk and cached locally in ``self.results`` (keyed by
    action-handle id) until callers retrieve them.
    """

    def __init__(self, host=None, port=None, verbose=0):

        # Dispatcher connection parameters.
        self.host = host
        self.port = port
        self._verbose = verbose
        # Pause support: while paused, queued tasks are held locally.
        self._paused = False
        self._paused_task_dict = {}
        self._dispatcher_name_to_client = {}
        self._dispatcher_proxies = {}
        # map from task id to the corresponding action handle.
        # we only retain entries for tasks for which we expect
        # a result/response.
        self._last_extracted_ah_id = None
        super(PyroAsynchronousActionManager, self).__init__()
        # the list of cached results obtained from the dispatch server.
        # to avoid communication overhead, grab any/all results available,
        # and then cache them here - but return one-at-a-time via
        # the standard _perform_wait_any interface. the elements in this
        # list are simply tasks - at this point, we don't care about the
        # queue name associated with the task.
        self.results = OrderedDict()

    def clear(self):
        """
        Clear manager state
        """
        super(PyroAsynchronousActionManager, self).clear()
        self.results = OrderedDict()

    def close(self):
        """Close the manager."""
        # Warn (but do not fail) if the caller abandons pending work.
        if len(self.results):
            print("WARNING: %s is closing with %s local "
                  "results waiting to be processed." %
                  (type(self).__name__, len(self.results)))
        if len(self._paused_task_dict):
            print("WARNING: %s is closing with %s paused "
                  "tasks waiting to be queued." %
                  (type(self).__name__, len(self._paused_task_dict)))
        self.results = OrderedDict()
        self._paused = False
        self._paused_task_dict = {}
        for client in self._dispatcher_name_to_client.values():
            # the client will release the dispatcher proxy
            client.close()
        self._dispatcher_name_to_client = {}
        self._dispatcher_proxies = {}

    def pause(self):
        # While paused, _perform_queue holds tasks locally instead of
        # sending them to a dispatcher.
        self._paused = True

    def unpause(self):
        # Flush any tasks that accumulated while paused to their
        # dispatchers, then resume normal queueing.
        self._paused = False
        if len(self._paused_task_dict):
            for dispatcher_name in self._paused_task_dict:
                client = self._dispatcher_name_to_client[dispatcher_name]
                client.add_tasks(self._paused_task_dict[dispatcher_name],
                                 verbose=self._verbose > 1)
        self._paused_task_dict = {}

    def get_results(self, ah):
        # Remove and return the cached result for this handle (None if
        # it has not arrived yet).
        return self.results.pop(ah.id, None)

    def wait_all(self, *args):
        """
        Wait for all actions to complete.  The arguments to this method
        are expected to be ActionHandle objects or iterators that return
        ActionHandle objects.  If no arguments are provided, then this
        method will terminate after all queued actions are complete.
        """
        # Collect event handlers from the arguments
        ahs = self._flatten(*args)
        if len(ahs):
            # Poll until every requested handle has a cached result.
            while len(ahs) > 0:
                ahs.difference_update(
                    [ah for ah in ahs if ah.id in self.results])
                if len(ahs):
                    self._download_results()
        else:
            # No handles given: drain everything that was queued.
            while self.queued_action_counter > 0:
                self._download_results()

    def wait_any(self, *args):
        # Collect event handlers from the arguments
        ahs = self._flatten(*args)
        if len(ahs):
            # Return the first of the requested handles whose result has
            # arrived, downloading more results as needed.
            while (1):
                for ah in ahs:
                    if ah.id in self.results:
                        return ah
                self._download_results()
        else:
            while len(self.results) == 0:
                self._download_results()
            # Pop the oldest cached result; re-inserting it below rotates
            # it to the back of the OrderedDict while keeping it available
            # for get_results().
            ah_id, result = self.results.popitem(last=False)
            if ah_id == self._last_extracted_ah_id:
                # NOTE(review): this appears intended to avoid returning
                # the same handle twice in a row by fetching more results
                # and choosing the next-oldest entry; _last_extracted_ah_id
                # is only reassigned to the same value here and never set
                # to the id actually returned -- confirm this is intended.
                self._last_extracted_ah_id = ah_id
                self._download_results()
                self.results[ah_id] = result
                ah_id, result = self.results.popitem(last=False)
            self.results[ah_id] = result
            return self.event_handle[ah_id]

    def wait_for(self, ah):
        """
        Wait for the specified action to complete.
        """
        # Poll for the handle's result, then remove and return it.
        while (1):
            if ah.id in self.results:
                break
            else:
                self._download_results()
        return self.get_results(ah)

    def _create_client(self, dispatcher=None):
        # Build a client for the given dispatcher (or for host/port),
        # replacing (and closing) any existing client for the same URI.
        if dispatcher is None:
            client = pyu_pyro.Client(host=self.host, port=self.port)
        else:
            client = pyu_pyro.Client(dispatcher=dispatcher)
        if client.URI in self._dispatcher_name_to_client:
            self._dispatcher_name_to_client[client.URI].close()
        self._dispatcher_name_to_client[client.URI] = client
        return client

    #
    # Perform the queue operation. This method returns the
    # ActionHandle, and the ActionHandle status indicates whether
    # the queue was successful.
    #
    def _perform_queue(self, ah, *args, **kwds):

        queue_name = kwds.pop('queue_name', None)
        generate_response = kwds.pop('generate_response', True)

        dispatcher_name = self._get_dispatcher_name(queue_name)
        task_data = self._get_task_data(ah, *args, **kwds)
        task = pyu_pyro.Task(data=task_data,
                             id=ah.id,
                             generateResponse=generate_response)

        # While paused, hold the task locally (keyed by dispatcher and
        # queue) so unpause() can forward it later.
        if self._paused:
            if dispatcher_name not in self._paused_task_dict:
                self._paused_task_dict[dispatcher_name] = dict()
            if queue_name not in self._paused_task_dict[dispatcher_name]:
                self._paused_task_dict[dispatcher_name][queue_name] = []
            self._paused_task_dict[dispatcher_name][queue_name].append(task)
        else:
            client = self._dispatcher_name_to_client[dispatcher_name]
            client.add_task(task,
                            verbose=self._verbose > 1,
                            override_type=queue_name)

        # When no response will be generated there is nothing to wait
        # for: mark the handle done immediately and decrement the
        # queued-action counter.
        if not generate_response:
            ah.status = ActionStatus.done
            self.event_handle[ah.id].update(ah)
            self.queued_action_counter -= 1

        return ah

    #
    # Abstract Methods
    #

    def _get_dispatcher_name(self, queue_name):
        raise NotImplementedError(
            type(self).__name__ + ": This method is abstract")

    def _get_task_data(self, ah, **kwds):
        raise NotImplementedError(
            type(self).__name__ + ": This method is abstract")

    def _download_results(self):
        raise NotImplementedError(
            type(self).__name__ + ": This method is abstract")
Example #12
0
    def _solve_impl(self,
                    sp,
                    rho=1.0,
                    y_init=0.0,
                    z_init=0.0,
                    output_solver_log=False):
        """Run the ADMM iteration loop on a two-stage stochastic program.

        Iterates x-, z-, and y-updates until both the scaled primal and
        dual residuals fall below their relative tolerances, or until
        the iteration limit is reached. Populates the per-iteration
        history attributes (objective_history, primal_residual_history,
        dual_residual_history) and the iteration counter on this solver
        object.

        Args:
            sp: stochastic program with a scenario tree; a ValueError is
                raised if the tree has more than 2 time-stages.
            rho (float): initial ADMM penalty parameter.
            y_init (float): initial value for the dual (y) variables.
            z_init (float): initial value for the consensus (z)
                variables.
            output_solver_log (bool): if True, print per-iteration
                progress and a final summary to stdout.

        Returns:
            SPSolverResults: ``objective`` is the objective from the
            last x-update and ``xhat`` is the final consensus vector z.

        Raises:
            ValueError: if the scenario tree has more than 2 stages.
        """
        if len(sp.scenario_tree.stages) > 2:
            raise ValueError("ADMM solver does not yet handle more "
                             "than 2 time-stages")

        # NOTE(review): start_time is assigned but never read in this
        # method — confirm whether timing output was intended.
        start_time = time.time()

        # Count non-anticipative (non-leaf) nodes and variables.
        # num_scenarios and num_na_variables normalize the residuals
        # below; the per-type counts only feed the commented-out
        # statistics report.
        scenario_tree = sp.scenario_tree
        num_scenarios = len(scenario_tree.scenarios)
        num_stages = len(scenario_tree.stages)
        num_na_nodes = 0
        num_na_variables = 0
        num_na_continuous_variables = 0
        num_na_binary_variables = 0
        num_na_integer_variables = 0
        for stage in sp.scenario_tree.stages[:-1]:
            for tree_node in stage.nodes:
                num_na_nodes += 1
                num_na_variables += len(tree_node._standard_variable_ids)
                for id_ in tree_node._standard_variable_ids:
                    if tree_node.is_variable_binary(id_):
                        num_na_binary_variables += 1
                    elif tree_node.is_variable_integer(id_):
                        num_na_integer_variables += 1
                    else:
                        num_na_continuous_variables += 1


#        print("-"*20)
#        print("Problem Statistics".center(20))
#        print("-"*20)
#        print("Total number of scenarios.................: %10s"
#              % (num_scenarios))
#        print("Total number of time stages...............: %10s"
#              % (num_stages))
#        print("Total number of non-anticipative nodes....: %10s"
#              % (num_na_nodes))
#        print("Total number of non-anticipative variables: %10s\n#"
#              "                                continuous: %10s\n#"
#              "                                    binary: %10s\n#"
#              "                                   integer: %10s"
#              % (num_na_variables,
#                 num_na_continuous_variables,
#                 num_na_binary_variables,
#                 num_na_integer_variables))

        # Convergence tolerances and the iteration cap come from the
        # solver's registered options.
        rel_tol_primal = \
            self.get_option("primal_residual_relative_tolerance")
        rel_tol_dual = \
            self.get_option("dual_residual_relative_tolerance")
        max_iterations = \
            self.get_option("max_iterations")

        # Reset the per-solve history attributes (re-populated each
        # call, as documented on this solver class).
        self.objective_history = OrderedDict()
        self.primal_residual_history = OrderedDict()
        self.dual_residual_history = OrderedDict()
        self.iterations = 0
        if output_solver_log:
            print("")
        label_cols = ("{0:^4} {1:>16} {2:>8} {3:>8} {4:>12}".format(
            "iter", "objective", "pr_res", "du_res", "lg(||rho||)"))
        with ADMMAlgorithm(sp, self._options) as admm:
            rho, x, y, z = admm.initialize_algorithm_data(rho_init=rho,
                                                          y_init=y_init,
                                                          z_init=z_init)
            rho_strategy = RhoStrategyFactory(self.get_option("rho_strategy"),
                                              self._options)
            rho_strategy.initialize(sp, x, y, z, rho)
            # NOTE(review): xrange is Python-2 only — presumably imported
            # from six.moves at the top of this file; confirm.
            for i in xrange(max_iterations):

                # One full ADMM sweep: x-update (returns the objective),
                # z-update (returns unscaled residuals plus scaling
                # magnitudes), y-update (returns the dual scaling).
                objective = \
                    admm.run_x_update(x, y, z, rho)
                (unscaled_primal_residual,
                 unscaled_dual_residual,
                 x_scale,
                 z_scale) = \
                    admm.run_z_update(x, y, z, rho)
                y_scale = \
                    admm.run_y_update(x, y, z, rho)

                # we've completed another iteration
                self.iterations += 1

                # check for convergence: residuals are normalized by
                # problem size (sqrt of scenario / variable counts) and
                # by the magnitude of the current iterates, with a floor
                # of 1.0 so small-magnitude solutions are not over-scaled
                primal_rel_scale = max(1.0, x_scale, z_scale)
                dual_rel_scale = max(1.0, y_scale)
                primal_residual = unscaled_primal_residual / \
                                  math.sqrt(num_scenarios) / \
                                  primal_rel_scale
                dual_residual = unscaled_dual_residual / \
                                math.sqrt(num_na_variables) / \
                                dual_rel_scale

                # record this iteration's progress for scripting users
                self.objective_history[i] = \
                    objective
                self.primal_residual_history[i] = \
                    primal_residual
                self.dual_residual_history[i] = \
                    dual_residual

                if output_solver_log:
                    # re-print the column header every 10 rows
                    if (i % 10) == 0:
                        print(label_cols)
                    # NOTE(review): header says "lg(||rho||)" (log10?)
                    # but math.log is the natural log — confirm intent.
                    print("%4d %16.7e %8.2e %8.2e %12.2e" %
                          (i, objective, primal_residual, dual_residual,
                           math.log(admm.compute_nodevector_norm(rho))))

                if (primal_residual < rel_tol_primal) and \
                   (dual_residual < rel_tol_dual):
                    if output_solver_log:
                        print("\nNumber of Iterations....: %s" %
                              (self.iterations))
                    break
                else:
                    # not converged yet: let the rho strategy adapt the
                    # penalty parameter before the next sweep
                    rho_strategy.update_rho(sp, x, y, z, rho)

            # for/else: runs only when the loop exhausted max_iterations
            # without hitting the convergence break above
            else:
                if output_solver_log:
                    print("\nMaximum number of iterations reached: %s" %
                          (max_iterations))

        if output_solver_log:
            # final summary: scaled vs. unscaled residuals side by side
            print("")
            print("                        {0:^24} {1:^24}".\
                  format("(scaled)", "(unscaled)"))
            print("Objective..........:    {0:^24} {1:^24.16e}".\
                  format("-", objective))
            print("Primal residual....:    {0:^24.16e} {1:^24.16e}".\
                  format(primal_residual, unscaled_primal_residual))
            print("Dual residual......:    {0:^24.16e} {1:^24.16e}".\
                  format(dual_residual, unscaled_dual_residual))
            unscaled_err = unscaled_primal_residual + \
                           unscaled_dual_residual
            err = primal_residual + dual_residual
            print("Overall error......:    {0:^24.16e} {1:^24.16e}".\
                  format(err, unscaled_err))

        # z is the consensus (non-anticipative) solution reported as xhat
        results = SPSolverResults()
        results.objective = objective
        results.xhat = z
        return results