Example #1
 def __init__(self,
              block,
              interval_obj,
              interval_sec,
              poll_sec=1,
              interval_cb=None,
              time_func=time.localtime):
     self._rflag = False
     self._check_params(interval_obj, interval_sec, poll_sec, interval_cb,
                        time_func)
     self._iobj = interval_obj
     self._isec = interval_sec
     self._psec = round(interval_sec / round(interval_sec / poll_sec), 5)
     self._min_block_size = self.BLOCK_SIZE_PER_PSEC * poll_sec
     self._check_block(block)
     self._block = block
     self._texcludes = set()
     self._ssfunc = block.stream_snapshot_from_marker
     self._restrict_block_resize(block, poll_sec)
     self._tfunc = time_func
     self._interval_cb = interval_cb
     self._wait_adj_down_exp = 0
     self._buffers = dict()
     self._buffers_lock = _Lock()
     self._bthread = _Thread(target=self._background_worker)
     self._bthread.start()
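
This constructor starts a background worker thread that polls a data block and fills self._buffers under self._buffers_lock. A minimal self-contained sketch of that pattern (the _PollerSketch class and its trivial worker body are illustrative, not the real polling logic):

from threading import Thread, Lock
import time

class _PollerSketch:
    """Background thread updating shared buffers under a lock."""

    def __init__(self, poll_sec=1):
        self._rflag = True                  # run flag checked by the worker
        self._psec = poll_sec
        self._buffers = {}
        self._buffers_lock = Lock()
        self._bthread = Thread(target=self._background_worker, daemon=True)
        self._bthread.start()

    def _background_worker(self):
        while self._rflag:
            with self._buffers_lock:        # guard shared state
                self._buffers['last_poll'] = time.time()
            time.sleep(self._psec)

    def stop(self):
        self._rflag = False
        self._bthread.join()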
Example #2
    def _init(self, *args, **kwargs):
        # A small helper function which will do the following:
        # * get a client from the cache in a thread safe manner, or
        #   create a new one from scratch
        # * store the input arguments as class members
        # * create a LoadBalancedView from the client
        # * create a lock to regulate access to the view
        # * return the view.
        from ipyparallel import Client
        # Turn the arguments into something that might be hashable.
        args_key = (args, tuple(sorted([(k, kwargs[k]) for k in kwargs])))
        if _hashable(args_key):
            with _client_cache_lock:
                if args_key in _client_cache:
                    rc = _client_cache[args_key]
                else:
                    _client_cache[args_key] = Client(*args, **kwargs)
                    rc = _client_cache[args_key]
        else:
            # If the arguments are not hashable, just create a brand new
            # client.
            rc = Client(*args, **kwargs)

        # Save the init arguments.
        self._args = args
        self._kwargs = kwargs

        # NOTE: we need to regulate access to the view because,
        # while run_evolve() is running in a separate thread, we
        # could be doing other things involving the view (e.g.,
        # asking extra_info()). Thus, create the lock here.
        self._view_lock = _Lock()

        return rc.load_balanced_view()
Example #3
    def copy_object(self,
                    src_path,
                    dst_path,
                    src_locator=None,
                    dst_locator=None):
        """
        Copy object.

        Args:
            src_path (str): Source object path.
            dst_path (str): Destination object path.
            src_locator (str): Source locator.
            dst_locator (str): Destination locator.
        """
        if src_locator is None:
            src_locator, src_path = src_path.split('/', 1)

        if dst_locator is None:
            dst_locator, dst_path = dst_path.split('/', 1)

        file = self._get_object(src_locator, src_path).copy()
        del file['_lock']
        file = _deepcopy(file)
        file['_lock'] = _Lock()

        self._get_locator_content(dst_locator)[dst_path] = file

        if self._header_mtime:
            file[self._header_mtime] = self._format_date(_time())
Example #4
 def __init__(self, pool, max_threads=None):
     self._pool = pool
     self._max_threads = max_threads
     self._queue = _Queue()
     self._lock = _Lock()
     self._n_active = 0
     return
Example #5
    def __init__(self, *args, **kwargs):
        self._content_length = kwargs.get("content_length", 0)

        _ObjectRawIORandomWriteBase.__init__(self, *args, **kwargs)
        _WorkerPoolBase.__init__(self)

        if self._writable:
            self._size_lock = _Lock()
Example #6
 def __init__(self):
     """Init."""
     super().__init__()
     self._last_operation = None
     self._thread = None
     self._ignore = False
     self._enabled = True
     self._lock = _Lock()
Example #7
 def __init__(self, address, size=1000, date_time=False, timeout=DEF_TIMEOUT):         
   self._hub_addr = _check_and_resolve_address(address)
   self._my_sock = _socket.socket()
   self._my_sock.settimeout(timeout / 1000)
   self._my_sock.connect(self._hub_addr)    
   self._call_LOCK = _Lock()
   _vcall(_pack_msg(_vCONN_BLOCK), self._my_sock, self._hub_addr)      
   # in case __del__ is called during socket op
   self._call(_vCREATE, '__init__', size, date_time, timeout) 
Example #8
 def __init__(self, type=None, serial=None):
     """Open an MBGUG device with specified serial number and type.
        The serial number can be passed as string or integer.
        It is enough to specify the significant ending of the serial number.
        If the serial number is not specified, return the first device found.
     """
     self._handle = None
     self._io__Lock = _Lock()
     self._open(type, serial)
Example #9
def wait(combination):
    """
    Blocks the program execution until the given key combination is pressed.
    """
    lock = _Lock()
    lock.acquire()
    hotkey_handler = add_hotkey(combination, lock.release)
    lock.acquire()
    remove_hotkey(hotkey_handler)
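
Acquiring a freshly created Lock twice is a one-shot signal: the first acquire() succeeds immediately, and the second blocks until the hotkey callback calls release(). A standalone sketch of the same hand-off, with a threading.Timer standing in for the hotkey callback:

from threading import Lock, Timer

def wait_for_signal():
    lock = Lock()
    lock.acquire()                    # first acquire: the lock is now held
    Timer(1.0, lock.release).start()  # stand-in for add_hotkey's callback
    lock.acquire()                    # blocks until release() is called
    print("signal received")

wait_for_signal()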
Example #11
def wait(combination=None):
    """
    Blocks the program execution until the given key combination is pressed or,
    if given no parameters, blocks forever.
    """
    lock = _Lock()
    lock.acquire()
    if combination is not None:
        hotkey_handler = add_hotkey(combination, lock.release)
        lock.acquire()
        remove_hotkey(hotkey_handler)
    else:
        # No combination given: block forever.
        lock.acquire()
Example #12
    def __init__(self, *args, **kwargs):

        # If a content length is provided, allocate pages for this blob
        self._content_length = kwargs.get('content_length', 0)

        _ObjectRawIORandomWriteBase.__init__(self, *args, **kwargs)
        _WorkerPoolBase.__init__(self)

        if self._writable:

            # Create lock for resizing
            self._size_lock = _Lock()
Example #13
    def __init__(self, host_type=None, region=None, *args, **kwargs):
        _Client.__init__(self, *args, **kwargs)

        self._metering_env = None
        self._host_type = host_type or self._config['host']['host_type']
        self._region = region or self._config['host']['region']

        # Accelerator executable is exclusive
        self._accelerator_lock = _Lock()

        # Need accelerator executable to run
        if not _cfg.accelerator_executable_available():
            raise _exc.HostConfigurationException(gen_msg='no_host_found')
Example #14
    def __init__(self, fpga_slot_id=0, fpga_image=None, drm_ctrl_base_addr=0):
        self._fpga_slot_id = fpga_slot_id
        self._fpga_image = fpga_image
        self._drm_ctrl_base_addr = drm_ctrl_base_addr

        # FPGA read/write low-level functions and associated locks
        self._fpga_read_register = None
        self._fpga_write_register = None
        self._fpga_read_register_lock = _Lock()
        self._fpga_write_register_lock = _Lock()

        # Device and library handles
        self._fpga_handle = None
        self._fpga_library = self._get_driver()

        # Initialize FPGA
        if fpga_image:
            self._program_fpga(fpga_image)
        self._init_fpga()

        # Callbacks
        self._read_register_callback = self._get_read_register_callback()
        self._write_register_callback = self._get_write_register_callback()
Example #15
 def __init__(self, address, size=1000, date_time=False, timeout=DEF_TIMEOUT):
     _check_address(address)
     self._hub_addr = address
     self._my_sock = _socket.socket()
     self._my_sock.settimeout(timeout / 1000)
     self._my_sock.connect(address)
     self._connected = False
     self._call_LOCK = _Lock()
      _vcall(_pack_msg(_vCONN_BLOCK), self._my_sock, self._hub_addr)
      # in case __del__ is called during socket op
      self._call(_vCREATE, "__init__", size, date_time, timeout)
      self._connected = True
Example #16
 def _init(self, use_pool):
     # Implementation of the ctor. Factored out
     # because it's re-used in the pickling support.
     if not isinstance(use_pool, bool):
         raise TypeError(
             "The 'use_pool' parameter in the mp_island constructor must be a boolean, but it is of type {} instead."
             .format(type(use_pool)))
     self._use_pool = use_pool
     if self._use_pool:
         # Init the process pool, if necessary.
         mp_island.init_pool()
     else:
         # Init the pid member and associated lock.
         self._pid_lock = _Lock()
         self._pid = None
Example #17
 def __init__(self, raise_404, raise_416, raise_500, format_date=None):
     self._put_lock = _Lock()
     self._system = None
     self._locators = {}
     self._header_size = None
     self._header_mtime = None
     self._header_ctime = None
     self._raise_404 = raise_404
     self._raise_416 = raise_416
     self._raise_500 = raise_500
     self._raise_server_error = False
     if format_date is None:
         from wsgiref.handlers import format_date_time
         format_date = format_date_time
     self._format_date = format_date
Example #18
 def __init__(self):
     super().__init__()
     self._nodes = set()
     self._nodes_lock = _Lock()
     # This is triggered when wait_for_ready_callbacks should rebuild the wait list
     gc, gc_handle = _rclpy.rclpy_create_guard_condition()
     self._guard_condition = gc
     self._guard_condition_handle = gc_handle
     # True if shutdown has been called
     self._is_shutdown = False
     self._work_tracker = _WorkTracker()
     # State for wait_for_ready_callbacks to reuse generator
     self._cb_iter = None
     self._last_args = None
     self._last_kwargs = None
Example #19
 def __init__( self, address, size = 1000, date_time = False,
               timeout = DEF_TIMEOUT ):       
     _check_address( address )
     self._hub_addr = address
     self._my_sock = _socket.socket()
     self._my_sock.settimeout( timeout / 1000 )
     self._my_sock.connect( address )
     self._connected = False
     self._call_LOCK = _Lock()
      _vcall( _pack_msg( _vCONN_BLOCK ), self._my_sock, self._hub_addr)
      # in case __del__ is called during socket op
      self._call( _vCREATE, '__init__', size, date_time, timeout )
      self._connected = True
Example #20
 def __init__(self, address, password=None, size=1000, date_time=False, 
              timeout=DEF_TIMEOUT):         
     self._hub_addr = _check_and_resolve_address(address)
     self._my_sock = _socket.socket()
     self._my_sock.settimeout(timeout / 1000)
     if password is not None:
         check_password(password)
     self._my_sock.connect(self._hub_addr)  
     try:
         _handle_req_from_server(self._my_sock,password)
     except:
         self._my_sock.close()
         raise
     self._call_LOCK = _Lock()
     _vcall(_pack_msg(_vCONN_BLOCK), self._my_sock, self._hub_addr)      
     # in case __del__ is called during socket op
     self._call(_vCREATE, '__init__', size, date_time, timeout) 
Example #21
class _SingleShotQueue:
    __common_lock = _Lock()

    def __init__(self):
        self.__value = _NOTSET
        self.__ready = None
        return

    ##
    # @note Timeout is ignored because the PendingResult should not ever
    #       block.
    def put(self, obj, timeout=None):
        self.__common_lock.acquire()
        if self.__value is not _NOTSET:
            self.__common_lock.release()
            raise EInternalError(
                "Only one put is allowed on a _SingleShotQueue.")
        self.__value = obj
        if self.__ready is not None:
            self.__common_lock.release()
            self.__ready.acquire()
            self.__ready.notify()
            self.__ready.release()
        else:
            self.__common_lock.release()
        return

    def get(self, timeout=None):
        self.__common_lock.acquire()
        result = self.__value
        if result is not _NOTSET:
            self.__common_lock.release()
            return result
        else:
            if self.__ready is None:
                self.__ready = _Condition(self.__common_lock)
        self.__common_lock.release()
        self.__ready.acquire()
        if self.__value is _NOTSET:
            self.__ready.wait(timeout)
        result = self.__value
        if result is _NOTSET:
            result = NORESULT
        self.__ready.release()
        return result
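
A short usage sketch of _SingleShotQueue, assuming the module-level pieces referenced above (_NOTSET, NORESULT, EInternalError, _Condition) are defined as in the original source: one thread blocks in get() while another delivers the single value via put().

from threading import Thread
import time

q = _SingleShotQueue()

def producer():
    time.sleep(0.5)
    q.put(42)       # only one put() is ever allowed per queue

t = Thread(target=producer)
t.start()
print(q.get())      # blocks until the producer's put(), then prints 42
t.join()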
Example #22
 def __init__(self, address, password=None, size=1000, date_time=False, 
              timeout=DEF_TIMEOUT):
     self._valid = False
     self._hub_addr = _check_and_resolve_address(address)
     self._my_sock = _socket.socket()
     self._my_sock.settimeout(timeout / 1000)
     if password is not None:
         check_password(password)
     self._my_sock.connect(self._hub_addr)  
     try:
         _handle_req_from_server(self._my_sock,password)
     except:
         self._my_sock.close()
         raise
     self._call_LOCK = _Lock()
     _vcall(_pack_msg(_vCONN_BLOCK), self._my_sock, self._hub_addr)      
     # in case __del__ is called during socket op
     self._call(_vCREATE, '__init__', size, date_time, timeout) 
     self._valid = True
Example #23
def load_features(jobs, host, port, path, tiles):
    ''' Load data from tiles to features.
    
        Calls load_tile_features() in a thread pool to speak HTTP.
    '''
    features = []
    lock = _Lock()
    
    args = (lock, host, port, path, tiles, features)
    threads = [Thread(target=load_tile_features, args=args) for i in range(jobs)]
    
    for thread in threads:
        thread.start()
    
    for thread in threads:
        thread.join()
    
    logging.debug('Loaded %d features', len(features))
    return features
Example #25
 def __init__(self, block, interval_obj, interval_sec, poll_sec=1,
              interval_cb=None, time_func=time.localtime):
     self._rflag = False
     self._check_params(interval_obj, interval_sec, poll_sec, interval_cb, time_func)
     self._iobj = interval_obj
     self._isec = interval_sec
     self._psec = round(interval_sec / round(interval_sec/poll_sec), 5)
     self._min_block_size = self.BLOCK_SIZE_PER_PSEC * poll_sec
     self._check_block(block)        
     self._block = block
     self._texcludes = set()
     self._ssfunc = block.stream_snapshot_from_marker
     self._restrict_block_resize(block, poll_sec)
     self._tfunc = time_func
     self._interval_cb = interval_cb
     self._wait_adj_down_exp = 0
     self._buffers = dict()
     self._buffers_lock = _Lock()
     self._bthread = _Thread(target=self._background_worker)
     self._bthread.start()      
Example #26
    def _init(self, *args, **kwargs):
        # A small helper function which will do the following:
        # * get a client from the cache in a thread safe manner, or
        #   create a new one from scratch
        # * store the input arguments as class members
        # * create a LoadBalancedView from the client
        # * create a lock to regulate access to the view
        # * return the view.
        from ipyparallel import Client
        # Turn the arguments into something that might be hashable.
        # Make sure the kwargs are sorted so that two sets of identical
        # kwargs will be recognized as equal also if the keys are stored
        # in different order.
        args_key = (args, tuple(sorted([(k, kwargs[k]) for k in kwargs])))
        if _hashable(args_key):
            with _client_cache_lock:
                # Try to see if a client constructed with the same
                # arguments already exists in the cache.
                rc = _client_cache.get(args_key)
                if rc is None:
                    # No cached client exists. Create a new client
                    # and store it in the cache.
                    rc = Client(*args, **kwargs)
                    _client_cache[args_key] = rc
        else:
            # If the arguments are not hashable, just create a brand new
            # client.
            rc = Client(*args, **kwargs)

        # Save the init arguments.
        self._args = args
        self._kwargs = kwargs

        # NOTE: we need to regulate access to the view because,
        # while run_evolve() is running in a separate thread, we
        # could be doing other things involving the view (e.g.,
        # asking extra_info()). Thus, create the lock here.
        self._view_lock = _Lock()

        return rc.load_balanced_view()
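
The caching logic above is memoization keyed by the (hashable) constructor arguments, guarded by a lock so two threads never build the same client twice. A generic self-contained sketch of the idiom (get_cached and its factory argument are hypothetical names):

from threading import Lock

_cache = {}
_cache_lock = Lock()

def _make_key(args, kwargs):
    # Sort kwargs so identical argument sets compare equal regardless
    # of the order in which the keys were passed.
    return (args, tuple(sorted(kwargs.items())))

def get_cached(factory, *args, **kwargs):
    key = _make_key(args, kwargs)
    try:
        hash(key)
    except TypeError:
        # Unhashable arguments: skip the cache entirely.
        return factory(*args, **kwargs)
    with _cache_lock:
        if key not in _cache:
            _cache[key] = factory(*args, **kwargs)
        return _cache[key]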
Example #27
class ipyparallel_island(object):
    """Ipyparallel island.

    This user-defined island (UDI) will dispatch evolution tasks to an ipyparallel cluster.
    The communication with the cluster is managed via an :class:`ipyparallel.LoadBalancedView`
    instance which is created either implicitly when the first evolution is run, or explicitly
    via the :func:`~pygmo.ipyparallel_island.init_view()` method. The
    :class:`~ipyparallel.LoadBalancedView` instance is a global object shared among all the
    ipyparallel islands.

    .. seealso::

       https://ipyparallel.readthedocs.io/en/latest/

    """

    # Static variables for the view.
    _view_lock = _Lock()
    _view = None

    @staticmethod
    def init_view(client_args=[],
                  client_kwargs={},
                  view_args=[],
                  view_kwargs={}):
        """Init the ipyparallel view.

        .. versionadded:: 2.12

        This method will initialise the :class:`ipyparallel.LoadBalancedView`
        which is used by all ipyparallel islands to submit the evolution tasks
        to an ipyparallel cluster. If the :class:`ipyparallel.LoadBalancedView`
        has already been created, this method will perform no action.

        The input arguments *client_args* and *client_kwargs* are forwarded
        as positional and keyword arguments to the construction of an
        :class:`ipyparallel.Client` instance. From the constructed client,
        an :class:`ipyparallel.LoadBalancedView` instance is then created
        via the :func:`ipyparallel.Client.load_balanced_view()` method, to
        which the positional and keyword arguments *view_args* and
        *view_kwargs* are passed.

        Note that usually it is not necessary to explicitly invoke this
        method: an :class:`ipyparallel.LoadBalancedView` is automatically
        constructed with default settings the first time an evolution task
        is submitted to an ipyparallel island. This method should be used
        only if it is necessary to pass custom arguments to the construction
        of the :class:`ipyparallel.Client` or :class:`ipyparallel.LoadBalancedView`
        objects.

        Args:

            client_args(:class:`list`): the positional arguments used for the
              construction of the client
            client_kwargs(:class:`dict`): the keyword arguments used for the
              construction of the client
            view_args(:class:`list`): the positional arguments used for the
              construction of the view
            view_kwargs(:class:`dict`): the keyword arguments used for the
              construction of the view

        Raises:

           unspecified: any exception thrown by the constructor of :class:`ipyparallel.Client`
             or by the :func:`ipyparallel.Client.load_balanced_view()` method

        """
        from ._ipyparallel_utils import _make_ipyparallel_view

        with ipyparallel_island._view_lock:
            if ipyparallel_island._view is None:
                # Create the new view.
                ipyparallel_island._view = _make_ipyparallel_view(
                    client_args, client_kwargs, view_args, view_kwargs)

    @staticmethod
    def shutdown_view():
        """Destroy the ipyparallel view.

        .. versionadded:: 2.12

        This method will destroy the :class:`ipyparallel.LoadBalancedView`
        currently being used by the ipyparallel islands for submitting
        evolution tasks to an ipyparallel cluster. The view can be re-inited
        implicitly by submitting a new evolution task, or by invoking
        the :func:`~pygmo.ipyparallel_island.init_view()` method.

        """
        import gc
        with ipyparallel_island._view_lock:
            if ipyparallel_island._view is None:
                return

            old_view = ipyparallel_island._view
            ipyparallel_island._view = None
            del old_view
            gc.collect()

    def run_evolve(self, algo, pop):
        """Evolve population.

        This method will evolve the input :class:`~pygmo.population` *pop* using the input
        :class:`~pygmo.algorithm` *algo*, and return *algo* and the evolved population. The evolution
        task is submitted to the ipyparallel cluster via a global :class:`ipyparallel.LoadBalancedView`
        instance initialised either implicitly by the first invocation of this method,
        or by an explicit call to the :func:`~pygmo.ipyparallel_island.init_view()` method.

        Args:

            pop(:class:`~pygmo.population`): the input population
            algo(:class:`~pygmo.algorithm`): the input algorithm

        Returns:

            :class:`tuple`: a tuple of 2 elements containing *algo* (i.e., the :class:`~pygmo.algorithm` object that was used for the evolution) and the evolved :class:`~pygmo.population`

        Raises:

            unspecified: any exception thrown by the evolution, by the creation of a
              :class:`ipyparallel.LoadBalancedView`, or by the submission of the evolution task
              to the ipyparallel cluster

        """
        # NOTE: as in the mp_island, we pre-serialize
        # the algo and pop, so that we can catch
        # serialization errors early.
        import pickle
        from ._ipyparallel_utils import _make_ipyparallel_view

        ser_algo_pop = pickle.dumps((algo, pop))
        with ipyparallel_island._view_lock:
            if ipyparallel_island._view is None:
                ipyparallel_island._view = _make_ipyparallel_view([], {}, [],
                                                                  {})
            ret = ipyparallel_island._view.apply_async(_evolve_func_ipy,
                                                       ser_algo_pop)

        return pickle.loads(ret.get())

    def get_name(self):
        """Island's name.

        Returns:
            :class:`str`: ``"Ipyparallel island"``

        """
        return "Ipyparallel island"

    def get_extra_info(self):
        """Island's extra info.

        Returns:
            :class:`str`: a string with extra information about the status of the island

        """
        from copy import deepcopy
        with ipyparallel_island._view_lock:
            if ipyparallel_island._view is None:
                return "\tNo cluster view has been created yet"
            else:
                d = deepcopy(ipyparallel_island._view.queue_status())
        return "\tQueue status:\n\t\n\t" + "\n\t".join(
            ["(" + str(k) + ", " + str(d[k]) + ")" for k in d])
Example #28
class IDSearch:
    """ID Search Class."""

    # NOTE: Maybe we should move these mappings to
    # static tables in our cs-constants server.

    _beamline2idname = {
        'CARNAUBA': 'SI-06SB:ID-APU22',
        'CATERETE': 'SI-07SP:ID-APU22',
        'EMA': 'SI-08SB:ID-APU22',
        'MANACA': 'SI-09SA:ID-APU22',
        'SABIA': 'SI-10SB:ID-Delta52',
        'IPE': 'SI-11SP:ID-Delta52',
        'COLIBRI': 'SI-12SB:ID-Delta52',
        'HARPIA': 'SI-14SB:ID-Delta21',
        'SAGUI': 'SI-15SP:ID-Delta21',
        'INGA': 'SI-16SB:ID-Delta21',
        'SAPUCAIA': 'SI-17SA:ID-APU19',
        'PAINEIRA': 'SI-18SB:ID-Delta21',
    }
    _idname2beamline = {v: k for k, v in _beamline2idname.items()}

    _idname_2_orbitcorr = {
        'SI-09SA:ID-APU22': (
            'SI-09SA:PS-CH-1',
            'SI-09SA:PS-CH-2',
            'SI-09SA:PS-CH-3',
            'SI-09SA:PS-CH-4',
            'SI-09SA:PS-CV-1',
            'SI-09SA:PS-CV-2',
            'SI-09SA:PS-CV-3',
            'SI-09SA:PS-CV-4',
        )
    }

    _idname_2_orbitffwd_fname = {
        'SI-09SA:ID-APU22': 'si-id-apu22-ffwd-09sa',
    }

    _idname_2_orbitffwd_dict = dict()

    _lock = _Lock()

    @staticmethod
    def conv_idname_2_beamline(idname):
        """Return the beamline name corresponding to the given ID name."""
        return IDSearch._idname2beamline.get(idname)

    @staticmethod
    def conv_beamline_2_idname(beamline):
        """Return the ID name corresponding to the given beamline."""
        return IDSearch._beamline2idname.get(beamline)

    @staticmethod
    def get_idname_2_beamline_dict():
        """Return idname to beamline dictionary."""
        return IDSearch._idname2beamline.copy()

    @staticmethod
    def get_beamline_2_idname_dict():
        """Return beamline to idname dictionary."""
        return IDSearch._beamline2idname.copy()

    @staticmethod
    def get_idnames(filters=None):
        """Return a sorted and filtered list of all ID names."""
        filters_ = dict()
        if filters:
            filters_.update(filters)
        filters_['dis'] = 'ID'
        return _PSSearch.get_psnames(filters_)

    @staticmethod
    def conv_idname_2_orbitcorr(idname):
        """Return list of orbit correctors of a given ID."""
        return IDSearch._idname_2_orbitcorr[idname]

    @staticmethod
    def conv_idname_2_orbitffwd(idname):
        """Convert idname to orbit feedforward excdata."""
        IDSearch._reload_idname_2_orbitffwd_dict(idname)
        return IDSearch._idname_2_orbitffwd_dict[idname]

    # --- private methods ---

    @staticmethod
    def _reload_idname_2_orbitffwd_dict(idname):
        """Load ID ffwd data."""
        with IDSearch._lock:
            if idname in IDSearch._idname_2_orbitffwd_dict:
                return
            if not _web.server_online():
                raise Exception('could not read "' + str(idname) +
                                '" from web server!')
            ffwd_fname = IDSearch._idname_2_orbitffwd_fname[idname]
            IDSearch._idname_2_orbitffwd_dict[idname] = \
                _ExcitationData(filename_web=ffwd_fname + '.txt')
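
_reload_idname_2_orbitffwd_dict is a lazy load: the lock serializes the first access, and the early return makes every later call cheap. A minimal standalone sketch of the same check-then-load idiom (names are hypothetical):

from threading import Lock

_ffwd_cache = {}
_ffwd_lock = Lock()

def get_ffwd(idname, loader):
    """Load the entry for idname exactly once, even under concurrency."""
    with _ffwd_lock:
        if idname not in _ffwd_cache:
            _ffwd_cache[idname] = loader(idname)
        return _ffwd_cache[idname]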
Example #29
__author__ = "Sergi Blanch-Torné"
__email__ = "*****@*****.**"
__copyright__ = "Copyright 2015, CELLS / ALBA Synchrotron"
__license__ = "GPLv3+"

from datetime import datetime as _datetime
import logging as _logging
from logging import handlers as _handlers
from multiprocessing import current_process as _currentProcess
import os
from threading import currentThread as _currentThread
from threading import Lock as _Lock
from weakref import ref as _weakref

lock = _Lock()

_logger_NOTSET = _logging.NOTSET  # 0
_logger_CRITICAL = _logging.CRITICAL  # 50
_logger_ERROR = _logging.ERROR  # 40
_logger_WARNING = _logging.WARNING  # 30
_logger_INFO = _logging.INFO  # 20
_logger_DEBUG = _logging.DEBUG  # 10


__all__ = ["Logger"]


class Logger(object):
    '''This class is a very basic debugging flag mode used as a super class
       for the other classes in this library.
Example #30
from ._canonical_names import all_modifiers, sided_modifiers, normalize_name

_modifier_scan_codes = set()
def is_modifier(key):
    """
    Returns True if `key` is a scan code or name of a modifier key.
    """
    if _is_str(key):
        return key in all_modifiers
    else:
        if not _modifier_scan_codes:
            scan_codes = (key_to_scan_codes(name, False) for name in all_modifiers) 
            _modifier_scan_codes.update(*scan_codes)
        return key in _modifier_scan_codes

_pressed_events_lock = _Lock()
_pressed_events = {}
_physically_pressed_keys = _pressed_events
_logically_pressed_keys = {}
class _KeyboardListener(_GenericListener):
    transition_table = {
        #Current state of the modifier, per `modifier_states`.
        #|
        #|             Type of event that triggered this modifier update.
        #|             |
        #|             |         Type of key that triggered this modifier update.
        #|             |         |
        #|             |         |            Should we send a fake key press?
        #|             |         |            |
        #|             |         |     =>     |       Accept the event?
        #|             |         |            |       |
Example #31
 def create_lock():
     """."""
     Channel.LOCK = _Lock()
Example #32
def multithreadingTake(lockObj):
    def sendEvent(eventLst, who):
        eventLst[who].set()
        while eventLst[who].is_set():  # wait for the thread to finish its work
            _sleep(1)
    testName = "Lock take test"
    _printHeader("%s for %s" % (testName, lockObj))
    joinerEvent = _Event()
    joinerEvent.clear()
    userThreads = []
    requestEvents = []
    accessEvents = []
    releaseEvents = []
    printLock = _Lock()
    for i in range(2):
        requestEvent = _Event()
        accessEvent = _Event()
        releaseEvent = _Event()
        userThread = _Thread(target=threadFunction,
                             args=(lockObj, joinerEvent,
                                   requestEvent, accessEvent, releaseEvent,
                                   printLock),
                             name='%d' % (i))
        requestEvents.append(requestEvent)
        accessEvents.append(accessEvent)
        releaseEvents.append(releaseEvent)
        userThreads.append(userThread)
        userThread.start()
    # here is where the test starts ---
    try:
        _printInfo("Initial state %r\n" % (lockObj),
                   level=1, lock=printLock)
        if lockObj.isLock():
            return False, "%s FAILED" % (testName)

        _printInfo("Tell the threads to access",
                   level=1, lock=printLock, top=True)
        sendEvent(accessEvents, 0)
        sendEvent(accessEvents, 1)
        _printInfo("both should have had access",
                   level=1, lock=printLock, bottom=True)

        _printInfo("Thread 0 take the lock",
                   level=1, lock=printLock, top=True)
        sendEvent(requestEvents, 0)
        if not lockObj.isLock() or lockObj.owner != '0':
            raise Exception("It shall be lock by 0")
        _printInfo("Tell the threads to access",
                   level=1, lock=printLock)
        sendEvent(accessEvents, 0)
        sendEvent(accessEvents, 1)
        _printInfo("0 should, but 1 don't",
                   level=1, lock=printLock, bottom=True)

        _printInfo("Try to lock when it is already",
                   level=1, lock=printLock, top=True)
        sendEvent(requestEvents, 1)
        if not lockObj.isLock() or lockObj.owner != '0':
            raise Exception("It shall be lock by user 0")
        _printInfo("Tell the threads to access",
                   level=1, lock=printLock)
        sendEvent(accessEvents, 0)
        sendEvent(accessEvents, 1)
        _printInfo("0 should, but 1 don't",
                   level=1, lock=printLock, bottom=True)

        _printInfo("Try to release by a NON-owner",
                   level=1, lock=printLock, top=True)
        sendEvent(releaseEvents, 1)
        if not lockObj.isLock() or lockObj.owner != '0':
            raise Exception("It shall be lock by user 0")
        _printInfo("Tell the threads to access",
                   level=1, lock=printLock)
        sendEvent(accessEvents, 0)
        sendEvent(accessEvents, 1)
        _printInfo("0 should, but 1 don't",
                   level=1, lock=printLock, bottom=True)

        _printInfo("release the lock",
                   level=1, lock=printLock, top=True)
        sendEvent(releaseEvents, 0)
        if lockObj.isLock():
            raise Exception("It shall be released")
        _printInfo("Tell the threads to access",
                   level=1, lock=printLock)
        sendEvent(accessEvents, 0)
        sendEvent(accessEvents, 1)
        _printInfo("both should have had to",
                   level=1, lock=printLock, bottom=True)

        # TODO: timeout
        _printInfo("Thread 1 take the lock and expire it",
                   level=1, lock=printLock, top=True)
        sendEvent(requestEvents, 1)
        if not lockObj.isLock() or lockObj.owner != '1':
            raise Exception("It shall be lock by 1")
        _printInfo("Tell the threads to access",
                   level=1, lock=printLock)
        sendEvent(accessEvents, 0)
        sendEvent(accessEvents, 1)
        _printInfo("1 should, but 0 don't",
                   level=1, lock=printLock)
        _printInfo("Sleep %d seconds to expire the lock"
                   % TEST_EXPIRATION_TIME,
                   level=1, lock=printLock)
        _sleep(TEST_EXPIRATION_TIME)
        _printInfo("Tell the threads to access",
                   level=1, lock=printLock)
        sendEvent(accessEvents, 0)
        sendEvent(accessEvents, 1)
        _printInfo("both should have had to",
                   level=1, lock=printLock, bottom=True)

        answer = True, "%s PASSED" % (testName)
    except Exception as e:
        print(e)
        print_exc()
        answer = False, "%s FAILED" % (testName)
    joinerEvent.set()
    while len(userThreads) > 0:
        userThread = userThreads.pop()
        userThread.join(1)
        if userThread.is_alive():
            userThreads.append(userThread)
    print("All threads has finished")
    return answer
Example #33
    def put_object(self,
                   locator,
                   path,
                   content=None,
                   headers=None,
                   data_range=None,
                   new_file=False):
        """
        Put object.

        Args:
            locator (str): Locator name.
            path (str): Object path.
            content (bytes-like object): File content.
            headers (dict): Header to put with the file.
            data_range (tuple of int): Range of position of content.
            new_file (bool): If True, force new file creation.

        Returns:
            dict: File header.
        """
        with self._put_lock:
            if new_file:
                self.delete_object(locator, path, not_exists_ok=True)
            try:
                # Existing file
                file = self._get_locator_content(locator)[path]
            except KeyError:
                # New file
                self._get_locator_content(locator)[path] = file = {
                    'Accept-Ranges': 'bytes',
                    'ETag': str(_uuid()),
                    '_content': bytearray(),
                    '_lock': _Lock()
                }

                if self._header_size:
                    file[self._header_size] = 0

                if self._header_ctime:
                    file[self._header_ctime] = self._format_date(_time())

        # Update file
        with file['_lock']:
            if content:
                file_content = file['_content']

                # Write full content
                if not data_range or (data_range[0] is None
                                      and data_range[1] is None):
                    file_content[:] = content

                # Write content range
                else:
                    # Define range
                    start, end = data_range
                    if start is None:
                        start = 0
                    if end is None:
                        end = start + len(content)

                    # Add padding if missing data
                    if start > len(file_content):
                        file_content[len(file_content):start] = (
                            start - len(file_content)) * b'\0'

                    # Flush new content
                    file_content[start:end] = content

            if headers:
                file.update(headers)

            if self._header_size:
                file[self._header_size] = len(file['_content'])

            if self._header_mtime:
                file[self._header_mtime] = self._format_date(_time())

            # Return Header
            header = file.copy()
        del header['_content']
        return header
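
The range-write branch pads the buffer with zero bytes whenever the write starts past the current end. A quick worked example of just that slice arithmetic:

buf = bytearray(b'abc')
start, content = 6, b'XYZ'
if start > len(buf):
    # Pad the gap between the old end and the new start.
    buf[len(buf):start] = (start - len(buf)) * b'\0'
buf[start:start + len(content)] = content
assert buf == bytearray(b'abc\x00\x00\x00XYZ')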
Example #34
def initConfig(**kwargs):
    """
    This method can optionally be called prior to any call to another function in this module.
    It is intended to be called from the MainThread.
    This method can be called with no params.

    Note: this module doesn't use any package-level variables of the hiYaPyCo module.

    :param jinja2Lock: lock to use for synchronization in DisableVarSubst and load().
                      If not supplied, a default RLock is used.
                      Will be available as HiYaPyCo.jinja2Lock.
    :param jinja2ctx: context for overriding values used in the initialization of the Jinja2 Environment (the default undefined is _DebugUndefined):
                      globals will override values via Environment.globals.update(**globals).
                      Default is 'uname': platform.uname.
                      Will be available as HiYaPyCo.jinja2ctx.
    :param load: these params will be used as default values in the load() function. See hiyapyco.load().
                Default values are
                      'method':_hiyapyco.METHOD_SUBSTITUTE,
                      'mergelists':False,
                      'interpolate':True,
                      'castinterpolated':True
                This means, by default:
                      We're replacing lists, not merging them.
                      We're interpolating values in the data (to scalar/list/OrderedDict).
                      We're also casting interpolated values to the appropriate type.
    :param safe_dump: these params will be used as default values in safe_dump() and as_str(). See yaml.dump_all().
                Default values are
                      'default_flow_style':False,
                      'sort_keys':False
                This means, by default:
                       We prefer block style always.
                       We preserve the key order (no sorting of keys in the dictionary).

    If running from the MainThread, this method is idempotent.

    :return:
    """
    jinja2Lock_p = kwargs.get('jinja2Lock', None)
    if jinja2Lock_p is None:
        jinja2Lock_p = _Lock()
    HiYaPyCo.jinja2Lock = jinja2Lock_p

    jinja2ctx_d = kwargs.get('jinja2ctx', {})
    globals_d = jinja2ctx_d.pop('globals', {})
    # jinja2ctx_p = _Environment(undefined=_StrictUndefined)
    jinja2ctx_p = _Environment(**{'undefined': _DebugUndefined, **jinja2ctx_d})
    jinja2ctx_p.globals.update(**{'uname': _uname, **globals_d})

    with HiYaPyCo.jinja2Lock:
        HiYaPyCo.jinja2ctx = jinja2ctx_p

    load_d = kwargs.get('load', {})
    _load_d_p = {
        'method': _hiyapyco.METHOD_SUBSTITUTE,
        'mergelists': False,
        'interpolate': True,
        'castinterpolated': True,
        **load_d
    }

    method = _load_d_p['method']
    # TODO: workaround for a HiYaPyCo._substmerge() bug, see https://github.com/zerwes/hiyapyco/pull/38
    HiYaPyCo._deepmerge = _HiYaPyCo._substmerge
    if method == _hiyapyco.METHOD_MERGE:
        #restore original _deepmerge
        HiYaPyCo._deepmerge = _HiYaPyCo._deepmerge
    global _load_d
    _load_d = _load_d_p

    safe_dump_d = kwargs.get('safe_dump', {})
    global _safe_dump_d
    #See https://github.com/yaml/pyyaml/pull/256
    _safe_dump_d = {
        'default_flow_style': False,
        'sort_keys': False,
        **safe_dump_d
    }
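
A hedged usage sketch of initConfig with every parameter supplied (the values are illustrative overrides, not the defaults):

from threading import RLock

initConfig(
    jinja2Lock=RLock(),                     # custom lock instead of the default
    jinja2ctx={'globals': {'env': 'dev'}},  # extra Jinja2 globals
    load={'mergelists': True},              # override one load() default
    safe_dump={'sort_keys': True},          # override one safe_dump() default
)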
Example #35
 def __init__(self, scpiObj):
     super(LockThreadedTest, self).__init__()
     self._scpiObj = scpiObj
     self._printLock = _Lock()
     self._prepareCommands()
     self._prepareClients()
Example #36
    
    def get_base(self):
        return self.get_current().get_base_transaction()
    
    @contextmanager
    def with_current(self, transaction):
        old = self.current
        self.current = transaction
        try:
            yield
        finally:
            self.current = old

_stm_state = _State()
# Lock that we lock on while committing transactions
_global_lock = _Lock()
# Number of the last transaction to successfully commit. The first transaction
# run will change this to the number 1, the second transaction to the number
# 2, and so on.
_last_transaction = 0


class _Timer(_Thread):
    """
    A timer similar to threading.Timer but with a few differences:
    
        This class waits significantly longer (0.5 seconds at present) between
        checks to see if we've been canceled before we're actually supposed to
        time out. This isn't visible to transactions themselves (they always
        respond instantly to any changes that could make them complete sooner),
        but it 1: saves us a decent bit of CPU time, and 2: means that if a
Example #37
# 2. ... at most this many IP addresses may be using relays
# 3. accounting dict mapping IP addresses to their access information
#
# TODO: If we add additional relay services in the future, those relays should
# use this same structure (i.e. rate-limiting should be in-effect across all
# of the relays that we sponsor).
#
# TODO: If we ever expand to having more than one running instance on Google
# App Engine, this data structure would not be shared between them, and this
# rate-limiting strategy would need to be reconsidered.

# pylint:disable=invalid-name
_LimitLevel = _namedtuple('LimitLevel',
                          ['within', 'max_single', 'max_total', 'lookup'])
_limit_levels = [_LimitLevel(60, 25, 5, {}), _LimitLevel(86400, 500, 100, {})]
_limit_lock = _Lock()
# pylint:enable=invalid-name


def voicetext(environ, start_response):
    """
    After validating the incoming request, retrieve the audio file from
    the upstream VoiceText service, check it, and return it.
    """

    remote_addr = environ.get('REMOTE_ADDR', '')
    if not remote_addr:
        _warn("Relay denied -- no remote IP address")
        start_response(_CODE_403, _HEADERS_JSON)
        return _MSG_DENIED
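
Per the comments above, each _LimitLevel pairs a time window with a per-IP cap, a cap on the number of IPs using relays, and an accounting dict. A hypothetical sketch of how such levels might be consulted under the lock (the exact field semantics are assumptions, not taken from the source):

import time
from collections import namedtuple
from threading import Lock

LimitLevel = namedtuple('LimitLevel', ['within', 'max_single', 'max_total', 'lookup'])
levels = [LimitLevel(60, 25, 5, {}), LimitLevel(86400, 500, 100, {})]
lock = Lock()

def allow(ip):
    # Hypothetical checker: cap hits per IP inside each window, and cap
    # the number of distinct IPs tracked per window.
    now = time.time()
    with lock:
        for lv in levels:
            hits = [t for t in lv.lookup.get(ip, []) if now - t < lv.within]
            if len(hits) >= lv.max_single:
                return False
            if ip not in lv.lookup and len(lv.lookup) >= lv.max_total:
                return False
            lv.lookup[ip] = hits + [now]
    return True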
Example #39
def is_modifier(key):
    """
    Returns True if `key` is a scan code or name of a modifier key.
    """
    if _is_str(key):
        return key in all_modifiers
    else:
        if not _modifier_scan_codes:
            scan_codes = (key_to_scan_codes(name, False)
                          for name in all_modifiers)
            _modifier_scan_codes.update(*scan_codes)
        return key in _modifier_scan_codes


_pressed_events_lock = _Lock()
_pressed_events = {}
_physically_pressed_keys = _pressed_events
_logically_pressed_keys = {}


class _KeyboardListener(_GenericListener):
    transition_table = {
        #Current state of the modifier, per `modifier_states`.
        #|
        #|             Type of event that triggered this modifier update.
        #|             |
        #|             |         Type of key that triggered this modifier update.
        #|             |         |
        #|             |         |            Should we send a fake key press?
        #|             |         |            |
Example #40
class mp_island(object):
    """Multiprocessing island.

    .. versionadded:: 2.10

       The *use_pool* parameter (in previous versions, :class:`~pygmo.mp_island` always used a process pool).

    This user-defined island (UDI) will dispatch evolution tasks to an external Python process
    using the facilities provided by the standard Python :mod:`multiprocessing` module.

    If the construction argument *use_pool* is :data:`True`, then a process from a global
    :class:`pool <multiprocessing.pool.Pool>` shared between different instances of
    :class:`~pygmo.mp_island` will be used. The pool is created either implicitly by the construction
    of the first :class:`~pygmo.mp_island` object or explicitly via the :func:`~pygmo.mp_island.init_pool()`
    static method. The default number of processes in the pool is equal to the number of logical CPUs on the
    current machine. The pool's size can be queried via :func:`~pygmo.mp_island.get_pool_size()`,
    and changed via :func:`~pygmo.mp_island.resize_pool()`. The pool can be stopped via
    :func:`~pygmo.mp_island.shutdown_pool()`.

    If *use_pool* is :data:`False`, each evolution launched by an :class:`~pygmo.mp_island` will be offloaded
    to a new :class:`process <multiprocessing.Process>` which will then be terminated at the end of the evolution.

    Generally speaking, a process pool will be faster (and will use fewer resources) than spawning a new process
    for every evolution. A process pool, however, by its very nature limits the number of evolutions that can
    be run simultaneously on the system, and it introduces a serializing behaviour that might not be desirable
    in certain situations (e.g., when studying parallel evolution with migration in an :class:`~pygmo.archipelago`).

    .. note::

       Due to certain implementation details of CPython, it is not possible to initialise, resize or shutdown the pool
       from a thread different from the main one. Normally this is not a problem, but, for instance, if the first
       :class:`~pygmo.mp_island` instance is created in a thread different from the main one, an error
       will be raised. In such a situation, the user should ensure to call :func:`~pygmo.mp_island.init_pool()`
       from the main thread before spawning the secondary thread.

    .. warning::

       Due to internal limitations of CPython, sending an interrupt signal (e.g., by pressing ``Ctrl+C`` in an interactive
       Python session) while an :class:`~pygmo.mp_island` is evolving might end up sending an interrupt signal also to the
       external evolution process(es). This can lead to unpredictable runtime behaviour (e.g., the session may hang). Although
       pygmo tries hard to limit as much as possible the chances of this occurrence, it cannot eliminate them completely. Users
       are thus advised to tread carefully with interrupt signals (especially in interactive sessions) when using
       :class:`~pygmo.mp_island`.

    .. warning::

       Due to an `upstream bug <https://bugs.python.org/issue38501>`_, when using Python 3.8 the multiprocessing
       machinery may lead to a hangup when exiting a Python session. As a workaround until the bug is resolved, users
       are advised to explicitly call :func:`~pygmo.mp_island.shutdown_pool()` before exiting a Python session.

    """

    # Static variables for the pool.
    _pool_lock = _Lock()
    _pool = None
    _pool_size = None

    def __init__(self, use_pool=True):
        """
        Args:

           use_pool(:class:`bool`): if :data:`True`, a process from a global pool will be used to run the evolution, otherwise a new
              process will be spawned for each evolution

        Raises:

           TypeError: if *use_pool* is not of type :class:`bool`
           unspecified: any exception thrown by :func:`~pygmo.mp_island.init_pool()` if *use_pool* is :data:`True`

        """
        self._init(use_pool)

    def _init(self, use_pool):
        # Implementation of the ctor. Factored out
        # because it's re-used in the pickling support.
        if not isinstance(use_pool, bool):
            raise TypeError(
                "The 'use_pool' parameter in the mp_island constructor must be a boolean, but it is of type {} instead."
                .format(type(use_pool)))
        self._use_pool = use_pool
        if self._use_pool:
            # Init the process pool, if necessary.
            mp_island.init_pool()
        else:
            # Init the pid member and associated lock.
            self._pid_lock = _Lock()
            self._pid = None

    @property
    def use_pool(self):
        """Pool usage flag (read-only).

        Returns:

           :class:`bool`: :data:`True` if this island uses a process pool, :data:`False` otherwise

        """
        return self._use_pool

    def __copy__(self):
        # For copy/deepcopy, construct a new instance
        # with the same arguments used to construct self.
        # NOTE: no need for locking, as _use_pool is set
        # on construction and never touched again.
        return mp_island(self._use_pool)

    def __deepcopy__(self, d):
        return self.__copy__()

    def __getstate__(self):
        # For pickle/unpickle, we employ the construction
        # argument, which will be used to re-init the class
        # during unpickle.
        return self._use_pool

    def __setstate__(self, state):
        # NOTE: we need to do a full init of the object,
        # in order to set the use_pool flag and, if necessary,
        # construct the _pid and _pid_lock objects.
        self._init(state)

    def run_evolve(self, algo, pop):
        """Evolve population.

        This method will evolve the input :class:`~pygmo.population` *pop* using the input
        :class:`~pygmo.algorithm` *algo*, and return *algo* and the evolved population. The evolution
        is run either on one of the processes of the pool backing :class:`~pygmo.mp_island`, or in
        a new separate process. If this island is using a pool, and the pool was previously
        shut down via :func:`~pygmo.mp_island.shutdown_pool()`, an exception will be raised.

        Args:

           algo(:class:`~pygmo.algorithm`): the input algorithm
           pop(:class:`~pygmo.population`): the input population

        Returns:

           :class:`tuple`: a tuple of 2 elements containing *algo* (i.e., the :class:`~pygmo.algorithm` object that was used for the evolution) and the evolved :class:`~pygmo.population`

        Raises:

           RuntimeError: if the pool was manually shut down via :func:`~pygmo.mp_island.shutdown_pool()`
           unspecified: any exception thrown by the evolution, by the (de)serialization
             of the input arguments or of the return value, or by the public interface of the
             process pool


        """
        # NOTE: the idea here is that we pass the *already serialized*
        # arguments to the mp machinery, instead of letting the multiprocessing
        # module do the serialization. The advantage of doing so is
        # that if there are serialization errors, we catch them early here rather
        # than failing in the bootstrap phase of the remote process, which
        # can lead to hangups.
        import pickle
        ser_algo_pop = pickle.dumps((algo, pop))

        if self._use_pool:
            with mp_island._pool_lock:
                # NOTE: run this while the pool is locked. We have
                # functions to modify the pool (e.g., resize()) and
                # we need to make sure we are not trying to touch
                # the pool while we are sending tasks to it.
                if mp_island._pool is None:
                    raise RuntimeError(
                        "The multiprocessing island pool was stopped. Please restart it via mp_island.init_pool()."
                    )
                res = mp_island._pool.apply_async(_evolve_func_mp_pool,
                                                  (ser_algo_pop, ))
            # NOTE: there might be a bug in need of a workaround lurking in here:
            # http://stackoverflow.com/questions/11312525/catch-ctrlc-sigint-and-exit-multiprocesses-gracefully-in-python
            # Just keep it in mind.
            return pickle.loads(res.get())
        else:
            from ._mp_utils import _get_spawn_context

            # Get the context for spawning the process.
            mp_ctx = _get_spawn_context()

            parent_conn, child_conn = mp_ctx.Pipe(duplex=False)
            p = mp_ctx.Process(target=_evolve_func_mp_pipe,
                               args=(child_conn, ser_algo_pop))
            p.start()
            with self._pid_lock:
                self._pid = p.pid
            # NOTE: after setting the pid, wrap everything
            # in a try block with a finally clause for
            # resetting the pid to None. This way, even
            # if there are exceptions, we are sure the pid
            # is set back to None.
            try:
                res = parent_conn.recv()
                p.join()
            finally:
                with self._pid_lock:
                    self._pid = None
            if isinstance(res, RuntimeError):
                raise res
            return pickle.loads(res)

    @property
    def pid(self):
        """ID of the evolution process (read-only).

        This property is available only if the island is *not* using a process pool.

        Returns:

           :class:`int`: the ID of the process running the current evolution, or :data:`None` if no evolution is ongoing

        Raises:

           ValueError: if the island is using a process pool

        """
        if self._use_pool:
            raise ValueError(
                "The 'pid' property is available only when the island is configured to spawn new processes, but this mp_island is using a process pool instead."
            )
        with self._pid_lock:
            pid = self._pid
        return pid

    def get_name(self):
        """Island's name.

        Returns:

           :class:`str`: ``"Multiprocessing island"``

        """
        return "Multiprocessing island"

    def get_extra_info(self):
        """Island's extra info.

        If the island uses a process pool and the pool was previously shut down via :func:`~pygmo.mp_island.shutdown_pool()`,
        invoking this function will trigger the creation of a new pool.

        Returns:

           :class:`str`: a string containing information about the state of the island (e.g., number of processes in the pool, ID of the evolution process, etc.)

        Raises:

           unspecified: any exception thrown by :func:`~pygmo.mp_island.get_pool_size()`

        """
        retval = "\tUsing a process pool: {}\n".format(
            "yes" if self._use_pool else "no")
        if self._use_pool:
            retval += "\tNumber of processes in the pool: {}".format(
                mp_island.get_pool_size())
        else:
            with self._pid_lock:
                pid = self._pid
            if pid is None:
                retval += "\tNo active evolution process"
            else:
                retval += "\tEvolution process ID: {}".format(pid)
        return retval

    @staticmethod
    def _init_pool_impl(processes):
        # Implementation method for initing
        # the pool. This will *not* do any locking.
        from ._mp_utils import _make_pool

        if mp_island._pool is None:
            mp_island._pool, mp_island._pool_size = _make_pool(processes)

    @staticmethod
    def init_pool(processes=None):
        """Initialise the process pool.

        This method will initialise the process pool backing :class:`~pygmo.mp_island`, if the pool
        has not been initialised yet or if the pool was previously shut down via :func:`~pygmo.mp_island.shutdown_pool()`.
        Otherwise, this method will have no effects.

        Args:

           processes(:data:`None` or an :class:`int`): the size of the pool (if :data:`None`, the size of the pool will be
             equal to the number of logical CPUs on the system)

        Raises:

           ValueError: if the pool does not exist yet and the function is being called from a thread different
             from the main one, or if *processes* is a non-positive value
           TypeError: if *processes* is not :data:`None` and not an :class:`int`

        """
        with mp_island._pool_lock:
            mp_island._init_pool_impl(processes)

    @staticmethod
    def get_pool_size():
        """Get the size of the process pool.

        If the process pool was previously shut down via :func:`~pygmo.mp_island.shutdown_pool()`, invoking this
        function will trigger the creation of a new pool.

        Returns:

           :class:`int`: the current size of the pool

        Raises:

           unspecified: any exception thrown by :func:`~pygmo.mp_island.init_pool()`

        """
        with mp_island._pool_lock:
            mp_island._init_pool_impl(None)
            return mp_island._pool_size

    @staticmethod
    def resize_pool(processes):
        """Resize pool.

        This method will resize the process pool backing :class:`~pygmo.mp_island`.

        If the process pool was previously shut down via :func:`~pygmo.mp_island.shutdown_pool()`, invoking this
        function will trigger the creation of a new pool.

        Args:

           processes(:class:`int`): the desired number of processes in the pool

        Raises:

           TypeError: if the *processes* argument is not an :class:`int`
           ValueError: if the *processes* argument is not strictly positive
           unspecified: any exception thrown by :func:`~pygmo.mp_island.init_pool()`

        """
        from ._mp_utils import _make_pool

        if not isinstance(processes, int):
            raise TypeError("The 'processes' argument must be an int")
        if processes <= 0:
            raise ValueError(
                "The 'processes' argument must be strictly positive")

        with mp_island._pool_lock:
            # NOTE: this will either init a new pool
            # with the requested number of processes,
            # or do nothing if the pool exists already.
            mp_island._init_pool_impl(processes)
            if processes == mp_island._pool_size:
                # Don't do anything if we are not changing
                # the size of the pool.
                return
            # Create new pool.
            new_pool, new_size = _make_pool(processes)
            # Stop the current pool.
            mp_island._pool.close()
            mp_island._pool.join()
            # Assign the new pool.
            mp_island._pool = new_pool
            mp_island._pool_size = new_size

    @staticmethod
    def shutdown_pool():
        """Shutdown pool.

        .. versionadded:: 2.8

        This method will shut down the process pool backing :class:`~pygmo.mp_island`, after
        all pending tasks in the pool have completed.

        After the process pool has been shut down, attempting to run an evolution on the island
        will raise an error. A new process pool can be created via an explicit call to
        :func:`~pygmo.mp_island.init_pool()` or one of the methods of the public API of
        :class:`~pygmo.mp_island` which trigger the creation of a new process pool.

        """
        with mp_island._pool_lock:
            if mp_island._pool is not None:
                mp_island._pool.close()
                mp_island._pool.join()
                mp_island._pool = None
                mp_island._pool_size = None
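
For reference, a minimal usage sketch of the pool-management API above (assuming pygmo is installed; on platforms that spawn processes this must run under the `if __name__ == '__main__':` guard):

import pygmo as pg

if __name__ == '__main__':
    # Explicitly create a pool with 4 worker processes.
    pg.mp_island.init_pool(4)
    print(pg.mp_island.get_pool_size())  # 4

    # Grow the pool; the old pool is closed and joined before being replaced.
    pg.mp_island.resize_pool(8)

    # Evolve a population on an island backed by the pool.
    isl = pg.island(algo=pg.de(gen=100), prob=pg.rosenbrock(10),
                    size=20, udi=pg.mp_island())
    isl.evolve()
    isl.wait_check()

    # Shut the worker processes down once done.
    pg.mp_island.shutdown_pool()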
Example #41
    def __init__(self,
                 pru,
                 prucqueue,
                 psmodel,
                 devices,
                 processing=False,
                 scanning=False,
                 freq=None,
                 init=True):
        """Init."""
        # --- Init structures ---

        print()
        print('PRUController: struct initialization')
        print('devices: {}'.format(devices))

        # init timestamp
        self._timestamp_update = _time.time()

        # start timing of struct initialization
        t0_ = _time.time()

        # flag indicating whether the controller is in SOFB mode
        self._sofb_mode = False

        # index of device in self._device_ids for next update in SOFB mode
        self._sofb_update_dev_idx = 0  # cyclical updates!

        # create lock
        self._lock = _Lock()

        # PRU communication object
        self._pru = pru

        # store power supply model
        self._psmodel = psmodel

        # devices
        self._devices = _dcopy(devices)

        # sorted list of device ids
        self._device_ids = sorted([dev[1] for dev in devices])

        # initialize UDC
        self._udc, self._parms, self._psupplies = PRUController._init_udc(
            pru, self._psmodel.name, self._device_ids, freq)

        # index of device in self._device_ids for wfmref update
        self._wfm_update = False
        self._wfm_update_dev_idx = 0  # cyclical updates!

        # update time interval attribute
        self._scan_interval = self._get_scan_interval()

        # end timing of struct initialization
        t1_ = _time.time()
        print('TIMING struct init [{:.3f} ms]'.format(1000 * (t1_ - t0_)))

        # attributes that control processing flow
        self._queue = prucqueue
        self._processing = processing
        self._scanning = scanning

        # start communications
        self._dev_idx_last_scanned = None
        self._thread_process = None
        self._thread_scan = None
        if init:
            self.bsmp_init_communication()
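
The constructor above hangs a scan thread and a process thread off a shared queue; below is a stripped-down, hypothetical sketch of that producer/consumer pattern (all names invented for illustration):

import time
from queue import Queue
from threading import Thread, Lock

class MiniController:
    def __init__(self):
        self._queue = Queue()
        self._lock = Lock()
        self._scanning = True
        self._processing = True
        self._thread_scan = Thread(target=self._loop_scan, daemon=True)
        self._thread_process = Thread(target=self._loop_process, daemon=True)
        self._thread_scan.start()
        self._thread_process.start()

    def _loop_scan(self):
        # Producer: periodically enqueue device-update operations.
        while self._scanning:
            self._queue.put(('update_state', time.time()))
            time.sleep(0.1)

    def _loop_process(self):
        # Consumer: pop operations and apply them under the lock.
        while self._processing:
            operation, payload = self._queue.get()
            with self._lock:
                pass  # talk to the hardware here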
Example #42
def multithreading_take(lock_obj):
    def send_event(event_lst, who):
        event_lst[who].set()
        while event_lst[who].is_set():  # wait for the thread to do its work
            _sleep(1)

    test_name = "Lock take test"
    _print_header("{} for {}".format(test_name, lock_obj))
    joiner_event = _Event()
    joiner_event.clear()
    user_threads = []
    request_events = []
    access_events = []
    release_events = []
    print_lock = _Lock()
    for i in range(2):
        request_event = _Event()
        access_event = _Event()
        release_event = _Event()
        user_thread = _Thread(target=thread_function,
                              args=(lock_obj, joiner_event, request_event,
                                    access_event, release_event, print_lock),
                              name='{:d}'.format(i))
        request_events.append(request_event)
        access_events.append(access_event)
        release_events.append(release_event)
        user_threads.append(user_thread)
        user_thread.start()
    # here is where the test starts ---
    try:
        _print_info("Initial state {!r}\n".format(lock_obj),
                    level=1,
                    lock=print_lock)
        if lock_obj.isLock():
            return False, "{} FAILED".format(test_name)

        _print_info("Tell the threads to access",
                    level=1,
                    lock=print_lock,
                    top=True)
        send_event(access_events, 0)
        send_event(access_events, 1)
        _print_info("both should have had access",
                    level=1,
                    lock=print_lock,
                    bottom=True)

        _print_info("Thread 0 take the lock",
                    level=1,
                    lock=print_lock,
                    top=True)
        send_event(request_events, 0)
        if not lock_obj.isLock() or lock_obj.owner != '0':
            raise Exception("It shall be lock by 0")
        _print_info("Tell the threads to access", level=1, lock=print_lock)
        send_event(access_events, 0)
        send_event(access_events, 1)
        _print_info("0 should, but 1 don't",
                    level=1,
                    lock=print_lock,
                    bottom=True)

        _print_info("Try to lock when it is already",
                    level=1,
                    lock=print_lock,
                    top=True)
        send_event(request_events, 1)
        if not lock_obj.isLock() or lock_obj.owner != '0':
            raise Exception("It shall be lock by user 0")
        _print_info("Tell the threads to access", level=1, lock=print_lock)
        send_event(access_events, 0)
        send_event(access_events, 1)
        _print_info("0 should, but 1 don't",
                    level=1,
                    lock=print_lock,
                    bottom=True)

        _print_info("Try to release by a NON-owner",
                    level=1,
                    lock=print_lock,
                    top=True)
        send_event(release_events, 1)
        if not lock_obj.isLock() or lock_obj.owner != '0':
            raise Exception("It shall be lock by user 0")
        _print_info("Tell the threads to access", level=1, lock=print_lock)
        send_event(access_events, 0)
        send_event(access_events, 1)
        _print_info("0 should, but 1 don't",
                    level=1,
                    lock=print_lock,
                    bottom=True)

        _print_info("release the lock", level=1, lock=print_lock, top=True)
        send_event(release_events, 0)
        if lock_obj.isLock():
            raise Exception("It shall be released")
        _print_info("Tell the threads to access", level=1, lock=print_lock)
        send_event(access_events, 0)
        send_event(access_events, 1)
        _print_info("both should have had to",
                    level=1,
                    lock=print_lock,
                    bottom=True)

        # TODO: timeout
        _print_info("Thread 1 take the lock and expire it",
                    level=1,
                    lock=print_lock,
                    top=True)
        send_event(request_events, 1)
        if not lock_obj.isLock() or lock_obj.owner != '1':
            raise Exception("It shall be lock by 1")
        _print_info("Tell the threads to access", level=1, lock=print_lock)
        send_event(access_events, 0)
        send_event(access_events, 1)
        _print_info("1 should, but 0 don't", level=1, lock=print_lock)
        _print_info("Sleep {:d} seconds to expire the lock".format(
            TEST_EXPIRATION_TIME),
                    level=1,
                    lock=print_lock)
        _sleep(TEST_EXPIRATION_TIME)
        _print_info("Tell the threads to access", level=1, lock=print_lock)
        send_event(access_events, 0)
        send_event(access_events, 1)
        _print_info("both should have had to",
                    level=1,
                    lock=print_lock,
                    bottom=True)

        answer = True, "{} PASSED".format(test_name)
    except Exception as e:
        print(e)
        print_exc()
        answer = False, "{} FAILED".format(test_name)
    joiner_event.set()
    while len(user_threads) > 0:
        user_thread = user_threads.pop()
        user_thread.join(1)
        if user_thread.is_alive():
            user_threads.append(user_thread)
    print("All threads has finished")
    return answer
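
The test drives a `thread_function` worker that is not included in this snippet; below is a hypothetical reconstruction of the event protocol it must implement (the `request`/`access`/`release` method names on `lock_obj` are assumptions inferred from the assertions above):

def thread_function(lock_obj, joiner_event, request_event,
                    access_event, release_event, print_lock):
    from threading import current_thread
    who = current_thread().name  # '0' or '1', matching lock_obj.owner
    while not joiner_event.is_set():
        if request_event.is_set():
            lock_obj.request(who)           # assumed lock API
            request_event.clear()           # acknowledge, unblocks send_event()
        if access_event.is_set():
            granted = lock_obj.access(who)  # assumed lock API
            _print_info("thread {} access granted: {}".format(who, granted),
                        level=2, lock=print_lock)
            access_event.clear()
        if release_event.is_set():
            lock_obj.release(who)           # assumed lock API
            release_event.clear()
        _sleep(0.1)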
Example #43
class FpgaDriver(_FpgaDriverBase):
    """
    Generates functions to use XRT with accelize_drm.DrmManager.

    Args:
        fpga_slot_id (int): FPGA slot ID.
        fpga_image (str): Path to ".xclbin" binary to use to program FPGA.
        drm_ctrl_base_addr (int): DRM Controller base address.
        log_dir (path-like object): directory where XRT will output log file.
    """
    _name = _match(r'_(.+)\.py', _basename(__file__)).group(1)
    _reglock = _Lock()

    @staticmethod
    def _get_xrt_lib():
        """
        Detect the XRT installation path.
        """
        for prefix in (_environ.get("XILINX_XRT",
                                    "/opt/xilinx/xrt"), '/usr', '/usr/local'):
            if _isfile(_join(prefix, 'bin', 'xbutil')):
                return prefix
        raise RuntimeError('Unable to find Xilinx XRT')

    @staticmethod
    def _get_driver():
        """
        Get FPGA driver

        Returns:
            ctypes.CDLL: FPGA driver.
        """
        xrt_path = FpgaDriver._get_xrt_lib()
        if _isfile(_join(xrt_path, 'lib', 'libxrt_aws.so')):
            print('Loading XRT API library for AWS targets')
            fpga_library = _cdll.LoadLibrary(
                _join(xrt_path, 'lib', 'libxrt_aws.so'))
        elif _isfile(_join(xrt_path, 'lib', 'libxrt_core.so')):
            print('Loading XRT API library for Xilinx targets')
            fpga_library = _cdll.LoadLibrary(
                _join(xrt_path, 'lib', 'libxrt_core.so'))
        else:
            raise RuntimeError('Unable to find Xilinx XRT Library')
        return fpga_library

    @staticmethod
    def _get_xbutil():
        xrt_path = FpgaDriver._get_xrt_lib()
        _xbutil_path = _join(xrt_path, 'bin', 'awssak')
        if not _isfile(_xbutil_path):
            _xbutil_path = _join(xrt_path, 'bin', 'xbutil')
        if not _isfile(_xbutil_path):
            raise RuntimeError('Unable to find Xilinx XRT Board Utility')
        return _xbutil_path

    @property
    def _xbutil(self):
        return self._get_xbutil()

    def _get_lock(self):
        """
        Get a lock on the FPGA driver
        """
        def create_lock():
            return XrtLock(self)

        return create_lock

    def _clear_fpga(self):
        """
        Clear FPGA
        """
        clear_fpga = _run(
            ['fpga-clear-local-image', '-S',
             str(self._fpga_slot_id)],
            stderr=_STDOUT,
            stdout=_PIPE,
            universal_newlines=True,
            check=False)
        if clear_fpga.returncode:
            raise RuntimeError(clear_fpga.stdout)
        print('FPGA cleared')

    def _program_fpga(self, fpga_image):
        """
        Program the FPGA with the specified image.

        Args:
            fpga_image (str): FPGA image.
        """
        # Vitis does not reprogram an FPGA that already holds the requested
        # bitstream, so to force reprogramming we first write a clear bitstream.
        clear_image = _join(SCRIPT_DIR, 'clear.awsxclbin')
        load_image = _run([
            self._xbutil, 'program', '-d',
            str(self._fpga_slot_id), '-p', clear_image
        ],
                          stderr=_STDOUT,
                          stdout=_PIPE,
                          universal_newlines=True,
                          check=False)
        if load_image.returncode:
            raise RuntimeError(load_image.stdout)
        print('Cleared AWS XRT slot #%d' % self._fpga_slot_id)

        # Now load the real image
        fpga_image = _realpath(_fsdecode(fpga_image))
        load_image = _run([
            self._xbutil, 'program', '-d',
            str(self._fpga_slot_id), '-p', fpga_image
        ],
                          stderr=_STDOUT,
                          stdout=_PIPE,
                          universal_newlines=True,
                          check=False)
        if load_image.returncode:
            raise RuntimeError(load_image.stdout)
        print('Programmed AWS XRT slot #%d with FPGA image %s' %
              (self._fpga_slot_id, fpga_image))

    def _reset_fpga(self):
        """
        Reset FPGA including FPGA image.
        """
        reset_image = _run(
            [self._xbutil, 'reset', '-d',
             str(self._fpga_slot_id)],
            stderr=_STDOUT,
            stdout=_PIPE,
            universal_newlines=True,
            check=False)
        if reset_image.returncode:
            raise RuntimeError(reset_image.stdout)

    def _init_fpga(self):
        """
        Initialize FPGA handle with driver library.
        """
        # Find all devices
        xcl_probe = self._fpga_library.xclProbe
        xcl_probe.restype = _c_uint  # Devices count

        if xcl_probe() < 1:
            raise RuntimeError("xclProbe does not found devices")

        # Open device
        xcl_open = self._fpga_library.xclOpen
        xcl_open.restype = _POINTER(_c_void_p)  # Device handle
        xcl_open.argtypes = (
            _c_uint,  # deviceIndex
            _c_char_p,  # logFileName
            _c_int,  # level
        )
        log_file = _join(self._log_dir, 'slot_%d_xrt.log' % self._fpga_slot_id)
        device_handle = xcl_open(
            self._fpga_slot_id,
            log_file.encode(),
            3  # XCL_ERROR
        )
        if not device_handle:
            raise RuntimeError("xclOpen failed to open device")
        self._fpga_handle = device_handle

    def _get_read_register_callback(self):
        """
        Read register callback.

        Returns:
            function: Read register callback
        """
        xcl_read = self._fpga_library.xclRead
        xcl_read.restype = _c_size_t  # read size or error code
        xcl_read.argtypes = (
            _c_void_p,  # handle
            _c_int,  # space
            _c_uint64,  # offset
            _c_void_p,  # hostBuf
            _c_size_t  # size
        )
        self._fpga_read_register = xcl_read

        def read_register(register_offset, returned_data, driver=self):
            """
            Read register.

            Args:
                register_offset (int): Offset
                returned_data (int pointer): Return data.
                driver (accelize_drm.fpga_drivers._aws_xrt.FpgaDriver):
                    Keep a reference to driver.
            """
            with driver._fpga_read_register_lock():
                size_or_error = driver._fpga_read_register(
                    driver._fpga_handle,
                    2,  # XCL_ADDR_KERNEL_CTRL
                    driver._drm_ctrl_base_addr + register_offset,
                    returned_data,
                    4  # 4 bytes
                )

            # Return 0 if the full 4 bytes were read, otherwise
            # return the error code.
            return size_or_error if size_or_error != 4 else 0

        return read_register

    def _get_write_register_callback(self):
        """
        Write register callback.

        Returns:
            function: Write register callback
        """
        xcl_write = self._fpga_library.xclWrite
        xcl_write.restype = _c_size_t  # written size or error code
        xcl_write.argtypes = (
            _c_void_p,  # handle
            _c_int,  # space
            _c_uint64,  # offset
            _c_char_p,  # hostBuf
            _c_size_t  # size
        )
        self._fpga_write_register = xcl_write

        def write_register(register_offset, data_to_write, driver=self):
            """
            Write register.

            Args:
                register_offset (int): Offset
                data_to_write (int): Data to write.
                driver (accelize_drm.fpga_drivers._aws_xrt.FpgaDriver):
                    Keep a reference to driver.
            """
            with driver._fpga_write_register_lock():
                size_or_error = driver._fpga_write_register(
                    driver._fpga_handle,
                    2,  # XCL_ADDR_KERNEL_CTRL
                    driver._drm_ctrl_base_addr + register_offset,
                    data_to_write.to_bytes(4, byteorder="little"),
                    4  # 4 bytes
                )

            # Return 0 if the full 4 bytes were written, otherwise
            # return the error code.
            return size_or_error if size_or_error != 4 else 0

        return write_register
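
For context, a hedged sketch of how register callbacks like the ones above are typically handed to `accelize_drm.DrmManager` (the configuration paths are placeholders, and the `read_register_callback`/`write_register_callback` accessor names are assumed to be exposed by `_FpgaDriverBase`):

from accelize_drm import DrmManager

driver = FpgaDriver(fpga_slot_id=0,
                    fpga_image='design.awsxclbin',  # placeholder image
                    drm_ctrl_base_addr=0x0,
                    log_dir='/tmp')

drm_manager = DrmManager(
    'conf.json',                      # placeholder DRM configuration
    'cred.json',                      # placeholder credentials
    driver.read_register_callback,    # assumed accessor name
    driver.write_register_callback,   # assumed accessor name
)
drm_manager.activate()
# ... run the accelerated workload ...
drm_manager.deactivate()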

def mask_to_indices(mask, shape, axis):
    """Convert a boolean or integer mask into a list of indices along
    the given axis (signature reconstructed; the original snippet was
    missing its function header)."""
    import numpy
    mask = numpy.asarray(mask)
    dtype = mask.dtype
    size = shape[axis]
    if dtype.kind == "b":
        if len(mask) != size:
            raise ValueError("Mask size does not match the shape.")
        indices = [i for i, m in enumerate(mask) if m]
    elif dtype.kind == "i":
        indices = mask
    else:
        raise TypeError("Mask must be a boolean or an integer array.")
    return indices
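
Assuming the reconstructed signature above, a quick check of the helper:

import numpy
print(mask_to_indices(numpy.array([True, False, True]), shape=(3,), axis=0))  # [0, 2]
print(mask_to_indices(numpy.array([0, 2]), shape=(3,), axis=0))               # [0 2]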


from threading import Lock as _Lock
_global_id = 0
_global_id_lock = _Lock()
 
def range_generator():
    # Thread-safe generator of monotonically increasing integer ids.
    global _global_id
    while True:
        with _global_id_lock:
            new_id = _global_id
            _global_id += 1
        yield new_id

def uuid_generator():
    import uuid
    while True:
        yield str(uuid.uuid4())

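A quick usage sketch of the two id generators above:

ids = range_generator()
print(next(ids), next(ids))  # 0 1

uuids = uuid_generator()
print(next(uuids))  # a random UUID4 string
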
import Orange.feature