Example #1
    def _connect_device(self):
        # Map the search mode to the integer constants expected by the SDK
        sm = {'auto': 0, 'usb': 1, 'bluetooth': 2}
        ret = self.lib.InitGenericDevice(self.callback, sm[self.search_mode],
                                         self.serial_number)
        if ret != 0:
            if ret == -6:
                # A return code of -6 triggers the vendor authentication dialog
                auth = self.lib.ShowAuthenticationWindow()
                if auth == 1:
                    raise WorkerInterrupt('Authentication failed')
            else:
                raise WorkerInterrupt(self.ErrorCodeMessage[abs(ret)])
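A common pattern in these examples: a vendor SDK call returns a nonzero status, which is translated into a WorkerInterrupt through an error-code lookup table (ErrorCodeMessage) indexed by the absolute value of the return code; the special code -6 additionally triggers the vendor's authentication dialog.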
Example #2
    def update(self):

        if self._current > self._stop:
            raise WorkerInterrupt('No more data.')

        min = self._current

        if self._timespan:
            max = min + self._timespan
        else:
            now = clock.now()
            elapsed = now - self._last
            max = min + elapsed * self._speed
            self._last = now

        for key, source in self._sources.items():

            # Select data
            data = self._store.select(key, 'index >= min & index < max')

            # Add offset
            if self._resync:
                data.index += self._offset

            # Update port
            getattr(self, source['name']).data = data
            getattr(self, source['name']).meta = source['meta']

        self._current = max
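A note on this pattern: the where-string 'index >= min & index < max' works because pandas resolves plain names in HDFStore query expressions from the calling scope (here shadowing the min/max builtins). A minimal self-contained sketch of the same query pattern, assuming PyTables is installed and using the hypothetical names lo/hi and demo.h5:

    import numpy as np
    import pandas as pd

    # Build a small table-format store to query against
    idx = pd.date_range('2021-01-01', periods=10, freq='s')
    df = pd.DataFrame({'a': np.arange(10)}, index=idx)
    with pd.HDFStore('demo.h5', mode='w') as store:
        store.put('demo', df, format='table')

    # Plain variable names in the where-string are resolved from local scope
    lo, hi = idx[2], idx[5]
    with pd.HDFStore('demo.h5', mode='r') as store:
        print(store.select('demo', 'index >= lo & index < hi'))  # rows 2..4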
Example #3
    def update(self):

        # At this point, we are sure that we have some data to process

        if self._thread_status is None:
            # When we have not received data, there is nothing to do
            if not self.i.ready():
                return
            self._cross_validate()

        if self._thread_status == 'FAILED':
            raise WorkerInterrupt('Estimator fit failed.')

        elif self._thread_status == 'SUCCESS':
            self.logger.info(
                f'Scores from cross-validation are: {self._scores}. ')

            # send an event to announce that fitting is ready.
            self.o_events.data = pd.DataFrame(
                index=[pd.Timestamp(time(), unit='s')],
                columns=['label', 'data'],
                data=[[self._event_label, self._scores]])

            self._reset()
        else:  # self._thread_status == 'WORKING'
            return
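This update polls a background thread through _thread_status: on the first ready input it launches cross-validation, a FAILED status raises WorkerInterrupt, SUCCESS publishes the scores as an event, and WORKING simply returns until the next cycle.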
Example #4
    def _load_lib(self):
        os_name = platform.system()
        # Pointer size in bits: 32 or 64
        bitness = 8 * struct.calcsize('P')
        if os_name in ['Windows', 'Microsoft']:
            libname = ('GenericDeviceInterfaceDLL.dll' if bitness == 32
                       else 'GenericDeviceInterfaceDLL_x64.dll')
        else:
            raise WorkerInterrupt('Operating system not compatible')
        libpath = os.path.join(os.path.dirname(__file__), '../libs', libname)
        self.lib = CDLL(libpath)
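struct.calcsize('P') returns the size of a C pointer in bytes, so multiplying by 8 yields the interpreter's bitness; a quick standalone check:

    import platform
    import struct

    print(platform.system())         # e.g. 'Windows', 'Linux', 'Darwin'
    print(8 * struct.calcsize('P'))  # 32 or 64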
Example #5
    def _set_config(self, match):
        """ Initialize the speller configuration. """

        data = match[self._event_data].values[0]
        if isinstance(data, str):
            try:
                data = json.loads(data)
            except ValueError:
                raise WorkerInterrupt(f'Could not deserialize data from event'
                                      f' {self._event_label_config}.')
        try:
            self._symbols = data['symbols']
            self._repetitions = data['repetitions']
            self._groups = data['groups']
        except KeyError as k:
            raise WorkerInterrupt(
                f'Could not configure {k} from event '
                f'{self._event_label_config}.')

        self._nb_groups = len(self._groups)
        self._nb_symbols = len(self._symbols)
        # Check that each symbol appears in the same number of groups
        _unique_occurrence_symbol_per_group = np.unique([
            np.sum([str(symbols_idx) in group for group in self._groups])
            for symbols_idx in range(self._nb_symbols)
        ])
        if len(_unique_occurrence_symbol_per_group) != 1:
            # TODO: raise WorkerInterrupt instead of warning
            self.logger.warning(
                f'Each symbol should occur the same number of times across '
                f'groups. Found {_unique_occurrence_symbol_per_group}.')

        self._nb_occurrence_symbol_per_round = (
            _unique_occurrence_symbol_per_group[0])
        self.logger.info(
            f'Set configuration for P300 interface with a grid of '
            f'{self._nb_symbols} symbols flashing '
            f'{self._nb_occurrence_symbol_per_round} times per round and '
            f'{self._repetitions} rounds per block.')
        self._node_ready = True
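The configuration event is parsed defensively: malformed JSON or a missing 'symbols', 'repetitions' or 'groups' key raises WorkerInterrupt immediately, while an unbalanced symbol-to-group assignment is, for now, only logged as a warning (see the TODO).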
Example #6
    def update(self):

        if not self.i.ready():
            return
        self.o.meta = self.i.meta
        if not self._ready:
            try:
                self._query()
                self._ready = True
            except KeyError as e:
                raise WorkerInterrupt(e)
        else:
            self._query()
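On the first ready input, a KeyError raised by _query is promoted to WorkerInterrupt; once _ready is set, the same exception would propagate unwrapped.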
Example #7
    def update(self):
        """Monitor proxy"""
        if self._timeout == 0:
            # A timeout of 0 disables monitoring
            return
        now = time.time()
        count = 0
        try:
            # Drain all pending monitor messages without blocking
            while True:
                self._monitor.recv_multipart(zmq.NOBLOCK, copy=False)
                self._last_event = now
                count += 1
        except zmq.ZMQError:
            if count > 0:
                self.logger.debug("Received %d messages", count)
            if (now - self._last_event) > self._timeout:
                raise WorkerInterrupt("No data after %d seconds" %
                                      self._timeout)
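The while True / zmq.NOBLOCK loop drains all pending monitor messages and relies on zmq.ZMQError (EAGAIN) to signal an empty queue; only then is the elapsed time since the last event checked against the timeout.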
Example #8
    def __init__(self):

        self.device = None

        # Setup
        try:
            # On Unix systems, we need to manually set the product and vendor IDs
            ftd.setVIDPID(VID, PID)
        except AttributeError:
            # The method is not available on Windows
            pass

        # Connect
        try:
            # Open the first FTDI device
            self.device = ftd.open(0)
            # Get info
            self.logger.info(self.device.getDeviceInfo())
        except ftd.ftd2xx.DeviceError:
            # Could not open device
            raise WorkerInterrupt('Could not open device')

        # Initialize connection
        if self.device:
            self.device.setBaudRate(921600)
            self.device.setFlowControl(ftd.defines.FLOW_NONE, 0, 0)
            self.device.setDataCharacteristics(ftd.defines.BITS_8,
                                               ftd.defines.STOP_BITS_1,
                                               ftd.defines.PARITY_NONE)
            self.device.setTimeouts(2000, 2000)
            self.device.setLatencyTimer(2)
            self.device.setUSBParameters(BUFFER_SIZE, BUFFER_SIZE)

        # Start acquisition
        self.packet_count = 0
        self.time_delta = {
            '1024Hz': np.timedelta64(int(1e9 / 1024), 'ns'),
            '256Hz': np.timedelta64(int(1e9 / 256), 'ns'),
        }
        self.start()
        self.time_start = now()
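Note the two-stage setup: ftd.setVIDPID is wrapped in try/except AttributeError because the call is only available in Unix builds of the ftd2xx bindings, while a DeviceError when opening the first device is converted into WorkerInterrupt.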
Example #9
File: epoch.py Project: dojeda/timeflux
    def _valid_port(self, port):
        """ Checks that the port has valid meta and data. """
        if port.data is None or port.data.empty:
            return False
        if 'epoch' not in port.meta:
            return False
        if port.data.shape[0] != self._num_times:
            if self._reporting == 'error':
                raise WorkerInterrupt(
                    f'Received an epoch with {port.data.shape[0]} '
                    f'samples instead of {self._num_times}.')
            elif self._reporting == 'warn':
                self.logger.warning(
                    f'Received an epoch with {port.data.shape[0]} '
                    f'samples instead of {self._num_times}. '
                    f'Skipping.')
                return False
            else:  # reporting is None
                # be cool
                return False
        return True
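The _reporting switch offers three levels of strictness for mis-sized epochs: 'error' raises WorkerInterrupt, 'warn' logs and skips, and None skips silently.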
Example #10
    def _query_device(self):

        # get device info
        self.device_info = DeviceInfoStruct()
        ret = self.lib.GetDeviceInfo(byref(self.device_info))
        if not ret:
            raise WorkerInterrupt('Failed to retrieve device info')

        n_chan = self.device_info.NumberOfChannels
        sn = self.device_info.SerialNumber

        # get channel info
        TypeID_to_unit = ['NA', 'uV', 'uV', 'mV', 'Bit']
        TypeID_to_type = ['NA', 'voltage', 'voltage', 'voltage', 'Binary']
        channel_info = ChannelInfoStruct()
        self.channels = []
        for i in range(n_chan):
            ret = self.lib.GetChannelInfo(i, byref(channel_info))
            ch_name = channel_info.Name.decode('utf-8')
            self.channels.append(ch_name)
            tid = channel_info.TypeId
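The excerpt appears truncated: TypeID_to_unit, TypeID_to_type, sn and tid are collected here but presumably consumed in the remainder of _query_device, which is not shown.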
Example #11
    def update(self):

        if self._current > self._stop:
            raise WorkerInterrupt('No more data.')

        min = self._current
        max = min + self._timespan

        for key, source in self._sources.items():

            # Select data
            data = self._store.select(key, 'index >= min & index < max')

            # Add offset
            if self._resync:
                data.index += self._offset

            # Update port
            getattr(self, source['name']).data = data

        self._current = max
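A simpler variant of example #2: the query window is always self._timespan wide, with no speed-based pacing and no meta propagation.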
Example #12
    def __init__(self, host=None, port=8400, timeout=5000):
        """
        Args:
            host (str): The Utopia Hub hostname. Leave to `None` for autodiscovery.
            port (int): The Utopia Hub port.
            timeout (int): Delay (in ms) after which we stop trying to connect.
        """

        # Connect to the Utopia Hub
        self._client = UtopiaClient()
        try:
            self._client.autoconnect(host, port, timeout_ms=timeout)
        except Exception:
            # Failures are detected below via isConnected
            pass
        if not self._client.isConnected:
            raise WorkerInterrupt('Could not connect to Utopia hub')

        # Keep track of the header so it is sent only once
        self._header = None

        # Start the sync server
        self._task = Task(Server(), 'start').start()
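The connection attempt is allowed to fail silently because the decisive check is isConnected afterwards; only if that check fails is WorkerInterrupt raised.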
Example #13
File: fit.py Project: timeflux/timeflux_ml
    def update(self):

        # At this point, we are sure that we have some data to process
        self.o_events.data = pd.DataFrame()

        if self._thread_status is None:
            # When we have not received data, there is nothing to do
            if not self.i.ready():
                return
            self._fit()

        if self._thread_status == 'FAILED':
            raise WorkerInterrupt('Estimator fit failed.')

        elif self._thread_status == 'SUCCESS':
            if self._has_targets:
                model = {
                    'values': deepcopy(self._pipeline),
                    'label': deepcopy(self._le)
                }
            else:
                model = {'values': deepcopy(self._pipeline)}

            self.o_model.meta.update({'pipeline': model})

            self.logger.info(
                f'The model {self._pipeline} was successfully fitted. ')

            # send an event to announce that fitting is ready.
            if self._event_label_base is not None:
                self.o_events.data = self.o_events.data.append(
                    pd.DataFrame(index=[pd.Timestamp(time(), unit='s')],
                                 columns=['label', 'data'],
                                 data=[[self._event_label_base + '_ends',
                                        '']]))

            self._reset()
        else:  # self._thread_status == 'WORKING'
            return
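The same thread-status state machine as example #3, except that on success the fitted pipeline (plus the label encoder when targets exist) is deep-copied into o_model.meta before the completion event is appended.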
Example #14
    def update(self):

        # Let's get ready
        self._clear()

        # Are we dealing with continuous data or epochs?
        if self._dimensions is None:
            port_name = "i_training" if self.fit else "i"
            if getattr(self, port_name).ready():
                self._dimensions = 2
            elif len(list(self.iterate(port_name + "_*"))) > 0:
                self._dimensions = 3

        # Set the accumulation boundaries
        if self._accumulation_start is None:
            matches = match_events(self.i_events, self.event_start_accumulation)
            if matches is not None:
                self._accumulation_start = matches.index.values[0]
                self._status = ACCUMULATING
        if self._accumulation_stop is None:
            matches = match_events(self.i_events, self.event_stop_accumulation)
            if matches is not None:
                self._accumulation_stop = matches.index.values[0]

        # Always buffer a few seconds, in case the start event is coming late
        if self._status == IDLE:
            start = (now() - self._buffer_size).to_datetime64()
            stop = max_time()
            self._accumulate(start, stop)

        # Accumulate between boundaries
        if self._status == ACCUMULATING:
            start = self._accumulation_start
            stop = self._accumulation_stop if self._accumulation_stop else max_time()
            self._accumulate(start, stop)

        # Should we start fitting the model?
        if self._status < FITTING:
            if match_events(self.i_events, self.event_start_training) is not None:
                self._status = FITTING
                self._task = Task(
                    self._pipeline, "fit", self._X_train, self._y_train
                ).start()

        # Is the model ready?
        if self._status == FITTING:
            status = self._task.status()
            if status:
                if status["success"]:
                    self._pipeline = status["instance"]
                    self._status = READY
                    self.logger.debug(f"Model fitted in {status['time']} seconds")
                else:
                    self.logger.error(
                        f"An error occurred while fitting: {status['exception'].args[0]}"
                    )
                    self.logger.debug(
                        "\nTraceback (most recent call last):\n"
                        + "".join(status["traceback"])
                    )
                    raise WorkerInterrupt()

        # Run the pipeline
        if self._status == READY:
            self._receive()
            if self._X is not None:
                args = [self._X]
                if self.mode.startswith("fit"):
                    args.append(self._y)
                # TODO: optionally loop through epochs instead of sending them all at once
                self._out = getattr(self._pipeline, self.mode)(*args)

        # Set output streams
        self._send()
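The node steps through an event-driven state machine (IDLE, ACCUMULATING, FITTING, READY); fitting runs in a background Task, and a failed fit logs the exception and traceback before raising a bare WorkerInterrupt().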
Example #15
    def __init__(self, filename, keys, speed=1, timespan=None, resync=True):
        """
        Initialize.

        Parameters
        ----------
        filename : string
            The path to the HDF5 file.
        keys : list
            The list of keys to replay.
        speed : float
            The speed at which the data must be replayed. 1 means real-time.
        timespan : float
            The timespan of each chunk, in seconds.
            If not None, takes precedence over the `speed` parameter.
        resync : boolean
            If False, timestamps will not be resynced to current time.
        """

        # Load store
        try:
            self._store = pd.HDFStore(filename, mode='r')
        except IOError as e:
            raise WorkerInterrupt(e)

        # Init
        self._sources = {}
        self._start = pd.Timestamp.max
        self._stop = pd.Timestamp.min
        self._speed = speed
        self._timespan = None if not timespan else pd.Timedelta(f'{timespan}s')
        self._resync = resync

        for key in keys:
            try:
                # Check format
                if not self._store.get_storer(key).is_table:
                    self.logger.warning('%s: Fixed format. Will be skipped.',
                                        key)
                    continue
                # Get first index
                first = self._store.select(key, start=0, stop=1).index[0]
                # Get last index
                nrows = self._store.get_storer(key).nrows
                last = self._store.select(key, start=nrows - 1,
                                          stop=nrows).index[0]
                # Check index type
                if type(first) != pd.Timestamp:
                    self.logger.warning('%s: Invalid index. Will be skipped.',
                                        key)
                    continue
                # Find lowest and highest indices across stores
                if first < self._start:
                    self._start = first
                if last > self._stop:
                    self._stop = last
                # Extract meta
                if 'meta' in self._store.get_node(key)._v_attrs:
                    meta = self._store.get_node(key)._v_attrs['meta']
                else:
                    meta = {}
                # Set output port name, port will be created dynamically
                name = 'o' + key.replace('/', '_')
                # Update sources
                self._sources[key] = {
                    'start': first,
                    'stop': last,
                    'nrows': nrows,
                    'name': name,
                    'meta': meta
                }
            except KeyError:
                self.logger.warning('%s: Key not found.', key)

        # Current time
        now = clock.now()

        # Time offset
        self._offset = pd.Timestamp(now) - self._start

        # Current query time
        self._current = self._start

        # Last update
        self._last = now
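Keys that are not table-format or not indexed by pd.Timestamp are skipped with a warning instead of aborting the replay; the _offset computed at the end is what later allows resyncing timestamps to wall-clock time.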
Example #16
    def update(self):
        self._count += 1
        self.logger.debug('Doing things')
        if self._interrupt == self._count:
            raise WorkerInterrupt('Interrupting')
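Taken alone this excerpt is not runnable; a minimal self-contained node in the same spirit might look as follows (a sketch assuming timeflux's usual layout, with Node in timeflux.core.node and WorkerInterrupt in timeflux.core.exceptions):

    from timeflux.core.exceptions import WorkerInterrupt
    from timeflux.core.node import Node

    class Stopper(Node):
        """Interrupt the worker after a fixed number of updates."""

        def __init__(self, interrupt=10):
            self._interrupt = interrupt
            self._count = 0

        def update(self):
            self._count += 1
            self.logger.debug('Doing things')
            if self._interrupt == self._count:
                raise WorkerInterrupt('Interrupting')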
Example #17
    def __init__(self,
                 filename,
                 keys,
                 speed=1,
                 timespan=None,
                 resync=True,
                 start=0):
        """
        Initialize.

        Parameters
        ----------
        filename : string
            The path to the HDF5 file.
        keys : list
            The list of keys to replay.
        speed : float
            The speed at which the data must be replayed. 1 means real-time.
            Default: 1
        timespan : float
            The timespan of each chunk, in seconds.
            If not None, takes precedence over the `speed` parameter.
            Default: None
        resync : boolean
            If False, timestamps will not be resynced to current time.
            Default: True
        start : float
            Start directly at the given time offset, in seconds.
            Default: 0
        """

        # Load store
        try:
            self._store = pd.HDFStore(self._find_path(filename), mode="r")
        except IOError as e:
            raise WorkerInterrupt(e)

        # Init
        self._sources = {}
        self._start = pd.Timestamp.max
        self._stop = pd.Timestamp.min
        self._speed = speed
        self._timespan = None if not timespan else pd.Timedelta(f"{timespan}s")
        self._resync = resync

        for key in keys:
            try:
                # Check format
                if not self._store.get_storer(key).is_table:
                    self.logger.warning("%s: Fixed format. Will be skipped.",
                                        key)
                    continue
                # Get first index
                first = self._store.select(key, start=0, stop=1).index[0]
                # Get last index
                nrows = self._store.get_storer(key).nrows
                last = self._store.select(key, start=nrows - 1,
                                          stop=nrows).index[0]
                # Check index type
                if type(first) != pd.Timestamp:
                    self.logger.warning("%s: Invalid index. Will be skipped.",
                                        key)
                    continue
                # Find lowest and highest indices across stores
                if first < self._start:
                    self._start = first
                if last > self._stop:
                    self._stop = last
                # Extract meta
                if "meta" in self._store.get_node(key)._v_attrs:
                    meta = self._store.get_node(key)._v_attrs["meta"]
                else:
                    meta = {}
                # Set output port name, port will be created dynamically
                name = "o" + key.replace("/", "_")
                # Update sources
                self._sources[key] = {
                    "start": first,
                    "stop": last,
                    "nrows": nrows,
                    "name": name,
                    "meta": meta,
                }
            except KeyError:
                self.logger.warning("%s: Key not found.", key)

        # Current time
        now = clock.now()

        # Starting timestamp
        self._start += pd.Timedelta(f"{start}s")

        # Time offset
        self._offset = pd.Timestamp(now) - self._start

        # Current query time
        self._current = self._start

        # Last update
        self._last = now
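Compared with example #15, this revision adds a start offset (shifting _start forward by the given number of seconds) and resolves the filename through a _find_path helper.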
Example #18
    def __init__(self,
                 port,
                 rate=1000,
                 channels=("A1", "A2", "A3", "A4", "A5", "A6")):

        # Check port
        if not port.startswith("/dev/") and not port.startswith("COM"):
            raise ValueError(f"Invalid serial port: {port}")

        # Check rate
        if rate not in (1, 10, 100, 1000):
            raise ValueError(f"Invalid rate: {rate}")

        # Check channels
        unique_channels = set(channels)
        analog_channels = ["A1", "A2", "A3", "A4", "A5", "A6"]
        channels = []
        for channel_num, channel_name in enumerate(analog_channels):
            if channel_name in unique_channels:
                channels.append(channel_num)

        # Set column names
        # Sequence number and numeric channels are always present
        self.columns = ["SEQ", "I1", "I2", "O1", "O2"]
        # Add required analog channels
        for channel in channels:
            self.columns.append(analog_channels[channel])

        # Compute the sample size in bytes
        self.channel_count = len(channels)
        if self.channel_count <= 4:
            self.sample_size = int(
                np.ceil((12.0 + 10.0 * self.channel_count) / 8.0))
        else:
            self.sample_size = int(
                np.ceil((52.0 + 6.0 * (self.channel_count - 4)) / 8.0))

        # Connect to BITalino
        try:
            self.device = BITalino(port)
        except UnicodeDecodeError:
            # This can happen after an internal buffer overflow.
            # The solution seems to power off the device and repair.
            raise WorkerInterrupt("Unstable state. Could not connect.")
        except Exception as e:
            raise WorkerInterrupt(e)

        # Set battery threshold
        # The red led will light up at 5-10%
        self.device.battery(30)

        # Read BITalino version
        self.logger.info(self.device.version())

        # Read state and show battery level
        # http://forum.bitalino.com/viewtopic.php?t=448
        state = self.device.state()
        battery = round(
            1 + (state["battery"] - 511) * ((99 - 1) / (645 - 511)), 2)
        self.logger.info("Battery: %.2f%%", battery)

        # Start Acquisition
        self.device.start(rate, channels)

        # Initialize counters for timestamp indices and continuity checks
        self.last_sample_counter = 15
        self.time_device = np.datetime64(int(time.time() * 1e6), "us")
        self.time_local = self.time_device
        self.time_delta = np.timedelta64(int(1000 / rate), "ms")

        # Set meta
        self.meta = {"rate": rate}
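The battery expression linearly maps the raw reading onto a 1-99% scale (511 maps to 1%, 645 to 99%); last_sample_counter starts at 15, presumably because BITalino sequence numbers wrap at 15, so the first expected value is 0.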
Example #19
    def __init__(self, filename, keys, timespan=.04, resync=True):
        """
        Initialize.

        Parameters
        ----------
        filename : string
            The path to the HDF5 file.
        keys : list
            The list of keys to replay.
        timespan : float
            The timespan of each chunk, in seconds.
        resync : boolean
            If False, timestamps will not be resynced to current time.
        """

        # Load store
        try:
            self._store = pd.HDFStore(filename, mode='r')
        except IOError as e:
            raise WorkerInterrupt(e)

        # Init
        self._sources = {}
        self._start = pd.Timestamp.max
        self._stop = pd.Timestamp.min
        self._timespan = pd.Timedelta(f'{timespan}s')
        self._resync = resync

        for key in keys:
            try:
                # Check format
                if not self._store.get_storer(key).is_table:
                    self.logger.warning('%s: Fixed format. Will be skipped.',
                                        key)
                    continue
                # Get first index
                first = self._store.select(key, start=0, stop=1).index[0]
                # Get last index
                nrows = self._store.get_storer(key).nrows
                last = self._store.select(key, start=nrows - 1,
                                          stop=nrows).index[0]
                # Check index type
                if type(first) != pd.Timestamp:
                    self.logger.warning('%s: Invalid index. Will be skipped.',
                                        key)
                    continue
                # Find lowest and highest indices across stores
                if first < self._start:
                    self._start = first
                if last > self._stop:
                    self._stop = last
                # Set output port name, port will be created dynamically
                name = 'o' + key.replace('/', '_')
                # Update sources
                self._sources[key] = {
                    'start': first,
                    'stop': last,
                    'nrows': nrows,
                    'name': name
                }
            except KeyError:
                self.logger.warning('%s: Key not found.', key)

        # Time offset
        self._offset = pd.Timestamp(clock.now()) - self._start

        # Current query time
        self._current = self._start
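A leaner variant of the replay node from examples #15 and #17: fixed-width chunks (40 ms by default), no speed pacing, and no meta extraction.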
Example #20
File: mne.py Project: neuroidss/timeflux
def xarray_to_mne(data, meta, context_key, event_id, reporting='warn',
                  ch_types='eeg', **kwargs):
    """ Convert DataArray and meta into mne Epochs object

    Args:
        data (DataArray): Array of dimensions ('epoch', 'time', 'space')
        meta (dict): Dictionary with keys 'epochs_context', 'rate', 'epochs_onset'
        context_key (str|None): key to select the context label.
        If the context is a string, `context_key` should be set to ``None``.
        event_id (dict): Associates context label to an event_id that should be an int. (eg. dict(auditory=1, visual=3))
        reporting ('warn'|'error'| None): How this function handles epochs with invalid context:
            - 'error' will raise a TimefluxException
            - 'warn' will print a warning with :py:func:`warnings.warn` and skip the corrupted epochs
            - ``None`` will skip the corrupted epochs
        ch_types (list|str): Channel type to
    Returns:
        epochs (mne.Epochs): mne object with the converted data.
    """
    if isinstance(ch_types, str):
        ch_types = [ch_types] * len(data.space)

    if isinstance(data, xr.DataArray):
        pass
    elif isinstance(data, xr.Dataset):
        # extract data
        data = data.data
    else:
        raise ValueError(f'data should be of type DataArray or Dataset, '
                         f'received {type(data)} instead.')
    _dims = data.coords.dims
    if 'target' in _dims:
        np_data = data.transpose('target', 'space', 'time').values
    elif 'epoch' in _dims:
        np_data = data.transpose('epoch', 'space', 'time').values
    else:
        raise ValueError(f'Data should have either `target` or `epoch` in its coordinates. Found {_dims}')
    # Create the events array: mne event arrays are essentially numpy arrays
    # with three columns: event_sample | previous_event_id | event_id

    events = np.array([[onset.value, 0, _context_to_id(context, context_key, event_id)]
                       for (context, onset)
                       in zip(meta['epochs_context'], meta['epochs_onset'])])
    events_mask = np.isnan(events.astype(float))[:, 2]
    if events_mask.any():
        if reporting == 'error':
            raise WorkerInterrupt(f'Found {events_mask.sum()} epochs with corrupted context. ')
        else:  # reporting is either None or warn
            # be cool, skip those events
            events = events[~events_mask, :]
            np_data = np_data[~events_mask, :, :]
            if reporting == 'warn':
                logger.warning(f'Found {events_mask.sum()} epochs with corrupted context. '
                               f'Skipping them. ')
    # Fill the second column with previous event ids.
    events[0, 1] = events[0, 2]
    events[1:, 1] = events[0:-1, 2]
    # set the info
    rate = meta['rate']
    info = mne.create_info(ch_names=list(data.space.values), sfreq=rate,
                           ch_types=ch_types)
    # construct the mne object
    epochs = mne.EpochsArray(np_data, info=info, events=events.astype(int),
                             event_id=event_id,
                             tmin=data.time.values[0] / np.timedelta64(1, 's'),
                             verbose=False, **kwargs)
    return epochs
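The events array follows mne's (sample, previous id, id) convention: corrupted contexts yield NaN ids and are masked out according to reporting, and the second column is back-filled with the preceding event's id before EpochsArray is built.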
Example #21
    def _start_device(self):
        fs = wintypes.DWORD(self.sampling_rate)
        ret = self.lib.StartGenericDevice(byref(fs))
        if ret != 0:
            raise WorkerInterrupt(self.ErrorCodeMessage[abs(ret)])
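Like example #1, this relies on the shared ErrorCodeMessage table to turn the SDK's negative return codes into a readable WorkerInterrupt.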