예제 #1
0
    def get_all_gpib_id(self, use_cached=True):
        """ Queries the host for all connected GPIB instruments, and
        queries their identities with ``instrID()``.

        Warning: This might cause your instrument to lock into remote mode.

        Args:
            use_cached (bool): query only if not cached, default True

        Returns:
            dict: dictionary with gpib addresses as keys and \
                identity strings as values.
        """
        # Force a refresh when there is nothing cached yet.
        if self.__cached_gpib_instrument_list is None:
            use_cached = False
        if use_cached:
            return self.__cached_gpib_instrument_list

        # Fix: resource enumeration used to run unconditionally, even when
        # the cached instrument list was about to be returned. Only touch
        # the resource manager when we actually rebuild the cache.
        gpib_resources = self.list_gpib_resources_info(use_cached=use_cached)
        gpib_instrument_list = dict()
        logger.debug("Caching GPIB instrument list in %s", self)
        for gpib_address in gpib_resources.keys():
            # tempSess: open a throwaway VISA session per instrument
            visa_object = VISAObject(gpib_address, tempSess=True)
            try:
                instr_id = visa_object.instrID()
                gpib_instrument_list[gpib_address] = instr_id
            except pyvisa.VisaIOError as err:
                # Best-effort: an unresponsive instrument is logged and skipped
                logger.error(err)
        self.__cached_gpib_instrument_list = gpib_instrument_list
        return gpib_instrument_list
예제 #2
0
    def isLive(self):
        """ Attempts VISA connection to instrument, and checks whether
            :meth:`~lightlab.equipment.visa_bases.visa_object.instrID`
            matches :data:`id_string`.

            Produces a warning if it is live but the id_string is wrong.

            Returns:
                (bool): True if "live", False otherwise.
        """
        try:
            query_id = self.driver_object.instrID()
        except pyvisa.VisaIOError as err:
            # Could not reach the instrument at all
            logger.warning(err)
            return False
        logger.info("Found %s in %s.", self.name, self.address)
        if self.id_string is None:
            # Nothing to authenticate against; reachable counts as live
            logger.debug("Cannot authenticate %s in %s.",
                         self.name, self.address)
            return True
        if self.id_string == query_id:
            logger.info("id_string of %s is accurate", self.name)
            return True
        # Reachable, but identity does not match the record
        logger.warning("%s: %s, expected %s", self.address,
                       query_id, self.id_string)
        return False
예제 #3
0
def descend(yArr, invalidIndeces, startIndex, direction, threshVal):
    ''' From the start index, descend until reaching a threshold level and return that index

    If it runs into the invalidIndeces or an edge, returns i at the edge of validity
    and False for validPeak.

    Args:
        yArr (array): data being descended
        invalidIndeces (array of bool): indices blanked out by previously found peaks
        startIndex (int): index where the walk begins
        direction (int or str): 0, -1 or 'left' walks toward lower indices;
            1 or 'right' walks toward higher indices
        threshVal (float): stop once ``yArr[i]`` falls to or below this level

    Returns:
        (int, bool): final index, and whether the descent cleanly reached
        the threshold (validPeak)

    Raises:
        ValueError: if ``direction`` is not one of the recognized values
    '''
    iterUntilFail = len(yArr)  # number of tries before giving up to avoid hang
    if direction in [0, -1, 'left']:
        sideSgn = -1
    elif direction in [1, 'right']:
        sideSgn = 1
    else:
        # Fix: previously an unrecognized direction left sideSgn unbound,
        # producing a confusing UnboundLocalError below.
        raise ValueError('Invalid direction: ' + str(direction))
    i = startIndex
    validPeak = True
    tooCloseToEdge = False
    tooCloseToOtherPeak = False
    for _ in range(iterUntilFail):
        if not validPeak:
            break

        # Fix: the next step is already out of bounds when it *equals*
        # len(yArr) (was ``> len(yArr)``), which let i walk off the right
        # edge and raise IndexError on the following iteration. This also
        # makes the right edge symmetric with the ``<= -1`` left-edge check.
        if i + sideSgn <= -1 or i + sideSgn >= len(yArr):
            tooCloseToEdge = True
            logger.debug('Descend: too close to edge of available range')
        if invalidIndeces[i]:
            tooCloseToOtherPeak = True
            logger.debug('Descend: too close to another peak')
        validPeak = not tooCloseToEdge and not tooCloseToOtherPeak

        if yArr[i] <= threshVal:
            break

        # logger.debug('Index %s: blanked=%s, yArr=%s', i, invalidIndeces[i], yArr[i])
        i += sideSgn
    else:
        # Loop exhausted without reaching the threshold
        validPeak = False
    return i, validPeak
예제 #4
0
 def isLive(self):
     ''' Pings the system and returns if it is alive.
     '''
     if self.hostname is None:
         # No hostname configured: cannot test reachability
         logger.warning("Hostname not set. Unable to ping.")
         return False
     logger.debug("Pinging %s...", self.hostname)
     # Exit status 0 from ping means the host responded
     response = os.system("ping -c 1 {}".format(self.hostname))
     alive = (response == 0)
     if not alive:
         logger.warning("%s is not reachable via ping.", self)
     return alive
예제 #5
0
 def query(self, queryStr, expected_talker=None):
     ''' Sends ``queryStr`` via ``_query`` and returns the response.

     If ``expected_talker`` is given, the response is compared against it:
     a mismatch is logged as a warning, a match at debug level.
     '''
     ret = self._query(queryStr)
     if expected_talker is None:
         logger.debug("'%s' returned '%s'", queryStr, ret)
     elif ret == expected_talker:
         logger.debug("'%s' returned '%s', expected '%s'", queryStr, ret,
                      str(expected_talker))
     else:
         logger.warning("'%s' returned '%s', expected '%s'", queryStr, ret,
                        str(expected_talker))
     return ret
예제 #6
0
    def tsp_startup(self, restart=False):
        """ Ensures that the TSP network is available.

        - Checks if tsplink.state is online.
        - If offline, or if ``restart`` is requested, sends a reset().

        Args:
            restart (bool): force a tsplink reset even if already online

        Returns:
            bool: True once the TSP link has been confirmed or reset
        """
        state = self.query_print("tsplink.state")
        if state == "online" and not restart:
            return True
        # Fix: previously only state == "offline" triggered a reset, so
        # restart=True on an online link (or any unexpected state string)
        # fell through and implicitly returned None.
        nodes = int(float(self.query_print("tsplink.reset()")))
        logger.debug("%s TSP nodes found.", nodes)
        return True
예제 #7
0
    def __contains__(self, item):
        """ Membership test for this container.

        Instruments are looked up in ``self.instruments``, devices in
        ``self.devices``; anything else is reported absent.
        """
        if isinstance(item, Instrument):
            found = item in self.instruments
            if not found:
                logger.info("%s not found in %s's instruments.", item, self)
        elif isinstance(item, Device):
            found = item in self.devices
            if not found:
                logger.info("%s not found in %s's devices.", item, self)
        else:
            logger.debug("%s is neither an Instrument nor a Device", item)
            found = False
        return found
예제 #8
0
 def setBgSmoothed(self, raw=None, smoothNm=None):
     ''' Attempts to find background using a low-pass filter.

     Does not return. Stores results in the assistant variables.

     Args:
         raw: raw spectrum; taken fresh via ``rawSpect()`` if None
         smoothNm: low-pass window width; defaults to ``bgSmoothDefault``
     '''
     if raw is None:
         raw = self.rawSpect()
     if smoothNm is None:
         smoothNm = self.bgSmoothDefault
     if self.arePeaks:
         # Fix: this message announces a warning-level condition but was
         # logged at debug level, hiding it under normal log settings.
         logger.warning(
             'Warning fake background spectrum is being used on the drop port'
         )
         self.setBgConst(raw)
     else:
         self.__backgrounds['smoothed'] = raw.lowPass(windowWidth=smoothNm)
예제 #9
0
 def setBgTuned(self, base, displaced):
     ''' Insert the pieces of the displaced spectrum into where the peaks are

         It is assumed that these spectra were taken with this object's fgSpect method
     '''
     if self.arePeaks:
         # Fix: warning-level condition was logged at debug level.
         logger.warning(
             'Warning fake background spectrum is being used on the drop port'
         )
         self.__backgrounds['tuned'] = self.getBgSpect()
         return
     res = self.resonances(base)
     baseRaw = base + self.getBgSpect()
     displacedRaw = displaced + self.getBgSpect()
     for r in res:
         # Splice a window of +/- 3 FWHM around each resonance
         spliceWind = r.lam + 6 * r.fwhm * np.array([-1, 1]) / 2
         baseRaw = baseRaw.splice(displacedRaw, segment=spliceWind)
     self.__backgrounds['tuned'] = baseRaw
예제 #10
0
 def setBgNulled(self, filtShapes, avgCnt=3):
     ''' Uses the peak shape information to null out resonances

     This gives the best estimate of background INDEPENDENT of the tuning state.
     It is assumed that the fine background taken by tuning is present, and the filter shapes were taken with that
     spect should be a foreground spect, but be careful when it is also derived from bgNulled

     Args:
         filtShapes: per-resonance filter shapes used to null the peaks
         avgCnt (int): averaging count passed to ``fgSpect``
     '''
     if self.arePeaks:
         # Fix: warning-level condition was logged at debug level.
         logger.warning(
             'Warning, null-based background spectrum not used for drop port. Ignoring'
         )
         self.__backgrounds['nulled'] = self.getBgSpect()
     else:
         spect = self.fgSpect(avgCnt, bgType='tuned')
         newBg = spect.copy()
         for i, r in enumerate(self.resonances(newBg)):
             # Subtract each filter shape, re-centered on its resonance
             nulledPiece = spect - filtShapes[i].db().shift(r.lam)
             newBg = newBg.splice(nulledPiece)
         self.__backgrounds['nulled'] = self.getBgSpect(
             bgType='tuned') - newBg
예제 #11
0
    def saveState(self, fname=None, save_backup=True):
        """ Saves the current lab, together with all its dependencies,
        to a JSON file.

        But first, it checks whether the file has the same hash as the
        previously loaded one. If file is not found, skip this check.

        If the labstate was created from scratch, save with ``_saveState()``.

        Args:
            fname (str or Path): file path to save
            save_backup (bool): saves a backup just in case, defaults to True.

        Raises:
            OSError: if there is any problem saving the file.
        """
        if fname is None:
            fname = self.filename
        try:
            loaded_lab = LabState.loadState(fname)
        except FileNotFoundError:
            logger.debug("File not found: %s. Saving for the first time.",
                         fname)
            self._saveState(fname, save_backup=False)
            return
        except JSONDecodeError:
            if os.stat(fname).st_size == 0:
                # Fix: this referenced an undefined name ``_filename``,
                # which raised NameError instead of logging the message.
                logger.warning("%s is empty. Saving for the first time.",
                               fname)
                self._saveState(fname, save_backup=False)
                return
            else:
                raise

        if not self.__sha256__:
            # Labstate built from scratch: compute its hash so it can be
            # compared against the one on disk.
            logger.debug(
                "Attempting to compare fabricated labstate vs. preloaded one.")
            self.__sha256__ = self.__toJSON()["__sha256__"]
            logger.debug("self.__sha256__: %s", self.__sha256__)

        if loaded_lab == self:
            logger.debug("Detected no changes in labstate. Nothing to do.")
            return

        # Only save if the file is the same one we loaded from (hash match);
        # otherwise someone else modified it and we refuse to clobber.
        if loaded_lab.__sha256__ == self.__sha256__:
            self._saveState(fname, save_backup)
        else:
            logger.error(
                "%s's hash does not match with the one loaded in memory. Aborting save.",
                fname)
예제 #12
0
    def list_resources_info(self, use_cached=True):
        """ Executes a query to the NI Visa Resource manager and
        returns a list of instruments connected to it.

        Args:
            use_cached (bool): query only if not cached, default True

        Returns:
            list: list of `pyvisa.highlevel.ResourceInfo` named tuples.

        """
        # Nothing cached yet: a fresh query is unavoidable.
        if self.__cached_list_resources_info is None:
            use_cached = False
        if not use_cached:
            logger.debug("Caching resource list in %s", self)
            rm = pyvisa.ResourceManager()
            self.__cached_list_resources_info = rm.list_resources_info(
                query=self._visa_prefix() + "?*::INSTR")
        return self.__cached_list_resources_info
예제 #13
0
    def _saveState(self, fname=None, save_backup=True):
        """ Saves the file without checking hash.

        Args:
            fname (str or Path): file path to save; defaults to ``self.filename``
            save_backup (bool): back up an existing file first, defaults to True
        """
        if fname is None:
            fname = self.filename
        filepath = Path(fname).resolve()

        # it is good to backup this file in case it exists
        if save_backup:
            if filepath.exists():  # pylint: disable=no-member
                # gets folder/filename.* and transforms into folder/filename_{timestamp}.json
                filepath_backup = Path(filepath).with_name(
                    "{}_{}.json".format(filepath.stem, timestamp_string()))
                # Fix: use lazy %-style arguments for consistency with the
                # rest of the module's logging calls (was an f-string).
                logger.debug("Backup %s to %s", filepath, filepath_backup)
                shutil.copy2(filepath, filepath_backup)

        # save to filepath, overwriting
        filepath.touch()  # pylint: disable=no-member
        with open(filepath, 'w') as file:
            json_state = self.__toJSON()
            # NOTE(review): ``json`` here must be a jsonpickle-style module
            # (stdlib json has no ``encode``) — confirm against the imports.
            file.write(json.encode(json_state))
            self.__sha256__ = json_state["__sha256__"]
            logger.debug("%s's sha: %s", fname, json_state["__sha256__"])
예제 #14
0
    def gather(self, soakTime=None, autoSave=False, returnToStart=False):  # pylint: disable=arguments-differ
        ''' Perform the sweep

            Args:
                soakTime (None, float): wait this many seconds at the first point to let things settle
                autoSave (bool): save data on completion, if savefile is specified
                returnToStart (bool): If True, actuates everything to the first point after the sweep completes

            Returns:
                None
        '''
        # Initialize builders that start off with None grids
        if self.data is None:
            # oldData = None
            self.data = OrderedDict()
        else:
            # oldData = self.data.copy()
            # Existing data: drop only the keys this sweep will regenerate
            # (actuate/measure/parse); other entries are left in place.
            for dKeySrc in (self.actuate, self.measure, self.parse):
                for dKey in dKeySrc.keys():
                    try:
                        del self.data[dKey]
                    except KeyError:
                        pass
        # The whole sweep runs inside try so that a failure keeps partial data
        try:
            swpName = 'Generic sweep in ' + ', '.join(self.actuate.keys())
            prog = io.ProgressWriter(swpName, self.swpShape, **self.monitorOptions)

            # Soak at the first point
            if soakTime is not None:
                logger.debug('Soaking for %s seconds.', soakTime)
                for actuObj in self.actuate.values():
                    actuObj.function(actuObj.domain[0])
                time.sleep(soakTime)

            for index in np.ndindex(self.swpShape):
                pointData = OrderedDict()  # Everything that will be measured *at this index*

                for statKey, statMat in self.static.items():
                    pointData[statKey] = statMat[index]

                # Do the actuation, storing domain args and return values (if present)
                for iDim, actu in enumerate(self.actuate.items()):
                    actuKey, actuObj = actu
                    if actuObj.domain is None:
                        x = None
                    else:
                        x = actuObj.domain[index[iDim]]
                        pointData[actuKey] = x
                    # Actuate only when this dimension's value just changed:
                    # innermost dimension, or the next-inner index rolled over
                    # to 0 — unless doOnEveryPoint forces it every time.
                    if iDim == self.actuDims - 1 or index[iDim + 1] == 0 or actuObj.doOnEveryPoint:
                        y = actuObj.function(x)  # The actual function call occurs here
                        if y is not None:
                            pointData[actuKey + '-return'] = y

                # Do the measurement, store return values
                for measKey, measFun in self.measure.items():
                    pointData[measKey] = measFun()
                    # print('   Meas', measKey, ':', pointData[measKey])

                # Parse and store
                for parseKey, parseFun in self.parse.items():
                    try:
                        pointData[parseKey] = parseFun(pointData)
                    except KeyError as err:
                        # A parser depending on another parser's output raises
                        # KeyError when executed before its dependency.
                        if parseKey in self.parse.keys():
                            print('Parsing out of order.',
                                  'Parser', parseKey, 'depends on parser', err,
                                  'but is being executed first')
                        raise err

                # Insert point data into the full matrix data builder
                # On the first go through, initialize array of correct datatype
                for k, v in pointData.items():
                    if all(i == 0 for i in index):
                        if np.isscalar(v):
                            self.data[k] = np.zeros(self.swpShape, dtype=float)
                        else:
                            self.data[k] = np.empty(self.swpShape, dtype=object)
                    self.data[k][index] = v

                # Plotting during the sweep
                # NOTE(review): assumes monitorOptions['livePlot'] stays
                # constant for the whole sweep — axArr would be unbound if it
                # turned on mid-sweep. Confirm with callers.
                if self.monitorOptions['livePlot']:
                    if all(i == 0 for i in index):
                        axArr = None
                    axArr = self.plot(axArr=axArr, index=index)
                    flatIndex = np.ravel_multi_index(index, self.swpShape)
                    if flatIndex % self.monitorOptions['plotEvery'] == 0:
                        display.display(plt.gcf())
                        display.clear_output(wait=True)
                # Progress report
                prog.update()
            # End of the main loop

        except Exception as err:
            # Partial results stay in self.data so a failed sweep can be inspected
            logger.error('Error while sweeping. Keeping data. %s', err)
            raise

        if returnToStart:
            for actuObj in self.actuate.values():
                actuObj.function(actuObj.domain[0])

        if autoSave:
            self.save()
예제 #15
0
def findPeaks(yArrIn,
              isPeak=True,
              isDb=False,
              expectedCnt=1,
              descendMin=1,
              descendMax=3,
              minSep=0):
    '''Takes an array and finds a specified number of peaks

        Looks for maxima/minima that are separated from others, and
        stops after finding ``expectedCnt``

        Args:
            isPeak (bool): look for maxima if True, minima if False
            isDb (bool): treats dips like DB dips, so their width is relative to outside the peak, not inside
            expectedCnt (int): number of peaks to find
            descendMin (float): minimum amount to descend to be classified as a peak
            descendMax (float): amount to descend down from the peaks to get the width (i.e. FWHM is default)
            minSep (int): the minimum spacing between two peaks, in array index units

        Returns:
            array (float): indeces of peaks, sorted from biggest peak to smallest peak
            array (float): width of peaks, in array index units

        Raises:
            Exception: if not enough peaks found. This plots on fail, so you can see what's going on
    '''
    xArr = np.arange(len(yArrIn))
    yArr = yArrIn.copy()
    sepInds = int(np.floor(minSep))

    pkInds = np.zeros(expectedCnt, dtype=int)
    pkWids = np.zeros(expectedCnt)
    # Indices already consumed by previously found peaks
    blanked = np.zeros(xArr.shape, dtype=bool)

    # Flip minima into maxima so the search below only handles peaks
    if not isPeak:
        yArr = 0 - yArr

    # Start with the strictest descent requirement; relaxed on failure below
    descendBy = descendMax

    yArrOrig = yArr.copy()

    for iPk in range(expectedCnt):  # Loop over peaks
        logger.debug('--iPk = %s', iPk)
        isValidPeak = False
        for iAttempt in range(
                1000
        ):  # Loop through falsities like edges and previously found peaks
            if isValidPeak:
                break
            logger.debug('Attempting to find actual')
            indOfMax = yArr.argmax()
            peakAmp = yArr[indOfMax]
            if isPeak or not isDb:
                absThresh = peakAmp - descendBy
            else:
                # NOTE(review): for dB dips the threshold appears to be
                # referenced to the level outside the peak — confirm intent.
                absThresh = min(descendBy, peakAmp - descendBy)
            logger.debug('absThresh = %s', absThresh)

            # Didn't find a peak anywhere
            # (iAttempt == 999 must match the range(1000) bound above)
            if blanked.all() or absThresh <= np.amin(yArr) or iAttempt == 999:
                descendBy -= .5  # Try reducing the selectivity
                if descendBy >= descendMin:
                    logger.debug('Reducing required descent to %s', descendBy)
                    continue
                else:
                    # plot a debug view of the spectrum that throws an error when exited
                    logger.warning('Found %s of %s peaks. Look at the plot.',
                                   iPk, expectedCnt)
                    plt.plot(yArr)
                    plt.plot(yArrOrig)
                    plt.show(block=True)
                    raise PeakFinderError(
                        'Did not find enough peaks exceeding threshold')

            # descend data down by a threshold amount
            logger.debug('-Left side')
            indL, validL = descend(yArr, blanked, indOfMax - sepInds, 'left',
                                   absThresh)
            logger.debug('-Right side')
            indR, validR = descend(yArr, blanked, indOfMax + sepInds, 'right',
                                   absThresh)
            # Half-max-style bracket around the peak (end index exclusive)
            hmInds = [indL, indR + 1]
            isValidPeak = validL and validR
            # throw out data around this peak by minimizing yArr and recording as blank
            yArr[hmInds[0]:hmInds[1]] = np.amin(yArr)
            blanked[hmInds[0]:hmInds[1]] = True
        logger.debug('Successfully found a peak')
        # Peak index is the center of the bracket; width is its extent
        pkInds[iPk] = int(np.mean(hmInds))
        pkWids[iPk] = np.diff(hmInds)[0]
    return pkInds, pkWids
예제 #16
0
def binarySearch(evalPointFun,
                 targetY,
                 startBounds,
                 hardConstrain=False,
                 xTol=0,
                 yTol=0,
                 livePlot=False):
    '''
        Gives the x where ``evalPointFun(x) == targetY``, approximately.
        The final call to evalPointFun will be of this value,
        so no need to call it again, if your goal is to set to the target.

        xTol and yTol are OR-ed conditions.
        If one is satisfied, it will terminate successfully.
        You must specify at least one.

        Assumes that the function is monotonic in any direction
        It often works when there is a peak inside the ``startBounds``,
        although not always.

        Args:
            evalPointFun (function): y=f(x) one argument, one return. The function that we want to find the target Y value of
            targetY (float): Y value to converge to
            startBounds (list, ndarray): minimum and maximum x values that bracket the peak of interest
            hardConstrain (bool, list): if not True, will do a bracketSearch. If list, will stay within those
            xTol (float): if *domain* shifts become less than this, terminates successfully
            yTol (float): if *range* shifts become less than this, terminates successfully
            livePlot (bool): for notebook plotting

        Returns:
            (float): the optimal X value
    '''
    # Argument checking
    # NOTE(review): the defaults are 0 (not None), so this check only fires
    # when a caller explicitly passes None for both — confirm intended.
    if xTol is None and yTol is None:
        raise ValueError('Must specify either xTol or yTol, ' +
                         'or binary search will never converge.')

    startBounds = sorted(startBounds)
    # Records every (x, y) evaluation made during the search
    tracker = MeasuredFunction([], [])

    def measureError(xVal):
        # Evaluate the function and return the signed offset from targetY
        yVal = evalPointFun(xVal)
        tracker.addPoint((xVal, yVal))
        err = yVal - targetY
        if livePlot:
            plotAfterPointMeasurement(tracker, targetY)
        return err

    # First check out what happens at the edges
    for x in startBounds:
        measureError(x)
    # Monotonic direction inferred from the two edge samples
    isIncreasing = tracker.ordi[1] > tracker.ordi[0]

    outOfRangeDirection = doesMFbracket(targetY, tracker)
    if outOfRangeDirection != 'in-range':
        # Case 1: we won't tolerate it
        if hardConstrain is True:
            # Best guess is the sampled point whose y is closest to the target side
            if outOfRangeDirection == 'high':
                bestGuess = tracker.absc[np.argmax(tracker.ordi)]
            else:
                bestGuess = tracker.absc[np.argmin(tracker.ordi)]
            raise SearchRangeError(
                'binarySearch function value ' +
                'outside of hard constraints! ' + 'Results invalid.',
                outOfRangeDirection, bestGuess)
        # Case 2: try to get it in range
        else:
            try:
                newStartBounds = bracketSearch(evalPointFun=evalPointFun,
                                               targetY=targetY,
                                               startBounds=startBounds,
                                               xTol=xTol,
                                               hardConstrain=hardConstrain,
                                               livePlot=livePlot)
            except SearchRangeError as err:
                # Bracketing failed; the error carries a best-guess x value
                logger.debug(
                    'Failed to bracket targetY=%s. Returning best guess',
                    targetY)
                return err.args[2]
            try:
                # Recurse once with bounds that bracket the target
                return binarySearch(
                    evalPointFun=evalPointFun,
                    targetY=targetY,
                    startBounds=newStartBounds,  # important change
                    xTol=xTol,
                    yTol=yTol,
                    hardConstrain=True,  # important change
                    livePlot=livePlot)
            except SearchRangeError as err:
                raise SearchRangeError(
                    'It was in range and then not, ' + 'so probably noise.',
                    err.args[1], err.args[2])

    # By now we are certain that the target is bounded by the start points
    # Standard bisection: halve the step each iteration, stepping against
    # the sign of the error according to the monotonic direction.
    thisX = np.mean(startBounds)
    absStep = np.diff(startBounds)[0] / 4
    for _ in range(30):
        newErr = measureError(thisX)
        # Case 1: converged within tolerance
        if abs(newErr) < yTol or absStep < xTol:

            return thisX

        # Case 2: guess new point and reduce step by factor of 2
        if isIncreasing:
            thisX -= np.sign(newErr) * absStep
        else:
            thisX += np.sign(newErr) * absStep
        absStep /= 2
    raise Exception(
        'Binary search did 30 iterations and still did not converge')
예제 #17
0
def peakSearch(evalPointFun,
               startBounds,
               nSwarm=3,
               xTol=0.,
               yTol=0.,
               livePlot=False):
    ''' Returns the optimal input that gives you the peak, and the peak value

        You must set either xTol or yTol.
        Be careful with yTol! It is best used with a big swarm.
        It does not guarantee that you are that close to peak, just that the swarm is that flat

        This algorithm is a modified swarm that is robust to outliers, sometimes.
            Each iteration, it takes <nSwarm> measurements and looks at the best (highest).
            The update is calculated by shrinking the swarm around the index of the best value.
            It does not compare between iterations: that makes it robust to one-time outliers.
            It attributes weight only by order of y values in an iteration, not the value between iterations or the magnitude of differences between y's within an iteration

        Not designed to differentiate global vs. local maxima

        Args:
            evalPointFun (function): y=f(x) one argument, one return. The function that we want to find the peak of
            startBounds (list, ndarray): minimum and maximum x values that bracket the peak of interest
            nSwarm (int): number of evaluations per iteration. Use more if it's a narrow peak in a big bounding area
            xTol (float): if the swarm x's fall within this range, search returns successfully
            yTol (float): if the swarm y's fall within this range, search returns successfully
            livePlot (bool): for notebook plotting

        Returns:
            (float, float): best (x,y) point of the peak
    '''
    # Argument checking
    # NOTE(review): defaults are 0. (not None), so this only fires when a
    # caller explicitly passes None for both — confirm intended.
    if xTol is None and yTol is None:
        raise ValueError('Must specify either xTol or yTol, ' +
                         'or peak search will never converge.')

    # Force an odd swarm size so there is a center point
    nSwarm += (nSwarm + 1) % 2
    # Records every (x, y) evaluation made during the search
    tracker = MeasuredFunction([], [])

    def shrinkAround(arr, bestInd, shrinkage=.6):
        # Reflect the swarm mean through the best point, then contract the
        # swarm toward that fulcrum by ``shrinkage``
        fulcrumVal = 2 * arr[bestInd] - np.mean(arr)
        return fulcrumVal + (arr - fulcrumVal) * shrinkage

    offsToMeasure = np.linspace(*startBounds, nSwarm)
    for iIter in range(20):  # pylint: disable=unused-variable
        # Take measurements of the points
        measuredVals = np.zeros(nSwarm)
        for iPt, offs in enumerate(offsToMeasure):
            meas = evalPointFun(offs)
            measuredVals[iPt] = meas
            tracker.addPoint((offs, meas))
            if livePlot:
                plotAfterPointMeasurement(tracker)

        # Move the lowest point closer
        bestInd = np.argmax(measuredVals)
        # print('iter =', iIter, '; offArr =', offsToMeasure, '; best =', np.max(measuredVals))
        worstInd = np.argmin(measuredVals)
        # Converged when the swarm is flat in y (yTol) or narrow in x (xTol)
        if measuredVals[bestInd] - measuredVals[worstInd] < yTol \
                or offsToMeasure[-1] - offsToMeasure[0] < xTol:
            # logger.debug('Converged on peak')
            break
        # Worst value at the center point suggests the swarm straddles a dip
        if worstInd == float(nSwarm - 1) / 2:
            logger.debug('Detected positive curvature')
            # break
        offsToMeasure = shrinkAround(offsToMeasure, bestInd)
    return (offsToMeasure[bestInd], measuredVals[bestInd])
예제 #18
0
 def write(self, writeStr):
     ''' Sends ``writeStr`` through the TCP socket, then pauses briefly. '''
     with self._tcpsocket.connected() as sock:
         logger.debug("Sending '%s'", writeStr)
         sock.send(writeStr)
     # Short settle time after each write
     time.sleep(0.05)