Example #1
    def shutdown(self):
        logger.debug("Shutting Down!")

        for f in self.files:
            try:
                logger.debug("Closing %s", f)
                f.close()
                del f
            except:
                logger.debug("File probably already closed...")

        if len(self.plotters) > 0 and not self.leave_plot_server_open:
            self.plot_server.stop()

        self.shutdown_instruments()
        self.disconnect_instruments()
Example #2
def count_matrices_ber(data, start_state=None, threshold=None, display=None):
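    """
    Cluster two-level data into (initial, final) state pairs and count
    switching events.

    If `threshold` is given the data are binarized against it; otherwise a
    clusterer is used.  Returns a 2x2 count matrix, where entry (i, j) counts
    shots that started in state i and ended in state j, together with the
    inferred (or supplied) start state.
    """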
    num_clusters = 2
    if threshold is None:
        clust = clusterer(data)
        state = clust.fit_predict(data.reshape(-1, 1)).reshape((-1, 2))
    else:
        logger.debug("Cluster data based on threshold = {}".format(threshold))
        state = data > threshold
        state = state.reshape((-1, 2))

    init_state = state[:, 0]
    final_state = state[:, 1]
    switched = np.logical_xor(init_state, final_state)

    init_state_frac = [np.mean(init_state == ct) for ct in range(num_clusters)]
    for ct, fraction in enumerate(init_state_frac):
        logger.debug("Initial fraction of state %d: %f" % (ct, fraction))

    if start_state is not None and start_state in range(num_clusters):
        start_stt = start_state
    else:
        start_stt = np.argmax(init_state_frac)
    logger.debug("Start state set to state: {}".format(start_stt))
    logger.debug("Switched state is state: {}".format(1 - start_stt))

    # 2x2 count matrix: count_mat[i, j] counts shots that started in state i
    # and ended in state j
    count_mat = np.zeros((2, 2))

    count_mat[0, 0] = np.logical_and(init_state == 0,
                                     np.logical_not(switched)).sum()
    count_mat[0, 1] = np.logical_and(init_state == 0, switched).sum()
    count_mat[1, 0] = np.logical_and(init_state == 1, switched).sum()
    count_mat[1, 1] = np.logical_and(init_state == 1,
                                     np.logical_not(switched)).sum()

    return count_mat, start_stt
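For reference, a self-contained numpy sketch of the same thresholding and counting logic on synthetic data (the threshold and array shapes are illustrative, not from the source):

import numpy as np

rng = np.random.default_rng(0)
data = rng.random((1000, 2))                 # synthetic (initial, final) shot pairs
state = (data > 0.5)                         # binarize against a threshold of 0.5

init_state, final_state = state[:, 0], state[:, 1]
counts = np.zeros((2, 2), dtype=int)
for i in (0, 1):
    for j in (0, 1):
        counts[i, j] = np.sum((init_state == i) & (final_state == j))

print(counts)   # counts[i, j]: shots that started in state i and ended in state j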
Example #3
 async def check_for_refinement(self):
     if not self.done and self.step == self.num_points():
         # Check to see if we need to perform any refinements
         await asyncio.sleep(0.1)
         logger.debug("Refining on axis {}".format(self.name))
         if self.refine_func:
             if not await self.refine_func(self, self.experiment):
                 # Returns false if no refinements needed, otherwise adds points to list
                 self.step = 0
                 self.done = True
                 self.reset()
                 logger.debug("Sweep Axis '{}' complete.".format(self.name))
                 return False
             return True
         else:
             self.step = 0
             self.done = True
             logger.debug("Sweep Axis '{}' complete.".format(self.name))
             return False
Example #4
 def request(self, method, command):
     """ Send a request via HTTP and retrieve response """
     if self._resource is None:
         logger.error(
             "No connection established for %s. Query returns None." %
             self.name)
         return None
     logger.debug("Send request to %s: %s" % (self.name, command))
     self._resource.request(method, command)
     res = self._resource.getresponse()
     if res.status == 200:
         logger.debug("Successfully made request to %s. Status: %s - %s" %
                      (self.name, res.status, res.reason))
     else:
         logger.warning("Issue making request to %s. Status: %s - %s" %
                        (self.name, res.status, res.reason))
     data = res.read()
     logger.debug("Response from %s: %s" % (self.name, data))
     return data
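A minimal usage sketch: the request()/getresponse() calls above are consistent with `self._resource` being an `http.client.HTTPConnection`, so (with an illustrative host and path) the same exchange looks like this in isolation:

import http.client

# Illustrative endpoint; the instrument's real address is configured elsewhere.
conn = http.client.HTTPConnection("192.168.1.10", timeout=5)
conn.request("GET", "/status")
res = conn.getresponse()
print(res.status, res.reason)   # 200 OK on success, as checked above
data = res.read()               # raw response body, what request() returns
conn.close()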
Example #5
    def shutdown(self):
        logger.debug("Shutting Down!")
        for f in self.files:
            try:
                logger.debug("Closing %s", f)
                f.close()
                del f
            except:
                logger.debug("File probably already closed...")

        if hasattr(self, 'plot_server'):
            try:
                if len(self.plotters) > 0:  # and not self.leave_plot_server_open:
                    self.plot_server.stop()
            except:
                logger.warning("Could not stop plot server gracefully...")

        self.shutdown_instruments()

        if not self.keep_instruments_connected:
            self.disconnect_instruments()
Example #6
    def serial_set_FIR85_enable(self, dac, FIR85):
        '''
        Enable or disable the DAC NCO FIR85 filter.
        Parameters:
            dac: DAC selector passed to serial_read/write_dac_register
            FIR85 (bool): Enables the FIR85 NCO filter if True, disables it if False
        '''
        if self.ser is None:
            logger.debug('Fake read 0x00.')
            code = 0x00
        else:
            code = self.serial_read_dac_register(dac, 0x111)

        if FIR85:
            code |= (1 << 0)
        else:
            code &= ~(1 << 0)

        if self.ser is None:
            logger.debug('Fake wrote {:#x}'.format(code))
        else:
            logger.debug(self.serial_write_dac_register(dac, 0x111, code))

        sleep(0.1)
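The body above is a standard read-modify-write of bit 0 of register 0x111; a self-contained sketch of that bit manipulation (the register value is illustrative):

def set_bit(code, bit, enable):
    # Return `code` with the given bit set (enable=True) or cleared (enable=False).
    if enable:
        return code | (1 << bit)
    return code & ~(1 << bit)

reg = 0x5A
print(hex(set_bit(reg, 0, True)))    # 0x5b - bit 0 set (FIR85 enabled)
print(hex(set_bit(reg, 0, False)))   # 0x5a - bit 0 cleared (FIR85 disabled)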
Example #7
    def main(self):
        """
        Generic run method which waits on a single stream and calls `process_data` on any new data
        """

        logger.debug('Running "%s" run loop', self.filter_name)
        setproctitle(f"python auspex filter: {self}")
        input_stream = getattr(self,
                               self._input_connectors[0]).input_streams[0]
        desc = input_stream.descriptor

        stream_done = False
        stream_points = 0

        while not self.exit.is_set():  # and not self.finished_processing.is_set():
            # Try to pull all messages in the queue. queue.empty() is not reliable, so we
            # ask for forgiveness rather than permission.
            messages = []

            # For any filter-specific loop needs
            self.checkin()

            # Check to see if the parent process still exists:
            if not self._parent_process_running():
                logger.warning(
                    f"{self} with pid {os.getpid()} could not find parent with pid {os.getppid()}. Assuming something has gone wrong. Exiting."
                )
                break

            while not self.exit.is_set():
                try:
                    messages.append(input_stream.queue.get(False))
                except queue.Empty as e:
                    time.sleep(0.002)
                    break

            self.push_resource_usage()
            for message in messages:
                message_type = message['type']
                if message['type'] == 'event':
                    logger.debug('%s "%s" received event with type "%s"',
                                 self.__class__.__name__, self.filter_name,
                                 message['event_type'])

                    # Check to see if we're done
                    if message['event_type'] == 'done':
                        logger.debug(f"{self} received done message!")
                        stream_done = True
                    else:
                        # Propagate along the graph
                        self.push_to_all(message)
                        self.process_message(message)

                elif message['type'] == 'data':
                    # if not hasattr(message_data, 'size'):
                    #     message_data = np.array([message_data])
                    message_data = input_stream.pop()
                    if message_data is not None:
                        logger.debug('%s "%s" received %d points.',
                                     self.__class__.__name__, self.filter_name,
                                     message_data.size)
                        logger.debug("Now has %d of %d points.",
                                     input_stream.points_taken.value,
                                     input_stream.num_points())
                        stream_points += len(message_data)
                        self.process_data(message_data)
                        self.processed += message_data.nbytes

            if stream_done:
                self.push_to_all({
                    "type": "event",
                    "event_type": "done",
                    "data": None
                })
                self.done.set()
                break

        # When we've finished, either prematurely or as expected
        self.on_done()
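The inner loop above drains the input queue by catching `queue.Empty` rather than trusting `queue.empty()`; a self-contained sketch of that pattern:

import queue

q = queue.Queue()
for i in range(3):
    q.put(i)

messages = []
while True:
    try:
        messages.append(q.get(False))   # non-blocking get, as in the loop above
    except queue.Empty:
        break

print(messages)   # [0, 1, 2]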
Example #8
def disjointNameRefz (tgtBaseClName, acceptClassRefz=None, bEchoDetails=False, szLogLabel="DisJ?"):
    """
    Where acceptClassRefz is defined, determines whether tgtBaseClName is
    disjoint from the one or more acceptClassRefz {string, list, set}
    value(s).  More precisely, the (lower-case) tgtBaseClName is checked as
    a substring of each (lower-case) acceptClassRefz element; the function
    returns True only when no element contains it.
    The logic is generalized here to support various MetaClass
    initialization constraint checks.
    """
    if tgtBaseClName is None:
        raise Exception( "Bogus tgtBaseClName [%s] cited!" % tgtBaseClName)

    bDisjointForRefz = False

    szTgtSubKey = tgtBaseClName.lower()

    if acceptClassRefz is not None:
        # acceptClassRefz defined
        #
        acceptedRefzType = type( acceptClassRefz)

        szSE_MsgMask = "%s %s." \
            "\n\r   << <szTgtSubKey> '%s' %s '%s' <acceptClassRefz> [lc] substring."

        szME_MsgMask = "%u/%u: %s << <szTgtSubKey> '%s' %s '%s' <currAcceptClName> %s element substring.\n\r"

        if str == acceptedRefzType:
            # Process acceptedRefzType as a single string value
            # One answer to deal with; yes or no.
            if -1 == acceptClassRefz.lower().find( szTgtSubKey):
                # No match
                logger.debug( (szSE_MsgMask + "\n\r"), "Skipping",
                    szLogLabel, szTgtSubKey, "NOT", acceptClassRefz)
                bDisjointForRefz = True
            else:
                # Matched
                if bEchoDetails:
                    logger.info( szSE_MsgMask, "Continuing",
                        szLogLabel, szTgtSubKey, "noted as", acceptClassRefz)
                # bDisjointForRefz remains false

        else:
            if list == acceptedRefzType or set == acceptedRefzType:
                # Process as a multi-entry list ['A', 'B'] or set {'A', 'B'}
                # Multiple possibilities; return false on first match
                nIndex = 1
                nCount = len( acceptClassRefz)
                bMatched = False
                for currAcceptClName in acceptClassRefz:
                    if -1 == currAcceptClName.lower().find( szTgtSubKey):
                        # No match
                        logger.debug( szME_MsgMask,
                            nIndex, nCount, "NoMatch", szTgtSubKey, "NOT", currAcceptClName, acceptedRefzType)
                    else:
                        bMatched = True
                        if bEchoDetails:
                            logger.info( szME_MsgMask,
                                nIndex, nCount, "Matched", szTgtSubKey, "noted as", currAcceptClName, acceptedRefzType)
                        break

                    nIndex += 1

                # end for all acceptClassRefz elements

                bDisjointForRefz = not bMatched

                if bDisjointForRefz:
                    logger.debug( "Skipping %s;  %s << bDisjointForRefz\n\r",
                        szLogLabel, bDisjointForRefz)
                else:
                    if bEchoDetails:
                        logger.info( "Continuing %s;  %s << bDisjointForRefz",
                            szLogLabel, bDisjointForRefz)
                #
            else:
                raise Exception( "Unhandled acceptedRefzType: {} cited!".format( acceptedRefzType))

    # else acceptClassRefz NOT defined; default behavior; disjointness not
    # determined -- no skip applies

    return bDisjointForRefz
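A hypothetical usage sketch, assuming `disjointNameRefz` and the module-level `logger` it uses are importable; the class names are illustrative:

# Single string: "APS2" does not contain "x6", so the target is disjoint.
print(disjointNameRefz("X6", acceptClassRefz="APS2"))                 # True

# List: "X6-1000M" contains "x6", so a match is found and it is not disjoint.
print(disjointNameRefz("X6", acceptClassRefz=["APS2", "X6-1000M"]))   # False

# No acceptClassRefz given: disjointness is not determined, default False.
print(disjointNameRefz("X6"))                                         # False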
Example #9
def skipMetaInit (currClName, currClBases, currClDict, acceptClassRefz=None, bEchoDetails=False, szLogLabel="Meta?"):
    """
    Where acceptClassRefz is defined, determines equality/membership of the
    currClName value with respect to the acceptClassRefz {string, list, set}
    object; returns True (skip) when there is no match.
    The logic is generalized here to support various MetaClass
    initialization constraint checks.
    """
    if currClName is None:
        raise Exception( "Bogus currClName [%s] cited!" % currClName)

    if currClBases is None:
        raise Exception( "Bogus currClBases [%s] cited!" % currClBases)

    if currClDict is None:
        raise Exception( "Bogus currClDict [%s] cited!" % currClDict)

    bSkipMInit = False

    if acceptClassRefz is not None:
        # acceptClassRefz defined
        #
        acceptedRefzType = type( acceptClassRefz)

        if str == acceptedRefzType:
            # Process acceptedRefzType as a single string value
            if not (currClName == acceptClassRefz):
                # No Match
                logger.debug( __szSMI_LogTextPrefix +
                    "!= '%s' <acceptClassRefz> %s\n\r",
                    "Skipping", currClName, szLogLabel, acceptClassRefz, acceptedRefzType)
                bSkipMInit = True
            else:
                # Matched
                if bEchoDetails:
                    logger.info( __szSMI_LogTextPrefix +
                        "== <acceptClassRefz> %s 8-)\n\r",
                        "Continuing", currClName, szLogLabel, acceptedRefzType)
        else:
            if list == acceptedRefzType or set == acceptedRefzType:
                # Process as a multi-entry list ['A', 'B'] or set {'A', 'B'}
                if not (currClName in acceptClassRefz):
                    # No match
                    logger.debug( __szSMI_LogTextPrefix +
                        "currClName is NOT an acceptClassRefz element:"  \
                        "\n\r      %s::%s\n\r",
                        "Skipping", currClName, szLogLabel, acceptedRefzType, acceptClassRefz)
                    bSkipMInit = True
                else:
                    # Matched
                    if bEchoDetails:
                        logger.info( __szSMI_LogTextPrefix +
                            "currClName is an element of acceptClassRefz:" \
                            "\n\r      %s::%s 8-)\n\r",
                            "Continuing", currClName, szLogLabel, acceptedRefzType, acceptClassRefz)
            #
            else:
                raise Exception( "Unhandled acceptedRefzType: {} cited!".format( acceptedRefzType))

    # else acceptClassRefz NOT defined; default behavior (no skip) applies

    if bEchoDetails and not bSkipMInit:
        # Optionally log the Instrument metaclass __init__ parameters
        logger.info( "%s %s.__init__( currClName, currClBases, currClDict):" \
           "\n\r   --  currClName: %s" \
           "\n\r   -- currClBases: %s" \
           "\n\r   --  currClDict: %s\n\r",
           "++", szLogLabel, currClName, currClBases, currClDict)

    return bSkipMInit
Example #10
 def clear_waveform(self, channel=1):
     """ Clear all waveforms loaded in the memory """
     logger.debug("Clear all waveforms loaded in the memory of %s" %
                  self.name)
     self.interface.write("SOURce%d:DATA:VOLatile:CLEar" % channel)
Example #11
    def run_sweeps(self):
        # Propagate the descriptors through the network
        self.update_descriptors()
        # Make sure we are starting from scratch... is this necessary?
        self.reset()
        # Update the progress bar if need be
        if self.progressbar is not None:
            self.progressbar.reset()

        #connect all instruments
        if not self.instrs_connected:
            self.connect_instruments()

        # Go find any writers
        self.writers = [n for n in self.nodes if isinstance(n, WriteToHDF5)]
        self.buffers = [n for n in self.nodes if isinstance(n, DataBuffer)]
        if self.name:
            for w in self.writers:
                w.filename.value = os.path.join(
                    os.path.dirname(w.filename.value), self.name)
        self.filenames = [w.filename.value for w in self.writers]
        self.files = []

        # Check for redundancy in filenames, and share plot file objects
        for filename in set(self.filenames):
            wrs = [w for w in self.writers if w.filename.value == filename]

            # Let the first writer with this filename create the file...
            wrs[0].file = wrs[0].new_file()
            self.files.append(wrs[0].file)

            # Make the rest of the writers use this same file object
            for w in wrs[1:]:
                w.file = wrs[0].file
                w.filename.value = wrs[0].filename.value

        # Go and find any plotters
        self.plotters = [
            n for n in self.nodes
            if isinstance(n, (Plotter, MeshPlotter, XYPlotter))
        ]

        # We might have some additional plotters that are separate from
        # the asyncio filter pipeline
        self.plotters.extend(self.extra_plotters)

        # These use neither streams nor the filter pipeline
        self.plotters.extend(self.manual_plotters)

        # Call any final initialization on the filter pipeline
        for n in self.nodes + self.extra_plotters:
            n.experiment = self
            n.loop = self.loop
            # n.executor   = self.executor
            if hasattr(n, 'final_init'):
                n.final_init()

        # Launch the plot server and matplotlib client if necessary.
        if len(self.plotters) > 0:
            logger.debug("Found %d plotters", len(self.plotters))

            from .plotting import MatplotServerThread

            plot_desc = {p.name: p.desc() for p in self.plotters}
            self.plot_server = MatplotServerThread(plot_desc)
            for plotter in self.plotters:
                plotter.plot_server = self.plot_server
            time.sleep(0.5)
            client_path = os.path.join(
                os.path.dirname(os.path.abspath(__file__)),
                "matplotlib-client.py")
            subprocess.Popen(['python', client_path, 'localhost'],
                             env=os.environ.copy())
            time.sleep(1)

        def catch_ctrl_c(signum, frame):
            logger.info("Caught SIGINT. Shutting down.")
            self.shutdown()
            raise NameError("Shutting down.")
            sys.exit(0)

        signal.signal(signal.SIGINT, catch_ctrl_c)

        # We want to wait for the sweep method above,
        # not the experiment's run method, so replace this
        # in the list of tasks.
        other_nodes = self.nodes[:]
        other_nodes.extend(self.extra_plotters)
        other_nodes.remove(self)
        tasks = [n.run() for n in other_nodes]

        tasks.append(self.sweep())
        try:
            self.loop.run_until_complete(asyncio.gather(*tasks))
            self.loop.run_until_complete(asyncio.sleep(1))
        except Exception as e:
            logger.exception("Exception occurred while running sweeps")

        for plot, callback in zip(self.manual_plotters,
                                  self.manual_plotter_callbacks):
            if callback:
                callback(plot)

        self.shutdown()
Example #12
 def update_descriptors(self):
     logger.debug("Starting descriptor update in input connector %s.",
                  self.name)
     self.descriptor = self.input_streams[0].descriptor
     self.parent.update_descriptors()
Example #13
fake_x6 = True  # for discovery unit test support IMI

# Dirty trick to avoid loading libraries when scraping
# this code using quince.
if config.auspex_dummy_mode:
    fake_x6 = True
else:
    #
    # ----- fix/unitTests_1 / ST-15 delta start...
    bSkipX6DriverLoad = \
        config.disjointNameRefz( "X6",
                                 acceptClassRefz=config.tgtInstrumentClass,
                                 bEchoDetails=config.bEchoInstrumentMetaInit,
                                 szLogLabel="X6 driver load")
    if bSkipX6DriverLoad:
        logger.debug( "X6 module load skipped << ST-15 Delta.")
    else:
        # ----- fix/unitTests_1 / ST-15 delta stop.
        # Original block indented to suit bSkipX6DriverLoad use-case:
        try:
            import libx6
            fake_x6 = False
        #except:
            # logger.warning("Could not load x6 library")
        except Exception as e:
            logger.warning( "libx6 import failed!"
                "\n\r   << EEE Exception: %s", e)
            fake_x6 = True
            print( "      -- Set to continue processing nonetheless << fake_x6 << {}\n\r".format( fake_x6))

class X6Channel(DigitizerChannel):
Example #14
    def init_plot_servers(self):
        logger.debug("Found %d plotters", len(self.plotters))

        from .plotting import MatplotServerThread
        plot_desc = {p.name: p.desc() for p in self.standard_plotters}
        if not hasattr(self, "plot_server"):
            self.plot_server = MatplotServerThread(plot_desc)
        if len(self.plotters) > len(self.standard_plotters) and not hasattr(
                self, "extra_plot_server"):
            extra_plot_desc = {
                p.name: p.desc()
                for p in self.extra_plotters + self.manual_plotters
            }
            self.extra_plot_server = MatplotServerThread(
                extra_plot_desc,
                status_port=self.plot_server.status_port + 2,
                data_port=self.plot_server.data_port + 2)
        for plotter in self.standard_plotters:
            plotter.plot_server = self.plot_server
        for plotter in self.extra_plotters + self.manual_plotters:
            plotter.plot_server = self.extra_plot_server
        time.sleep(0.5)
        # Kill a previous plotter if desired.
        if auspex.config.single_plotter_mode and auspex.config.last_plotter_process:
            pros = [auspex.config.last_plotter_process]
            if (not self.leave_plot_server_open or self.first_exp
                ) and auspex.config.last_extra_plotter_process:
                pros += [auspex.config.last_extra_plotter_process]
            for pro in pros:
                if hasattr(os, 'setsid'):  # Doesn't exist on windows
                    try:
                        os.kill(pro.pid,
                                0)  # Raises an error if the PID doesn't exist
                        os.killpg(
                            os.getpgid(pro.pid),
                            signal.SIGTERM)  # Proceed to kill process group
                    except OSError:
                        logger.debug("No plotter to kill.")
                else:
                    try:
                        pro.kill()
                    except:
                        logger.debug("No plotter to kill.")

        client_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                   "matplotlib-client.py")
        #if not auspex.config.last_plotter_process:
        if hasattr(os, 'setsid'):
            auspex.config.last_plotter_process = subprocess.Popen(
                ['python', client_path, 'localhost'],
                env=os.environ.copy(),
                preexec_fn=os.setsid)
        else:
            auspex.config.last_plotter_process = subprocess.Popen(
                ['python', client_path, 'localhost'], env=os.environ.copy())
        if hasattr(self, 'extra_plot_server') and (
                not auspex.config.last_extra_plotter_process
                or not self.leave_plot_server_open or self.first_exp):
            if hasattr(os, 'setsid'):
                auspex.config.last_extra_plotter_process = subprocess.Popen(
                    [
                        'python', client_path, 'localhost',
                        str(self.extra_plot_server.status_port),
                        str(self.extra_plot_server.data_port)
                    ],
                    env=os.environ.copy(),
                    preexec_fn=os.setsid)
            else:
                auspex.config.last_extra_plotter_process = subprocess.Popen(
                    [
                        'python', client_path, 'localhost',
                        str(self.extra_plot_server.status_port),
                        str(self.extra_plot_server.data_port)
                    ],
                    env=os.environ.copy())
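The kill logic above probes the PID with signal 0 and then terminates the whole process group; a POSIX-only sketch of that pattern with an illustrative child command:

import os
import signal
import subprocess

# Start the child in its own session so the whole group can be signalled later.
proc = subprocess.Popen(["sleep", "60"], preexec_fn=os.setsid)

try:
    os.kill(proc.pid, 0)                              # raises OSError if the PID is gone
    os.killpg(os.getpgid(proc.pid), signal.SIGTERM)   # terminate the process group
except OSError:
    print("No plotter to kill.")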
Example #15
 def clear_sweeps(self):
     """Delete all sweeps present in this experiment."""
     logger.debug("Removing all axes from experiment.")
     self.sweeper.axes = []
     for oc in self.output_connectors.values():
         oc.descriptor.axes = []
Example #16
    def update_descriptors(self):
        if not self.simple_kernel and self.kernel.value is None:
            raise ValueError("Integrator was passed kernel None")

        logger.debug(
            'Updating KernelIntegrator "%s" descriptors based on input descriptor: %s.',
            self.filter_name, self.sink.descriptor)

        record_length = self.sink.descriptor.axes[-1].num_points()

        if self.kernel.value:
            if os.path.exists(
                    os.path.join(config.KernelDir,
                                 self.kernel.value + '.txt')):
                kernel = np.loadtxt(
                    os.path.join(config.KernelDir, self.kernel.value + '.txt'),
                    dtype=complex,
                    converters={
                        0: lambda s: complex(s.decode().replace('+-', '-'))
                    })
            else:
                try:
                    kernel = eval(self.kernel.value.encode('unicode_escape'))
                except:
                    raise ValueError(
                        'Kernel invalid. Provide a file name or an expression to evaluate'
                    )
            if self.simple_kernel.value:
                logger.warning(
                    "Using specified kernel. To use a box car filter instead, clear kernel.value"
                )

        elif self.simple_kernel.value:
            time_pts = self.sink.descriptor.axes[-1].points
            time_step = time_pts[1] - time_pts[0]
            kernel = np.zeros(record_length, dtype=np.complex128)
            sample_start = int(self.box_car_start.value / time_step)
            sample_stop = int(self.box_car_stop.value / time_step) + 1
            kernel[sample_start:sample_stop] = 1.0
            # add modulation
            kernel *= np.exp(2j * np.pi * self.demod_frequency.value *
                             time_pts)
        else:
            raise ValueError(
                'Kernel invalid. Either provide a file name or an expression to evaluate or set simple_kernel.value to true'
            )
        # pad or truncate the kernel to match the record length
        if kernel.size < record_length:
            self.aligned_kernel = np.append(
                kernel,
                np.zeros(record_length - kernel.size, dtype=np.complex128))
        else:
            self.aligned_kernel = np.resize(kernel, record_length)

        # Integrator reduces and removes axis on output stream
        # update output descriptors
        output_descriptor = DataStreamDescriptor()
        # TODO: handle reduction to single point
        output_descriptor.axes = self.sink.descriptor.axes[:-1]
        output_descriptor._exp_src = self.sink.descriptor._exp_src
        output_descriptor.dtype = np.complex128
        for ost in self.source.output_streams:
            ost.set_descriptor(output_descriptor)
            ost.end_connector.update_descriptors()
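For the `simple_kernel` branch above, a self-contained numpy sketch of building the box-car kernel with demodulation over an evenly spaced time axis (all numbers are illustrative):

import numpy as np

record_length = 1024
time_pts = np.arange(record_length) * 1e-9            # 1 ns sampling, illustrative
time_step = time_pts[1] - time_pts[0]

box_car_start, box_car_stop = 100e-9, 500e-9           # integration window (s)
demod_frequency = 10e6                                  # demodulation frequency (Hz)

kernel = np.zeros(record_length, dtype=np.complex128)
sample_start = int(box_car_start / time_step)
sample_stop = int(box_car_stop / time_step) + 1
kernel[sample_start:sample_stop] = 1.0                  # box-car window
kernel *= np.exp(2j * np.pi * demod_frequency * time_pts)   # add modulation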
Example #17
    def update_descriptors(self):
        logger.debug(
            'Updating averager "%s" descriptors based on input descriptor: %s.',
            self.name, self.sink.descriptor)
        descriptor_in = self.sink.descriptor
        names = [a.name for a in descriptor_in.axes]

        self.axis.allowed_values = names

        if self.axis.value is None:
            self.axis.value = descriptor_in.axes[0].name

        # Convert named axes to an index
        if self.axis.value not in names:
            raise ValueError(
                "Could not find axis {} within the DataStreamDescriptor {}".
                format(self.axis.value, descriptor_in))
        self.axis_num = descriptor_in.axis_num(self.axis.value)
        logger.debug("Averaging over axis #%d: %s", self.axis_num,
                     self.axis.value)

        self.data_dims = descriptor_in.data_dims()
        if self.axis_num == len(descriptor_in.axes) - 1:
            logger.debug("Performing scalar average!")
            self.points_before_partial_average = 1
            self.avg_dims = [1]
        else:
            self.points_before_partial_average = descriptor_in.num_points_through_axis(
                self.axis_num + 1)
            self.avg_dims = self.data_dims[self.axis_num + 1:]

        # In case we receive multiple final averages simultaneously
        self.reshape_dims = self.data_dims[self.axis_num:]
        if self.axis_num > 0:
            self.reshape_dims = [-1] + self.reshape_dims
        self.mean_axis = self.axis_num - len(self.data_dims)

        self.points_before_final_average = descriptor_in.num_points_through_axis(
            self.axis_num)
        logger.debug("Points before partial average: %s.",
                     self.points_before_partial_average)
        logger.debug("Points before final average: %s.",
                     self.points_before_final_average)
        logger.debug("Data dimensions are %s", self.data_dims)
        logger.debug("Averaging dimensions are %s", self.avg_dims)

        # Define final axis descriptor
        descriptor = descriptor_in.copy()
        self.num_averages = descriptor.pop_axis(self.axis.value).num_points()
        logger.debug("Number of partial averages is %d", self.num_averages)

        self.sum_so_far = np.zeros(self.avg_dims, dtype=descriptor.dtype)
        self.current_avg_frame = np.zeros(self.points_before_final_average,
                                          dtype=descriptor.dtype)
        self.partial_average.descriptor = descriptor
        self.final_average.descriptor = descriptor

        # We can update the visited_tuples upfront if none
        # of the sweeps are adaptive...
        desc_out_dtype = descriptor_in.axis_data_type(
            with_metadata=True, excluding_axis=self.axis.value)
        if not descriptor_in.is_adaptive():
            vals = [
                a.points_with_metadata() for a in descriptor_in.axes
                if a.name != self.axis.value
            ]
            nested_list = list(itertools.product(*vals))
            flattened_list = [
                tuple((val for sublist in line for val in sublist))
                for line in nested_list
            ]
            descriptor.visited_tuples = np.core.records.fromrecords(
                flattened_list, dtype=desc_out_dtype)
        else:
            descriptor.visited_tuples = np.empty((0), dtype=desc_out_dtype)

        for stream in self.partial_average.output_streams + self.final_average.output_streams:
            stream.set_descriptor(descriptor)
            stream.end_connector.update_descriptors()

        # Define variance axis descriptor
        descriptor_var = descriptor_in.copy()
        descriptor_var.data_name = "Variance"
        descriptor_var.pop_axis(self.axis.value)
        if descriptor_var.unit:
            descriptor_var.unit = descriptor_var.unit + "^2"
        descriptor_var.metadata["num_averages"] = self.num_averages
        self.final_variance.descriptor = descriptor_var

        if not descriptor_in.is_adaptive():
            descriptor_var.visited_tuples = np.core.records.fromrecords(
                flattened_list, dtype=desc_out_dtype)
        else:
            descriptor_var.visited_tuples = np.empty((0), dtype=desc_out_dtype)

        for stream in self.final_variance.output_streams:
            stream.set_descriptor(descriptor_var)
            stream.end_connector.update_descriptors()
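A self-contained numpy sketch of the reshape-and-mean step implied by `reshape_dims` and `mean_axis` above, for an averaged axis in the middle of the data dimensions (shapes are illustrative):

import numpy as np

data_dims = [3, 5, 4]        # e.g. outer sweep x averages x record points
axis_num = 1                 # average over the middle axis

reshape_dims = data_dims[axis_num:]
if axis_num > 0:
    reshape_dims = [-1] + reshape_dims          # [-1, 5, 4]
mean_axis = axis_num - len(data_dims)           # -2: the averaged axis, from the end

flat = np.arange(np.prod(data_dims), dtype=float)   # stand-in for a received chunk
averaged = flat.reshape(reshape_dims).mean(axis=mean_axis)
print(averaged.shape)        # (3, 4): the averaged axis is removed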
Example #18
 def add_output_stream(self, stream):
     logger.debug("Adding output stream '%s' to output connector %s.",
                  stream, self)
     self.output_streams.append(stream)
     stream.start_connector = self
Example #19
 def __init__(self, stream=None, num=0, notebook=False):
     super(ExpProgressBar, self).__init__()
     logger.debug("initialize the progress bars.")
     self.stream = stream
     self.num = num
     self.notebook = notebook
Example #20
 def write(self, value):
     logger.debug("Writing '%s'" % value)
Example #21
 def add_axis(self, axis):
     for oc in self.output_connectors.values():
         logger.debug("Adding axis %s to connector %s.", axis, oc.name)
         oc.descriptor.add_axis(axis)
Example #22
 def query(self, value):
     logger.debug("Querying '%s'" % value)
     if value == ":output?;":
         return "on"
     return np.random.random()
Example #23
    def upload_waveform_binary(self,
                               data,
                               channel=1,
                               name="mywaveform",
                               dac=True):
        """ NOT YET WORKING - DO NOT USE
        Load binary data into a waveform memory

        dac: True if the values have already been converted to integers
        """
        logger.warning(
            "Binary upload is under development. May not work as intended. Please consider using ASCII upload: upload_waveform()"
        )
        N = len(data)
        if N < 8 or N > 16e6:
            logger.error(
                "Data has invalid length = %d. Must be between 8 and 16M. Cannot upload waveform."
                % N)
            return False
        # Check length of the waveform name, must be <= 12 characters
        if len(name) > 12:
            logger.warning("Waveform name is longer than the 12-character limit. It will be truncated: %s --> %s" \
                            %(name,name[:12]))
        name = name[:12]  # Arb waveform name at most 12 characters
        # We don't support float values, so must convert to integer
        if not dac:
            logger.warning(
                "We currently do not support uploading float values. Waveform values will be converted to integers."
            )
            # Convert to integer option (dac=True)
            data = [datum * 32767 for datum in data]
        # Values must be within -32767 and 32767
        if np.max(np.abs(data)) > 32767:
            logger.warning(
                "Some points out of range [-32767,32767] will be clipped off.")
        data = [int(max(min(datum, 32767), -32767)) for datum in data]
        wf = []
        N = N * 2  # 2 bytes for each point
        n = int(np.log10(N)) + 1  # Number of digits of N
        # Split 2-byte integer into 2 separate bytes
        for datum in data:
            if datum > 0:
                wf.append(datum >> 8)
                wf.append(datum % (1 << 8))
            else:
                datum = -datum
                wf.append(-(datum >> 8))
                wf.append(-(datum % (1 << 8)))
        wf = np.array(wf, dtype='int8')  # Force datatype to 8-bit integer
        logger.debug("Upload waveform %s to instrument %s, channel %d: %s" %
                     (name, self.name, channel, wf))
        self.interface.write_binary_values(
            "SOURce%s:DATA:ARBitrary1:DAC %s,#%d%d" % (channel, name, n, N),
            wf,
            datatype='b',
            is_big_endian=False)
        # Check if successfully uploaded or not
        data_pts = int(
            self.interface.query("SOURce%s:DATA:ATTR:POIN? %s" %
                                 (channel, name)))
        if data_pts == len(data):
            logger.debug(
                "Successfully uploaded waveform %s to instrument %s, channel %d"
                % (name, self.name, channel))
            return True
        else:
            logger.error(
                "Failed uploading waveform %s to instrument %s, channel %d" %
                (name, self.name, channel))
            return False
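A self-contained sketch of the scale/clip/byte-split steps above for a positive sample (values are illustrative; the method above additionally sign-flips the two bytes for negative samples):

data = [0.5, 1.2, -0.25]
ints = [int(max(min(d * 32767, 32767), -32767)) for d in data]
print(ints)                  # [16383, 32767, -8191]: scaled and clipped to 16 bits

datum = ints[0]
high, low = datum >> 8, datum % (1 << 8)
print(high, low)             # 63 255: the two bytes appended for this sample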
Example #24
 def values(self, query):
     logger.debug("Returning values %s" % query)
     return np.random.random()