Example #1
class KernelIntegrator(Filter):
    """Integrate with a given kernel. The kernel will be padded or truncated to match the record length."""

    sink = InputConnector()
    source = OutputConnector()
    kernel = Parameter()
    bias = FloatParameter(default=0.0)
    simple_kernel = BoolParameter(default=True)
    box_car_start = FloatParameter(default=0.0)
    box_car_stop = FloatParameter(default=100e-9)
    demod_frequency = FloatParameter(default=0.0)

    def __init__(self, **kwargs):
        super(KernelIntegrator, self).__init__(**kwargs)
        self.pre_int_op = None
        self.post_int_op = None
        for k, v in kwargs.items():
            if hasattr(self, k) and isinstance(getattr(self, k), Parameter):
                getattr(self, k).value = v
        if "pre_integration_operation" in kwargs:
            self.pre_int_op = kwargs["pre_integration_operation"]
        if "post_integration_operation" in kwargs:
            self.post_int_op = kwargs["post_integration_operation"]
        # self.quince_parameters = [self.simple_kernel, self.demod_frequency, self.box_car_start, self.box_car_stop]

    def update_descriptors(self):
        if not self.simple_kernel.value and self.kernel.value is None:
            raise ValueError("KernelIntegrator requires a kernel when simple_kernel is not set")

        logger.debug(
            'Updating KernelIntegrator "%s" descriptors based on input descriptor: %s.',
            self.filter_name, self.sink.descriptor)

        record_length = self.sink.descriptor.axes[-1].num_points()

        if self.kernel.value:
            if os.path.exists(
                    os.path.join(config.KernelDir,
                                 self.kernel.value + '.txt')):
                kernel = np.loadtxt(
                    os.path.join(config.KernelDir, self.kernel.value + '.txt'),
                    dtype=complex,
                    converters={
                        0: lambda s: complex(s.decode().replace('+-', '-'))
                    })
            else:
                try:
                    kernel = eval(self.kernel.value.encode('unicode_escape'))
                except Exception as ex:
                    raise ValueError(
                        'Kernel invalid. Provide a file name or an expression to evaluate.'
                    ) from ex
            if self.simple_kernel.value:
                logger.warning(
                    "Using specified kernel. To use a box car filter instead, clear kernel.value"
                )

        elif self.simple_kernel.value:
            time_pts = self.sink.descriptor.axes[-1].points
            time_step = time_pts[1] - time_pts[0]
            kernel = np.zeros(record_length, dtype=np.complex128)
            sample_start = int(self.box_car_start.value / time_step)
            sample_stop = int(self.box_car_stop.value / time_step) + 1
            kernel[sample_start:sample_stop] = 1.0
            # add modulation
            kernel *= np.exp(2j * np.pi * self.demod_frequency.value *
                             time_pts)
        else:
            raise ValueError(
                'Kernel invalid. Provide a file name or an expression to evaluate, or set simple_kernel.value to True.'
            )
        # pad or truncate the kernel to match the record length
        if kernel.size < record_length:
            self.aligned_kernel = np.append(
                kernel,
                np.zeros(record_length - kernel.size, dtype=np.complex128))
        else:
            self.aligned_kernel = np.resize(kernel, record_length)

        # Integrator reduces and removes axis on output stream
        # update output descriptors
        output_descriptor = DataStreamDescriptor()
        # TODO: handle reduction to single point
        output_descriptor.axes = self.sink.descriptor.axes[:-1]
        output_descriptor._exp_src = self.sink.descriptor._exp_src
        output_descriptor.dtype = np.complex128
        for ost in self.source.output_streams:
            ost.set_descriptor(output_descriptor)
            ost.end_connector.update_descriptors()

    def process_data(self, data):

        # TODO: handle variable partial records
        if self.pre_int_op:
            data = self.pre_int_op(data)
        filtered = np.inner(np.reshape(data, (-1, len(self.aligned_kernel))),
                            self.aligned_kernel)
        if self.post_int_op:
            filtered = self.post_int_op(filtered)
        # push to output connectors
        for os in self.source.output_streams:
            os.push(filtered)
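
For reference, here is a minimal standalone sketch of what this filter computes: a box-car kernel modulated at the demodulation frequency, applied to each record with an inner product. It uses only numpy; the record length, sample spacing, demodulation frequency, and random data are illustrative assumptions, not values from the example above.

import numpy as np

# Illustrative numbers only: four 512-point records sampled at 1 ns, demodulated at 10 MHz.
record_length = 512
time_pts = np.arange(record_length) * 1e-9
time_step = time_pts[1] - time_pts[0]
box_car_start, box_car_stop, demod_frequency = 0.0, 100e-9, 10e6

# Box-car window modulated at the demodulation frequency (mirrors update_descriptors above).
kernel = np.zeros(record_length, dtype=np.complex128)
kernel[int(box_car_start / time_step):int(box_car_stop / time_step) + 1] = 1.0
kernel *= np.exp(2j * np.pi * demod_frequency * time_pts)

# Integration step (mirrors process_data): one complex value per record.
rng = np.random.default_rng(0)
data = rng.standard_normal(4 * record_length) + 1j * rng.standard_normal(4 * record_length)
integrated = np.inner(data.reshape(-1, record_length), kernel)
print(integrated.shape)  # (4,)
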
Example #2
class WriteToHDF5(Filter):
    """Writes data to file."""

    sink = InputConnector()
    filename = FilenameParameter()
    groupname = Parameter(default='main')
    add_date = BoolParameter(default=False)
    save_settings = BoolParameter(default=True)

    def __init__(self, filename=None, groupname=None, add_date=False, save_settings=True, compress=True, store_tuples=True, exp_log=True, **kwargs):
        super(WriteToHDF5, self).__init__(**kwargs)
        self.compress = compress
        if filename:
            self.filename.value = filename
        if groupname:
            self.groupname.value = groupname
        self.points_taken = 0
        self.file = None
        self.group = None
        self.store_tuples = store_tuples
        self.create_group = True
        self.up_to_date = False
        self.sink.max_input_streams = 100
        self.add_date.value = add_date
        self.save_settings.value = save_settings
        self.exp_log = exp_log
        self.quince_parameters = [self.filename, self.groupname, self.add_date, self.save_settings]

    def final_init(self):
        if not self.filename.value:
            raise Exception("Filename never supplied to writer.")
        # If self.file is still None, then we need to create
        # the file object. Otherwise, we presume someone has
        # already set it up for us.
        if not self.file:
            self.file = self.new_file()

    def new_filename(self):
        filename = self.filename.value
        basename, ext = os.path.splitext(filename)
        if ext == "":
            logger.debug("Filename for writer {} does not have an extension -- using default '.h5'".format(self.name))
            ext = ".h5"

        dirname = os.path.dirname(os.path.abspath(filename))

        if self.add_date.value:
            date     = time.strftime("%y%m%d")
            dirname  = os.path.join(dirname, date)
            basename = os.path.join(dirname, os.path.basename(basename))

        # Set the file number to the maximum in the current folder + 1
        filenums = []
        if os.path.exists(dirname):
            for f in os.listdir(dirname):
                if ext in f:
                    filenums += [int(re.findall(r'-(\d{4})\.', f)[0])] if os.path.isfile(os.path.join(dirname, f)) else []

        i = max(filenums) + 1 if filenums else 0
        return "{}-{:04d}{}".format(basename,i,ext)

    def new_file(self):
        """ Open a new data file to write """
        # Close the current file, if any
        if self.file is not None:
            try:
                self.file.close()
            except Exception as e:
                logger.error("Encountered exception: {}".format(e))
                logger.error("Cannot close file '{}'. File may be damaged.".format(self.file.filename))
        # Get new file name
        self.filename.value = self.new_filename()
        head = os.path.dirname(self.filename.value)
        head = os.path.normpath(head)
        dirs = head.split(os.sep)
        # Check if path exists. If not, create new one(s).
        os.makedirs(head, exist_ok=True)
        logger.debug("Create new data file: %s." % self.filename.value)
        # Copy current settings to a folder with the file name
        if self.save_settings.value:
            # just move copies to a new directory
            self.save_yaml()
        if self.exp_log:
            self.write_to_log()
        return h5py.File(self.filename.value, 'w', libver='latest')

    def write_to_log(self):
        """ Record the experiment in a log file """
        logfile = os.path.join(config.LogDir, "experiment_log.tsv")
        if os.path.isfile(logfile):
            lf = pd.read_csv(logfile, sep="\t")
        else:
            logger.info("Experiment log file created.")
            lf = pd.DataFrame(columns=["Filename", "Date", "Time"])
        new_entry = pd.DataFrame([[self.filename.value, time.strftime("%y%m%d"), time.strftime("%H:%M:%S")]],
                                 columns=["Filename", "Date", "Time"])
        lf = pd.concat([lf, new_entry], ignore_index=True)  # DataFrame.append was removed in pandas 2.0
        lf.to_csv(logfile, sep="\t", index=False)

    def save_yaml(self):
        """ Save a copy of current experiment settings """
        head = os.path.dirname(self.filename.value)
        fulldir = os.path.splitext(self.filename.value)[0]
        if not os.path.exists(fulldir):
            os.makedirs(fulldir)
            config.yaml_dump(config.yaml_load(config.configFile), os.path.join(fulldir, os.path.split(config.configFile)[1]), flatten = True)

    def save_yaml_h5(self):
        """ Save a copy of current experiment settings in the h5 metadata"""
        header = self.file.create_group("header")
        # load then dump to get the 'include' information
        header.attrs['settings'] = config.yaml_dump(config.yaml_load(config.configFile), flatten = True)

    async def run(self):
        self.finished_processing = False
        streams    = self.sink.input_streams
        stream     = streams[0]

        for s in streams[1:]:
            if not np.all(s.descriptor.expected_tuples() == streams[0].descriptor.expected_tuples()):
                raise ValueError("Multiple streams connected to writer must have matching descriptors.")

        desc       = stream.descriptor
        axes       = desc.axes
        params     = desc.params
        axis_names = desc.axis_names(with_metadata=True)

        self.file.attrs['exp_src'] = desc._exp_src
        num_axes   = len(axes)

        if desc.is_adaptive() and not self.store_tuples:
            raise Exception("Cannot omit writing tuples with an adaptive sweep... please enable store_tuples.")

        if self.store_tuples:
            # All of the combinations for the present values of the sweep parameters only
            tuples          = desc.expected_tuples(with_metadata=True, as_structured_array=True)
        expected_length = desc.expected_num_points()

        compression = 'gzip' if self.compress else None

        # If desired, create the group in which the dataset and axes will reside
        if self.create_group:
            self.group = self.file.create_group(self.groupname.value)
        else:
            self.group = self.file

        self.data_group = self.group.create_group("data")

        # If desired, push experimental metadata into the h5 file
        if self.save_settings.value and 'header' not in self.file.keys(): # only save header once for multiple writers
            self.save_yaml_h5()

        # Create datasets for each stream
        dset_for_streams = {}
        for stream in streams:
            dset = self.data_group.create_dataset(stream.descriptor.data_name, (expected_length,),
                                        dtype=stream.descriptor.dtype,
                                        chunks=True, maxshape=(None,),
                                        compression=compression)
            dset.attrs['is_data'] = True
            dset.attrs['store_tuples'] = self.store_tuples
            dset.attrs['name'] = stream.descriptor.data_name
            dset_for_streams[stream] = dset

        # Write params into attrs
        for k,v in params.items():
            if k not in axis_names:
                self.data_group.attrs[k] = v

        # Create a table for the DataStreamDescriptor
        ref_dtype = h5py.special_dtype(ref=h5py.Reference)
        self.descriptor = self.group.create_dataset("descriptor", (len(axes),), dtype=ref_dtype)
        for k,v in desc.metadata.items():
            self.descriptor.attrs[k] = v

        # Create axis data sets for storing the base axes as well as the
        # full set of tuples. For the former we add
        # references to the descriptor.
        tuple_dset_for_axis_name = {}
        for i, a in enumerate(axes):
            if a.unstructured:
                name = "+".join(a.name)
            else:
                name = a.name

            if a.unstructured:
                # Create another reference table to refer to the constituent axes
                unstruc_ref_dset = self.group.create_dataset(name, (len(a.name),), dtype=ref_dtype)
                unstruc_ref_dset.attrs['unstructured'] = True

                for j, (col_name, col_unit) in enumerate(zip(a.name, a.unit)):
                    # Create table to store the axis value independently for each column
                    unstruc_dset = self.group.create_dataset(col_name, (a.num_points(),), dtype=a.dtype)
                    unstruc_ref_dset[j] = unstruc_dset.ref
                    unstruc_dset[:] = a.points[:,j]
                    unstruc_dset.attrs['unit'] = col_unit
                    unstruc_dset.attrs['name'] = col_name

                    # This stores the values taken during the experiment sweeps
                    if self.store_tuples:
                        dset = self.data_group.create_dataset(col_name, (expected_length,), dtype=a.dtype,
                                                             chunks=True, compression=compression, maxshape=(None,) )
                        dset.attrs['unit'] = col_unit
                        dset.attrs['is_data'] = False
                        dset.attrs['name'] = col_name
                        tuple_dset_for_axis_name[col_name] = dset

                self.descriptor[i] = self.group[name].ref
            else:
                # This stores the axis values
                self.group.create_dataset(name, (a.num_points(),), dtype=a.dtype, maxshape=(None,) )
                self.group[name].attrs['unstructured'] = False
                self.group[name][:] = a.points
                self.group[name].attrs['unit'] = "None" if a.unit is None else a.unit
                self.group[name].attrs['name'] = a.name
                self.descriptor[i] = self.group[name].ref

                # This stores the values taken during the experiment sweeps
                if self.store_tuples:
                    dset = self.data_group.create_dataset(name, (expected_length,), dtype=a.dtype,
                                                          chunks=True, compression=compression, maxshape=(None,) )
                    dset.attrs['unit'] = "None" if a.unit is None else a.unit
                    dset.attrs['is_data'] = False
                    dset.attrs['name'] = name
                    tuple_dset_for_axis_name[name] = dset

            # Give the reader some warning about the usefulness of these axes
            self.group[name].attrs['was_refined'] = False

            if a.metadata is not None:
                # Create the axis table for the metadata
                dset = self.group.create_dataset(name + "_metadata", (a.metadata.size,), dtype=np.uint8, maxshape=(None,) )
                dset[:] = a.metadata
                dset = self.group.create_dataset(name + "_metadata_enum", (a.metadata_enum.size,), dtype='S128', maxshape=(None,) )
                dset[:] = np.asarray(a.metadata_enum, dtype='S128')

                # Associate the metadata with the data axis
                self.group[name].attrs['metadata'] = self.group[name + "_metadata"].ref
                self.group[name].attrs['metadata_enum'] = self.group[name + "_metadata_enum"].ref
                self.group[name].attrs['name'] = name + "_metadata"

                # Create the dataset that stores the individual tuple values
                if self.store_tuples:
                    dset = self.data_group.create_dataset(name + "_metadata" , (expected_length,),
                                                          dtype=np.uint8, maxshape=(None,) )
                    dset.attrs['name'] = name + "_metadata"
                    tuple_dset_for_axis_name[name + "_metadata"] = dset

        # Write all the tuples if this isn't adaptive
        if self.store_tuples:
            if not desc.is_adaptive():
                for i, a in enumerate(axis_names):
                    tuple_dset_for_axis_name[a][:] = tuples[a]

        # Write pointer
        w_idx = 0

        while True:
            # Wait for all of the acquisitions to complete.
            # Contrary to what one might reasonably expect, asyncio.wait doesn't return futures
            # in the order of the iterable it was passed, but rather in order of completion. So we
            # construct a dictionary so that the results can be mapped back to the streams they came from:
            futures = {
                asyncio.ensure_future(stream.queue.get()): stream
                for stream in streams
            }

            responses, _ = await asyncio.wait(futures)

            # Construct the inverse lookup
            response_for_stream = {futures[res]: res for res in list(responses)}
            messages = [response_for_stream[stream].result() for stream in streams]

            # Ensure we aren't getting different types of messages at the same time.
            message_types = [m['type'] for m in messages]
            if len(set(message_types)) > 1:
                raise ValueError("Writer received concurrent messages with different message types {}".format(message_types))

            # Infer the type from the first message
            message_type = messages[0]['type']

            # If we receive a message
            if message_type == 'event':
                logger.debug('%s "%s" received event of type "%s"', self.__class__.__name__, self.name, message_type)
                if messages[0]['event_type'] == 'done':
                    break
                elif messages[0]['event_type'] == 'refined':
                    refined_axis = messages[0]['data']

                    # Resize the data set
                    num_new_points = desc.num_new_points_through_axis(refined_axis)
                    for stream in streams:
                        dset_for_streams[stream].resize((len(dset_for_streams[streams[0]])+num_new_points,))

                    if self.store_tuples:
                        for an in axis_names:
                            tuple_dset_for_axis_name[an].resize((len(tuple_dset_for_axis_name[an])+num_new_points,))

                    # Generally speaking the descriptors are now insufficient to reconstruct
                    # the full set of tuples. The user should know this, so let's mark the
                    # descriptor axes accordingly.
                    self.group[name].attrs['was_refined'] = True


            elif message_type == 'data':
                message_data = [message['data'] for message in messages]
                message_comp = [message['compression'] for message in messages]
                message_data = [pickle.loads(zlib.decompress(dat)) if comp == 'zlib' else dat for comp, dat in zip(message_comp, message_data)]
                message_data = [dat if hasattr(dat, 'size') else np.array([dat]) for dat in message_data]  # Convert single values to arrays

                for ii in range(len(message_data)):
                    if not hasattr(message_data[ii], 'size'):
                        message_data[ii] = np.array([message_data[ii]])
                    message_data[ii] = message_data[ii].flatten()
                    if message_data[ii].size != message_data[0].size:
                        raise ValueError("Writer received data of unequal length.")

                logger.debug('%s "%s" received %d points', self.__class__.__name__, self.name, message_data[0].size)
                logger.debug("Now has %d of %d points.", stream.points_taken, stream.num_points())

                self.up_to_date = (w_idx == dset_for_streams[streams[0]].len())

                # Write the data
                for s, d in zip(streams, message_data):
                    dset_for_streams[s][w_idx:w_idx+d.size] = d

                # Write the coordinate tuples
                if self.store_tuples:
                    if desc.is_adaptive():
                        tuples = desc.tuples()
                        for axis_name in axis_names:
                            tuple_dset_for_axis_name[axis_name][w_idx:w_idx+d.size] = tuples[axis_name][w_idx:w_idx+d.size]

                self.file.flush()
                w_idx += message_data[0].size
                self.points_taken = w_idx

                logger.debug("HDF5: Write index at %d", w_idx)
                logger.debug("HDF5: %s has written %d points", stream.name, w_idx)

            # If we have gotten all our data and process_data has returned, then we are done!
            if np.all([v.done() for v in self.input_connectors.values()]):
                self.finished_processing = True
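
The run loop above leans on the fact that asyncio.wait reports futures in completion order rather than submission order, hence the future-to-stream dictionary and its inverse. Below is a minimal sketch of that bookkeeping, with plain asyncio queues standing in for the writer's input streams; the queues and their contents are hypothetical.

import asyncio

async def main():
    # Toy queues standing in for the writer's input streams (hypothetical data).
    queues = [asyncio.Queue() for _ in range(3)]
    for i, q in enumerate(queues):
        q.put_nowait({"type": "data", "stream": i})

    # asyncio.wait yields completed futures in completion order, not input order,
    # so keep a future -> queue mapping and invert it once everything is done.
    futures = {asyncio.ensure_future(q.get()): q for q in queues}
    done, _ = await asyncio.wait(futures)
    response_for_queue = {futures[fut]: fut for fut in done}

    # Messages are now re-ordered to match the original queue (stream) order.
    messages = [response_for_queue[q].result() for q in queues]
    print([m["stream"] for m in messages])  # [0, 1, 2]

asyncio.run(main())
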
Example #3
class SingleShotMeasurement(Filter):

    save_kernel = BoolParameter(default=False)
    optimal_integration_time = BoolParameter(default=False)
    set_threshold = BoolParameter(default=False)
    zero_mean = BoolParameter(default=False)
    logistic_regression = BoolParameter(default=False)

    sink = InputConnector()
    source = OutputConnector() # Single shot fidelity

    TOLERANCE = 1e-3

    def __init__(self, save_kernel=False, optimal_integration_time=False,
                    zero_mean=False, set_threshold=False,
                    logistic_regression=False, **kwargs):
        super(SingleShotMeasurement, self).__init__(**kwargs)
        if len(kwargs) > 0:
            self.save_kernel.value = save_kernel
            self.optimal_integration_time.value = optimal_integration_time
            self.zero_mean.value = zero_mean
            self.set_threshold.value = set_threshold
            self.logistic_regression.value = logistic_regression

        self.quince_parameters = [self.save_kernel, self.optimal_integration_time,
            self.zero_mean, self.set_threshold, self.logistic_regression]

        self.pdf_data_queue = Queue() #Output queue
        self.fidelity       = self.source

    def update_descriptors(self):

        logger.debug("Updating SingleShotMeasurement %s descriptors based on input descriptor %s", self.filter_name, self.sink.descriptor)
        self.stream = self.sink.input_streams[0]
        self.descriptor = self.sink.descriptor
        try:
            self.time_pts = self.descriptor.axes[self.descriptor.axis_num("time")].points
            self.record_length = len(self.time_pts)
        except ValueError:
            raise ValueError("Single shot filter sink does not appear to have a time axis!")
        self.num_averages = len(self.sink.descriptor.axes[self.descriptor.axis_num("averages")].points)
        self.num_segments = len(self.sink.descriptor.axes[self.descriptor.axis_num("segment")].points)
        self.ground_data = np.zeros((self.record_length, self.num_averages), dtype=np.complex128)
        self.excited_data = np.zeros((self.record_length, self.num_averages), dtype=np.complex128)
        self.total_points = self.num_segments*self.record_length*self.num_averages # Total points BEFORE sweep axes

        output_descriptor = DataStreamDescriptor()
        output_descriptor.axes = [_ for _ in self.descriptor.axes if type(_) is SweepAxis]
        output_descriptor._exp_src = self.sink.descriptor._exp_src
        output_descriptor.dtype = np.complex128

        if len(output_descriptor.axes) == 0:
            output_descriptor.add_axis(DataAxis("Fidelity", [1]))

        for os in self.fidelity.output_streams:
            os.set_descriptor(output_descriptor)
            os.end_connector.update_descriptors()


    def final_init(self):
        self.fid_buffer = np.empty(self.record_length*self.num_averages*self.num_segments, dtype=np.complex128)
        self.idx = 0

    def process_data(self, data):
        """Fill the ground and excited data bins"""

        self.fid_buffer[self.idx:self.idx+len(data)] = data
        self.idx += len(data)

        if self.idx == self.record_length*self.num_averages*self.num_segments:
            self.idx = 0
            reshaped = self.fid_buffer.reshape(self.record_length, -1, order='F')
            self.ground_data = reshaped[:, ::2]
            self.excited_data = reshaped[:, 1::2]
            self.compute_filter()
            if self.logistic_regression.value:
                self.logistic_fidelity()
            if self.save_kernel.value:
                self._save_kernel()
            for os in self.fidelity.output_streams:
                os.push(self.fidelity_result)
            self.pdf_data_queue.put(self.pdf_data)

    def compute_filter(self):
        """Compute the single shot kernel and obtain single-shot measurement
        fidelity.

        Expects that the data will be in self.ground_data and self.excited_data,
        which are (T, N)-shaped numpy arrays, with T the time axis and N the
        number of shots."""
        #get excited and ground state data
        try:
            ground_mean = np.mean(self.ground_data, axis=1)
            excited_mean = np.mean(self.excited_data, axis=1)
        except AttributeError:
            raise Exception("Single shot filter does not appear to have any data!")
        distance = np.abs(np.mean(ground_mean - excited_mean))
        bias = np.mean(ground_mean + excited_mean) / distance
        logger.debug("Found single-shot measurement distance: {} and bias {}.".format(distance, bias))
        #construct matched filter kernel
        old_settings = np.seterr(divide='ignore', invalid='ignore')
        kernel = np.nan_to_num(np.divide(np.conj(ground_mean - excited_mean), np.var(self.ground_data, ddof=1, axis=1)))
        np.seterr(**old_settings)
        #sets kernel to zero when difference is too small, and prevents
        #kernel from diverging when var->0 at beginning of record_length
        kernel = np.multiply(kernel, np.greater(np.abs(ground_mean - excited_mean), self.TOLERANCE * distance))
        #subtract offset to cancel low-frequency fluctuations when integrating
        #raw data (not demod)
        if self.zero_mean.value:
            kernel = kernel - np.mean(kernel)
        logger.debug("Found single shot filter norm: {}.".format(np.sum(np.abs(kernel))))
        #annoyingly numpy's isreal has the opposite behavior to MATLAB's
        if not np.any(np.imag(kernel) > np.finfo(np.complex128).eps):
            #construct analytic signal from Hilbert transform
            kernel = hilbert(np.real(kernel))
        #normalize between -1 and 1
        kernel = kernel / np.amax(np.hstack([np.abs(np.real(kernel)), np.abs(np.imag(kernel))]))
        #apply matched filter
        weighted_ground = self.ground_data * kernel[:, np.newaxis]
        weighted_excited = self.excited_data * kernel[:, np.newaxis]

        if self.optimal_integration_time.value:
            #take cumulative sum up to each time step
            ground_I = np.real(weighted_ground)
            ground_Q = np.imag(weighted_ground)
            excited_I = np.real(weighted_excited)
            excited_Q = np.imag(weighted_excited)
            int_ground_I = np.cumsum(ground_I, axis=0)
            int_ground_Q = np.cumsum(ground_Q, axis=0)
            int_excited_I = np.cumsum(excited_I, axis=0)
            int_excited_Q = np.cumsum(excited_Q, axis=0)
            I_mins = np.amin(np.minimum(int_ground_I, int_excited_I), axis=1)
            I_maxes = np.amax(np.maximum(int_ground_I, int_excited_I), axis=1)
            num_times = int_ground_I.shape[0]
            fidelities = np.zeros((num_times, ))
            #Loop through each integration point; estimate the CDF and
            #then calculate best measurement fidelity
            for pt in range(num_times):
                bins = np.linspace(I_mins[pt], I_maxes[pt], 100)
                g_PDF = np.histogram(int_ground_I[pt, :], bins)[0]
                e_PDF = np.histogram(int_excited_I[pt,:], bins)[0]
                fidelities[pt] = np.sum(np.abs(g_PDF - e_PDF)) / np.sum(g_PDF + e_PDF)
            best_idx = fidelities.argmax(axis=0)
            self.best_integration_time = best_idx
            logger.info("Found best integration time at {} out of {} decimated points.".format(best_idx, num_times))
            #redo calculation with KDEs to get a more accurate estimate
            bins = np.linspace(I_mins[best_idx], I_maxes[best_idx], 100)
            g_KDE = gaussian_kde(int_ground_I[best_idx, :])
            e_KDE = gaussian_kde(int_excited_I[best_idx, :])
            g_PDF = g_KDE(bins)
            e_PDF = e_KDE(bins)
        else:
            ground_I = np.sum(np.real(weighted_ground), axis=0)
            ground_Q = np.sum(np.imag(weighted_ground), axis=0)
            excited_I = np.sum(np.real(weighted_excited), axis=0)
            excited_Q = np.sum(np.imag(weighted_excited), axis=0)
            I_min = np.amin(np.minimum(ground_I, excited_I))
            I_max = np.amax(np.maximum(ground_I, excited_I))
            bins = np.linspace(I_min, I_max, 100)
            g_KDE = gaussian_kde(ground_I)
            e_KDE = gaussian_kde(excited_I)
            g_PDF = g_KDE(bins)
            e_PDF = e_KDE(bins)

        self.kernel = kernel
        max_F_I = 1 - 0.5 * (1 - 0.5 * (bins[2] - bins[1]) * np.sum(np.abs(g_PDF - e_PDF)))
        self.pdf_data = {"Max I Fidelity": max_F_I,
                         "I Bins": bins,
                         "Ground I PDF": g_PDF,
                         "Excited I PDF": e_PDF}

        if self.set_threshold.value:
            indmax = (np.abs(np.cumsum(g_PDF / np.sum(g_PDF))
                        - np.cumsum(e_PDF / np.sum(e_PDF)))).argmax(axis=0)
            self.pdf_data["I Threshold"] = bins[indmax]
            logger.info("Single shot kernel found I threshold at {}.".format(bins[indmax]))

        if self.optimal_integration_time.value:
            mu_g, sigma_g = norm.fit(int_ground_I[best_idx, :])
            mu_e, sigma_e = norm.fit(int_excited_I[best_idx, :])
        else:
            mu_g, sigma_g = norm.fit(ground_I)
            mu_e, sigma_e = norm.fit(excited_I)
        self.pdf_data["Ground I Gaussian PDF"] = norm.pdf(bins, mu_g, sigma_g)
        self.pdf_data["Excited I Gaussian PDF"] = norm.pdf(bins, mu_e, sigma_e)

        #calculate kernel density estimates for other quadrature
        if self.optimal_integration_time.value:
            Q_min = np.amin([int_ground_Q[best_idx,:], int_excited_Q[best_idx,:]])
            Q_max = np.amax([int_ground_Q[best_idx,:], int_excited_Q[best_idx,:]])
            qbins = np.linspace(Q_min, Q_max, 100)
            g_KDE = gaussian_kde(int_ground_Q[best_idx, :])
            e_KDE = gaussian_kde(int_excited_Q[best_idx, :])
        else:
            qbins = np.linspace(np.amin([ground_Q, excited_Q]), np.amax([ground_Q, excited_Q]), 100)
            g_KDE = gaussian_kde(ground_Q)
            e_KDE = gaussian_kde(excited_Q)
        self.pdf_data["Q Bins"] = qbins
        g_PDF_Q = g_KDE(qbins)
        e_PDF_Q = e_KDE(qbins)
        self.pdf_data["Ground Q PDF"] =  g_PDF_Q
        self.pdf_data["Excited Q PDF"] =  e_PDF_Q
        self.pdf_data["Max Q Fidelity"] = 1 - 0.5 * (1 - 0.5 * (qbins[2] - qbins[1]) * np.sum(np.abs(g_PDF_Q - e_PDF_Q)))

        if self.optimal_integration_time.value:
            mu_g, sigma_g = norm.fit(int_ground_Q[best_idx, :])
            mu_e, sigma_e = norm.fit(int_excited_Q[best_idx, :])
        else:
            mu_g, sigma_g = norm.fit(ground_Q)
            mu_e, sigma_e = norm.fit(excited_Q)
        self.pdf_data["Ground Q Gaussian PDF"] = norm.pdf(qbins, mu_g, sigma_g)
        self.pdf_data["Excited Q Gaussian PDF"] = norm.pdf(qbins, mu_e, sigma_e)

        self.fidelity_result = self.pdf_data["Max I Fidelity"] + 1j * self.pdf_data["Max Q Fidelity"]
        logger.info("Single shot fidelity filter found: {}".format(self.fidelity_result))

    def logistic_fidelity(self):
        #group data and assign state labels
        gnd_features = np.hstack([np.real(self.ground_data.T),
                                np.imag(self.ground_data.T)])
        ex_features = np.hstack([np.real(self.excited_data.T),
                                np.imag(self.excited_data.T)])
        #liblinear wants arrays in C order
        features = np.ascontiguousarray(np.vstack([gnd_features, ex_features]))
        state = np.ascontiguousarray(np.hstack([np.zeros(self.ground_data.shape[1]),
                                                np.ones(self.excited_data.shape[1])]))
        #Set up logistic regression with cross-validation using liblinear.
        #Cs sets the inverse of the regularization strength, which will be optimized
        #through cross-validation. Uses the default Stratified K-Folds
        #CV generator, with 3 folds.
        #This is set up to be as consistent with the MATLAB implementation
        #as I can make it. --GJR
        Cs = np.logspace(-1,2,5)
        logreg = LogisticRegressionCV(Cs, cv=3, solver='liblinear')
        logreg.fit(features, state) #fit the model
        predictions = logreg.predict(features) #in-sample classification
        score = logreg.score(features,state) #mean accuracy of classification
        N = len(predictions)
        S = np.sum(predictions == state) #how many we got right
        #now calculate confidence intervals
        c = 0.95
        flo = betaincinv(S+1, N-S+1, (1-c)/2.)
        fhi = betaincinv(S+1, N-S+1, (1+c)/2.)
        logger.info(("In-sample logistic regression fidelity: " +
                "{:.2f}% ({:.2f}, {:.2f})".format(100*score, 100*flo, 100*fhi)))

    def _save_kernel(self):
        import QGL.config as qconfig
        if not qconfig.KernelDir or not os.path.exists(qconfig.KernelDir):
            logger.warning("No kernel directory provided, please set auspex.config.KernelDir")
            logger.warning("Saving kernel to local directory.")
            dir = "./"
        else:
            dir = qconfig.KernelDir
        try:
            logger.info(self.filter_name)
            filename = self.filter_name + "_kernel.txt"
            header = "Single shot fidelity filter - {}:\nSource: {}".format(time.strftime("%m/%d/%y -- %H:%M"), self.filter_name)
            np.savetxt(os.path.join(dir, filename), self.kernel, header=header, comments="#")
        except (AttributeError, IOError) as ex:
            raise AttributeError("Could not save single shot fidelity kernel!") from ex
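
The fidelity reported by compute_filter comes from the overlap of the ground and excited PDFs along the integrated I quadrature. Here is a minimal sketch of that estimate on synthetic Gaussian shot data; the means, widths, shot counts, and seed are invented for illustration only.

import numpy as np
from scipy.stats import gaussian_kde

rng = np.random.default_rng(0)

# Synthetic integrated I values for ground and excited shots (illustrative parameters).
ground_I = rng.normal(loc=0.0, scale=1.0, size=5000)
excited_I = rng.normal(loc=3.0, scale=1.0, size=5000)

# Kernel density estimates on a common grid, as in compute_filter.
bins = np.linspace(min(ground_I.min(), excited_I.min()),
                   max(ground_I.max(), excited_I.max()), 100)
g_PDF = gaussian_kde(ground_I)(bins)
e_PDF = gaussian_kde(excited_I)(bins)

# Fidelity from the PDF overlap: F = 1 - 0.5 * (1 - 0.5 * dI * sum(|g - e|)),
# the same expression used for "Max I Fidelity" above.
dI = bins[1] - bins[0]
fidelity = 1 - 0.5 * (1 - 0.5 * dI * np.sum(np.abs(g_PDF - e_PDF)))
print("Estimated I fidelity: {:.3f}".format(fidelity))
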
Example #4
class KernelIntegrator(Filter):
    """Integrate with a given kernel. The kernel will be padded or truncated to match the record length."""

    sink = InputConnector()
    source = OutputConnector()
    kernel = Parameter()
    bias = FloatParameter(default=0.0)
    simple_kernel = BoolParameter(default=True)
    box_car_start = FloatParameter(default=0.0)
    box_car_stop = FloatParameter(default=100e-9)
    frequency = FloatParameter(default=0.0)

    def __init__(self, **kwargs):
        super(KernelIntegrator, self).__init__(**kwargs)
        self.pre_int_op = None
        self.post_int_op = None
        for k, v in kwargs.items():
            if hasattr(self, k) and isinstance(getattr(self, k), Parameter):
                getattr(self, k).value = v
        if "pre_integration_operation" in kwargs:
            self.pre_int_op = kwargs["pre_integration_operation"]
        if "post_integration_operation" in kwargs:
            self.post_int_op = kwargs["post_integration_operation"]
        self.quince_parameters = [
            self.simple_kernel, self.frequency, self.box_car_start,
            self.box_car_stop
        ]

    def update_descriptors(self):
        if not self.simple_kernel.value and self.kernel.value is None:
            raise ValueError("KernelIntegrator requires a kernel when simple_kernel is not set")

        logger.debug(
            'Updating KernelIntegrator "%s" descriptors based on input descriptor: %s.',
            self.name, self.sink.descriptor)

        record_length = self.sink.descriptor.axes[-1].num_points()
        if self.simple_kernel.value:
            time_pts = self.sink.descriptor.axes[-1].points
            time_step = time_pts[1] - time_pts[0]
            kernel = np.zeros(record_length, dtype=np.complex128)
            sample_start = int(self.box_car_start.value / time_step)
            sample_stop = int(self.box_car_stop.value / time_step) + 1
            kernel[sample_start:sample_stop] = 1.0
            # add modulation
            kernel *= np.exp(2j * np.pi * self.frequency.value * time_step *
                             time_pts)
        else:
            kernel = eval(self.kernel.value.encode('unicode_escape'))
        # pad or truncate the kernel to match the record length
        if kernel.size < record_length:
            self.aligned_kernel = np.append(
                kernel,
                np.zeros(record_length - kernel.size, dtype=np.complex128))
        else:
            self.aligned_kernel = np.resize(kernel, record_length)

        # Integrator reduces and removes axis on output stream
        # update output descriptors
        output_descriptor = DataStreamDescriptor()
        # TODO: handle reduction to single point
        output_descriptor.axes = self.sink.descriptor.axes[:-1]
        output_descriptor._exp_src = self.sink.descriptor._exp_src
        output_descriptor.dtype = np.complex128
        for os in self.source.output_streams:
            os.set_descriptor(output_descriptor)
            os.end_connector.update_descriptors()

    async def process_data(self, data):

        # TODO: handle variable partial records
        if self.pre_int_op:
            data = self.pre_int_op(data)
        filtered = np.inner(np.reshape(data, (-1, len(self.aligned_kernel))),
                            self.aligned_kernel)
        if self.post_int_op:
            filtered = self.post_int_op(filtered)
        # push to output connectors
        for os in self.source.output_streams:
            await os.push(filtered)
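
Both versions of the integrator align the kernel to the record length the same way: short kernels are zero-padded and long kernels are truncated. A small standalone illustration of the numpy calls involved (the lengths are arbitrary):

import numpy as np

record_length = 8

short_kernel = np.ones(5, dtype=np.complex128)
long_kernel = np.arange(12, dtype=np.complex128)

# Zero-pad a short kernel out to the record length.
padded = np.append(short_kernel, np.zeros(record_length - short_kernel.size, dtype=np.complex128))

# np.resize returns the requested length; for a longer kernel it simply drops the tail.
truncated = np.resize(long_kernel, record_length)

print(padded)     # [1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 0.+0.j 0.+0.j 0.+0.j]
print(truncated)  # [0.+0.j 1.+0.j ... 7.+0.j]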