Example #1
    def goal_run_with_grad_no_batch(self, current_params):
        """
        Same as goal_run but with gradient. Very resource intensive. Unoptimized at the
        moment.
        """
        exp_values = []
        sim_values = []
        exp_stds = []
        exp_shots = []
        count = 0
        seqs_pp = self.seqs_per_point

        with tf.GradientTape() as t:
            t.watch(current_params)
            for target, data in self.learn_data.items():
                self.learn_from = data["seqs_grouped_by_param_set"]
                self.gateset_opt_map = data["opt_map"]
                indeces = self.select_from_data(self.batch_sizes[target])
                for ipar in indeces:
                    count += 1
                    data_set = self.learn_from[ipar]
                    m_vals = data_set["results"][:seqs_pp]
                    m_stds = data_set["results_std"][:seqs_pp]
                    m_shots = data_set["shots"][:seqs_pp]
                    sim_vals = self._one_par_sim_vals(current_params, data_set,
                                                      ipar, target)
                    sim_values.extend(sim_vals)
                    exp_values.extend(m_vals)
                    # Bug fix: these two lists were never filled before.
                    exp_stds.extend(m_stds)
                    exp_shots.extend(m_shots)

                    self._log_one_dataset(data_set, ipar, indeces, sim_vals,
                                          count)

            # Note: `target` is the last value from the loop above; all data
            # is scored in one combined likelihood here.
            if target == "all":
                goal = neg_loglkh_multinom_norm(
                    exp_values, tf.stack(sim_values),
                    tf.Variable(exp_stds, dtype=tf.float64),
                    tf.Variable(exp_shots, dtype=tf.float64))
            else:
                goal = g_LL_prime(exp_values, tf.stack(sim_values),
                                  tf.Variable(exp_stds, dtype=tf.float64),
                                  tf.Variable(exp_shots, dtype=tf.float64))
            grad = t.gradient(goal, current_params).numpy()
            goal = goal.numpy()

        with open(self.logdir + self.logname, "a") as logfile:
            logfile.write("\nFinished batch with ")
            logfile.write("{}: {}\n".format(self.fom.__name__, goal))
            for cb_fom in self.callback_foms:
                val = float(
                    cb_fom(exp_values, sim_values, exp_stds,
                           exp_shots).numpy())
                logfile.write("{}: {}\n".format(cb_fom.__name__, val))
            logfile.flush()

        self.optim_status["params"] = [
            par.numpy().tolist() for par in self.pmap.get_parameters()
        ]
        self.optim_status["goal"] = goal
        self.optim_status["gradient"] = list(grad.flatten())
        self.optim_status["time"] = time.asctime()
        self.evaluation += 1
        return goal, grad
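
For context, a minimal sketch of how a gradient-based optimizer could drive this method. The wrapper below is illustrative, not part of the class; it assumes the goal function accepts any watchable tf tensor built from a flat NumPy vector.

    import numpy as np
    import tensorflow as tf
    from scipy.optimize import minimize

    def fit_with_scipy(opt, x0):
        # Hypothetical driver: `opt` exposes goal_run_with_grad_no_batch,
        # `x0` is the initial scaled parameter vector.
        def fun_and_jac(x):
            params = tf.Variable(x, dtype=tf.float64)
            goal, grad = opt.goal_run_with_grad_no_batch(params)
            return goal, np.asarray(grad).flatten()
        # jac=True tells scipy that fun_and_jac returns (goal, gradient).
        return minimize(fun_and_jac, x0, jac=True, method="L-BFGS-B")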
Example #2
    def goal_run(self, current_params):
        """
        Evaluate the figure of merit for the current model parameters.

        Parameters
        ----------
        current_params : tf.Tensor
            Current model parameters

        Returns
        -------
        tf.float64
            Figure of merit

        """
        exp_values = []
        exp_stds = []
        sim_values = []
        exp_shots = []
        goals = []
        seq_weigths = []
        count = 0
        # TODO: seq per point is not constant. Remove.

        for target, data in self.learn_data.items():

            self.learn_from = data["seqs_grouped_by_param_set"]
            self.gateset_opt_map = data["opt_map"]
            indeces = self.select_from_data(self.batch_sizes[target])

            for ipar in indeces:
                count += 1
                m = self.learn_from[ipar]
                gateset_params = m["params"]
                gateset_opt_map = self.gateset_opt_map
                m_vals = m["results"]
                m_stds = np.array(m["results_std"])
                m_shots = m["shots"]
                sequences = m["seqs"]
                num_seqs = len(sequences)
                if target == "all":
                    num_seqs = len(sequences) * 3

                self.pmap.set_parameters_scaled(current_params)
                self.pmap.model.update_model()

                self.pmap.set_parameters(gateset_params, gateset_opt_map)
                # We find the unique gates used in the sequences and compute
                # only those.
                self.exp.opt_gates = list(
                    set(itertools.chain.from_iterable(sequences)))
                self.exp.get_gates()
                pops = self.exp.evaluate(sequences)
                sim_vals = self.exp.process(labels=self.state_labels[target],
                                            populations=pops)

                exp_stds.extend(m_stds)
                exp_shots.extend(m_shots)

                if target == "all":
                    goal = neg_loglkh_multinom_norm(
                        m_vals,
                        tf.stack(sim_vals),
                        tf.constant(m_stds, dtype=tf.float64),
                        tf.constant(m_shots, dtype=tf.float64),
                    )
                else:
                    goal = g_LL_prime(
                        m_vals,
                        tf.stack(sim_vals),
                        tf.constant(m_stds, dtype=tf.float64),
                        tf.constant(m_shots, dtype=tf.float64),
                    )
                goals.append(goal.numpy())
                seq_weigths.append(num_seqs)
                sim_values.extend(sim_vals)
                exp_values.extend(m_vals)

                with open(self.logdir + self.logname, "a") as logfile:
                    logfile.write(
                        f"\n  Parameterset {ipar + 1}, #{count} of {len(indeces)}:\n"
                        f"{str(self.exp.pmap)}\n")
                    logfile.write(
                        "Sequence    Simulation  Experiment  Std           Shots"
                        "    Diff\n")

                for iseq in range(len(sequences)):
                    m_val = np.array(m_vals[iseq])
                    m_std = np.array(m_stds[iseq])
                    shots = np.array(m_shots[iseq])
                    sim_val = sim_vals[iseq].numpy()
                    with open(self.logdir + self.logname, "a") as logfile:
                        for ii in range(len(sim_val)):
                            logfile.write(
                                f"{iseq + 1:8}    "
                                f"{float(sim_val[ii]):8.6f}    "
                                f"{float(m_val[ii]):8.6f}    "
                                f"{float(m_std[ii]):8.6f}    "
                                f"{float(shots[0]):8}    "
                                f"{float(m_val[ii]-sim_val[ii]):8.6f}\n")
                        logfile.flush()

        goal = g_LL_prime_combined(goals, seq_weigths)

        with open(self.logdir + self.logname, "a") as logfile:
            logfile.write("\nFinished batch with ")
            logfile.write("{}: {}\n".format(self.fom.__name__, goal))
            print("{}: {}".format(self.fom.__name__, goal))
            for est in self.estimator_list:
                val = float(
                    est(exp_values, sim_values, exp_stds, exp_shots).numpy())
                logfile.write("{}: {}\n".format(est.__name__, val))
            print("")
            logfile.flush()

        self.optim_status["params"] = [
            par.numpy().tolist()
            for par in self.exp.get_parameters(self.opt_map)
        ]
        self.optim_status["goal"] = goal
        self.evaluation += 1
        return goal
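
The per-dataset goals are merged by g_LL_prime_combined, weighted by the number of sequences in each dataset. One plausible reading of that combination, sketched here as an assumption about its behaviour rather than the library source:

    import numpy as np

    def g_LL_prime_combined_sketch(goals, seq_weights):
        # Assumed behaviour: a sequence-count-weighted mean of the
        # per-dataset negative log-likelihood goals.
        goals = np.asarray(goals, dtype=np.float64)
        weights = np.asarray(seq_weights, dtype=np.float64)
        return float(np.sum(goals * weights) / np.sum(weights))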
Example #3
    def goal_run(self, val):
        """
        Evaluate the figure of merit for the current model parameters.

        Parameters
        ----------
        val : tf.Tensor
            Current model parameters

        Returns
        -------
        tf.float64
            Figure of merit

        """
        exp_values = []
        exp_stds = []
        sim_values = []
        exp_shots = []
        goals = []
        seq_weigths = []
        count = 0
        # TODO: seq per point is not constant. Remove.

        self.exp.set_parameters(val, self.opt_map, scaled=False)

        for target, data in self.learn_data.items():

            self.learn_from = data['seqs_grouped_by_param_set']
            self.gateset_opt_map = data['opt_map']
            indeces = self.select_from_data(self.batch_sizes[target])

            for ipar in indeces:
                count += 1
                m = self.learn_from[ipar]
                gateset_params = m['params']
                gateset_opt_map = self.gateset_opt_map
                m_vals = m['results']
                m_stds = np.array(m['results_std'])
                m_shots = m['shots']
                sequences = m['seqs']
                num_seqs = len(sequences)
                if target == 'all':
                    num_seqs = len(sequences) * 3

                self.exp.gateset.set_parameters(self.init_gateset_params,
                                                self.init_gateset_opt_map,
                                                scaled=False)
                self.exp.gateset.set_parameters(gateset_params,
                                                gateset_opt_map,
                                                scaled=False)
                # We find the unique gates used in the sequences and compute
                # only those.
                self.exp.opt_gates = list(
                    set(itertools.chain.from_iterable(sequences)))
                self.exp.get_gates()
                self.exp.evaluate(sequences)
                sim_vals = self.exp.process(labels=self.state_labels[target])

                exp_stds.extend(m_stds)
                exp_shots.extend(m_shots)

                if target == 'all':
                    goal = neg_loglkh_multinom_norm(
                        m_vals, tf.stack(sim_vals),
                        tf.constant(m_stds, dtype=tf.float64),
                        tf.constant(m_shots, dtype=tf.float64))
                else:
                    goal = g_LL_prime(m_vals, tf.stack(sim_vals),
                                      tf.constant(m_stds, dtype=tf.float64),
                                      tf.constant(m_shots, dtype=tf.float64))
                goals.append(goal.numpy())
                seq_weigths.append(num_seqs)
                sim_values.extend(sim_vals)
                exp_values.extend(m_vals)

                with open(self.logdir + self.logname, 'a') as logfile:
                    logfile.write(
                        "\n  Parameterset {}, #{} of {}:\n {}\n {}\n".format(
                            ipar + 1,
                            count,
                            len(indeces),
                            json.dumps(self.gateset_opt_map),
                            self.exp.gateset.get_parameters(
                                self.gateset_opt_map, to_str=True),
                        ))
                    logfile.write(
                        "Sequence    Simulation  Experiment  Std         Shots"
                        "       Diff\n")

                for iseq in range(len(sequences)):
                    m_val = np.array(m_vals[iseq])
                    m_std = np.array(m_stds[iseq])
                    shots = np.array(m_shots[iseq])
                    sim_val = sim_vals[iseq].numpy()
                    with open(self.logdir + self.logname, 'a') as logfile:
                        for ii in range(len(sim_val)):
                            logfile.write(
                                f"{iseq + 1:8}    "
                                f"{float(sim_val[ii]):8.6f}    "
                                f"{float(m_val[ii]):8.6f}    "
                                f"{float(m_std[ii]):8.6f}    "
                                f"{float(shots[0]):8}    "
                                f"{float(m_val[ii]-sim_val[ii]):8.6f}\n")
                        logfile.flush()

        goal = g_LL_prime_combined(goals, seq_weigths)
        # TODO make gradient free function use any fom

        with open(self.logdir + self.logname, 'a') as logfile:
            logfile.write("\nFinished batch with ")
            logfile.write("{}: {}\n".format(self.fom.__name__, goal))
            print("{}: {}".format(self.fom.__name__, goal))
            for est in self.estimator_list:
                val = float(
                    est(exp_values, sim_values, exp_stds, exp_shots).numpy())
                logfile.write("{}: {}\n".format(est.__name__, val))
            print("")
            logfile.flush()

        self.optim_status['params'] = [
            par.numpy().tolist()
            for par in self.exp.get_parameters(self.opt_map)
        ]
        self.optim_status['goal'] = goal
        self.evaluation += 1
        return goal
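
Each evaluation scores only a random minibatch of parameter sets, selected by self.select_from_data. A hypothetical stand-in with that contract (name and sampling scheme are assumptions):

    import numpy as np

    def select_from_data_sketch(dataset_size, batch_size, rng=None):
        # Draw a minibatch of parameter-set indices without replacement,
        # capped at the size of the dataset.
        rng = rng or np.random.default_rng()
        size = min(batch_size, dataset_size)
        return rng.choice(dataset_size, size=size, replace=False)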
Example #4
    def goal_run_with_grad(self, current_params):
        """
        Same as goal_run but with gradient. Very resource intensive. Unoptimized at the
        moment.
        """
        exp_values = []
        sim_values = []
        exp_stds = []
        exp_shots = []
        goals = []
        grads = []
        seq_weigths = []
        count = 0
        seqs_pp = self.seqs_per_point

        for target, data in self.learn_data.items():

            self.learn_from = data["seqs_grouped_by_param_set"]
            self.gateset_opt_map = data["opt_map"]
            indeces = self.select_from_data(self.batch_sizes[target])

            for ipar in indeces:

                count += 1
                m = self.learn_from[ipar]
                gateset_params = m["params"]
                gateset_opt_map = self.gateset_opt_map
                m_vals = m["results"][:seqs_pp]
                m_stds = np.array(m["results_std"][:seqs_pp])
                m_shots = m["shots"][:seqs_pp]
                sequences = m["seqs"][:seqs_pp]
                num_seqs = len(sequences)
                if target == "all":
                    num_seqs = len(sequences) * 3

                with tf.GradientTape() as t:
                    t.watch(current_params)
                    self.pmap.set_parameters_scaled(current_params)
                    self.pmap.model.update_model()
                    self.pmap.set_parameters(gateset_params, gateset_opt_map)
                    # We find the unique gates used in the sequences and
                    # compute only those.
                    self.exp.opt_gates = list(
                        set(itertools.chain.from_iterable(sequences)))
                    self.exp.get_gates()
                    pops = self.exp.evaluate(sequences)
                    sim_vals = self.exp.process(
                        labels=self.state_labels[target], populations=pops)

                    exp_stds.extend(m_stds)
                    exp_shots.extend(m_shots)

                    if target == "all":
                        g = neg_loglkh_multinom_norm(
                            m_vals,
                            tf.stack(sim_vals),
                            tf.Variable(m_stds, dtype=tf.float64),
                            tf.Variable(m_shots, dtype=tf.float64),
                        )
                    else:
                        g = g_LL_prime(
                            m_vals,
                            tf.stack(sim_vals),
                            tf.Variable(m_stds, dtype=tf.float64),
                            tf.Variable(m_shots, dtype=tf.float64),
                        )

                seq_weigths.append(num_seqs)
                goals.append(g.numpy())
                grads.append(t.gradient(g, current_params).numpy())

                sim_values.extend(sim_vals)
                exp_values.extend(m_vals)

                with open(self.logdir + self.logname, "a") as logfile:
                    logfile.write(
                        f"\n  Parameterset {ipar + 1}, #{count} of {len(indeces)}:\n"
                        f"{str(self.exp.pmap)}\n")
                    logfile.write(
                        "Sequence    Simulation  Experiment  Std           Shots"
                        "    Diff\n")

                for iseq in range(len(sequences)):
                    m_val = np.array(m_vals[iseq])
                    m_std = np.array(m_stds[iseq])
                    shots = np.array(m_shots[iseq])
                    sim_val = sim_vals[iseq].numpy()
                    with open(self.logdir + self.logname, "a") as logfile:
                        for ii in range(len(sim_val)):
                            logfile.write(
                                f"{iseq + 1:8}    "
                                f"{float(sim_val[ii]):8.6f}    "
                                f"{float(m_val[ii]):8.6f}    "
                                f"{float(m_std[ii]):8.6f}    "
                                f"{float(shots[0]):8}    "
                                f"{float(m_val[ii]-sim_val[ii]):8.6f}\n")
                        logfile.flush()

        goal = g_LL_prime_combined(goals, seq_weigths)
        grad = dv_g_LL_prime(goals, grads, seq_weigths)

        with open(self.logdir + self.logname, "a") as logfile:
            logfile.write("\nFinished batch with ")
            logfile.write("{}: {}\n".format(self.fom.__name__, goal))
            for cb_fom in self.callback_foms:
                val = float(
                    cb_fom(exp_values, sim_values, exp_stds,
                           exp_shots).numpy())
                logfile.write("{}: {}\n".format(cb_fom.__name__, val))
            logfile.flush()

        self.optim_status["params"] = [
            par.numpy().tolist() for par in self.pmap.get_parameters()
        ]
        self.optim_status["goal"] = goal
        self.optim_status["gradient"] = list(grad.flatten())
        self.optim_status["time"] = time.asctime()
        self.evaluation += 1
        return goal, grad
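
The per-dataset gradients are merged by dv_g_LL_prime with the same sequence weights. If the combined goal is the weighted mean sketched after Example #2, its gradient is the matching weighted mean of the per-dataset gradients, since the weights are constants. A sketch under that assumption (the real function may use its goals argument differently):

    import numpy as np

    def dv_g_LL_prime_sketch(goals, grads, seq_weights):
        # Weighted mean of per-dataset gradients; `goals` is unused under
        # the plain weighted-mean assumption, kept only for signature parity.
        weights = np.asarray(seq_weights, dtype=np.float64)
        grads = np.asarray(grads, dtype=np.float64)
        return np.tensordot(weights, grads, axes=1) / np.sum(weights)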
Example #5
    def goal_run(self, current_params: tf.Variable) -> tf.float64:
        """
        Evaluate the figure of merit for the current model parameters.

        Parameters
        ----------
        current_params : tf.Tensor
            Current model parameters

        Returns
        -------
        tf.float64
            Figure of merit

        """
        exp_values = []
        sim_values = []
        exp_stds = []
        exp_shots = []
        goals = []
        seq_weigths = []
        count = 0
        seqs_pp = self.seqs_per_point
        # TODO: seq per point is not constant. Remove.

        for target, data in self.learn_data.items():

            self.learn_from = data["seqs_grouped_by_param_set"]
            self.gateset_opt_map = data["opt_map"]
            indeces = self.select_from_data(self.batch_sizes[target])

            for ipar in indeces:

                count += 1
                data_set = self.learn_from[ipar]
                m_vals = data_set["results"][:seqs_pp]
                m_stds = data_set["results_std"][:seqs_pp]
                m_shots = data_set["shots"][:seqs_pp]
                sequences = data_set["seqs"][:seqs_pp]
                num_seqs = len(sequences)
                if target == "all":
                    num_seqs = len(sequences) * 3

                sim_vals = self._one_par_sim_vals(current_params, data_set,
                                                  ipar, target)
                if target == "all":
                    one_goal = neg_loglkh_multinom_norm(
                        m_vals,
                        tf.stack(sim_vals),
                        tf.Variable(m_stds, dtype=tf.float64),
                        tf.Variable(m_shots, dtype=tf.float64),
                    )
                else:
                    one_goal = g_LL_prime(
                        m_vals,
                        tf.stack(sim_vals),
                        tf.Variable(m_stds, dtype=tf.float64),
                        tf.Variable(m_shots, dtype=tf.float64),
                    )
                exp_stds.extend(m_stds)
                exp_shots.extend(m_shots)

                goals.append(one_goal.numpy())
                seq_weigths.append(num_seqs)
                sim_values.extend(sim_vals)
                exp_values.extend(m_vals)

                self._log_one_dataset(data_set, ipar, indeces, sim_vals, count)

        goal = g_LL_prime_combined(goals, seq_weigths)
        # TODO make gradient free function use any fom

        with open(self.logdir + self.logname, "a") as logfile:
            logfile.write("\nFinished batch with ")
            logfile.write("{}: {}\n".format("g_LL_prime_combined", goal))
            for cb_fom in self.callback_foms:
                val = float(
                    cb_fom(exp_values, sim_values, exp_stds,
                           exp_shots).numpy())
                logfile.write("{}: {}\n".format(cb_fom.__name__, val))
            logfile.flush()

        self.optim_status["params"] = [
            par.numpy().tolist() for par in self.pmap.get_parameters()
        ]
        self.optim_status["goal"] = goal
        self.optim_status["time"] = time.asctime()
        self.evaluation += 1
        return goal
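
Example #5 factors the simulation into self._one_par_sim_vals. The helper can be reconstructed from the inline code of Examples #2 and #4; the version below is that reconstruction, not necessarily the actual implementation:

    import itertools

    def _one_par_sim_vals(self, current_params, data_set, ipar, target):
        # Set model and gateset parameters, simulate only the unique gates
        # appearing in the sequences, and return processed populations.
        # `ipar` is kept for signature parity with the calls above.
        self.pmap.set_parameters_scaled(current_params)
        self.pmap.model.update_model()
        self.pmap.set_parameters(data_set["params"], self.gateset_opt_map)
        sequences = data_set["seqs"][:self.seqs_per_point]
        self.exp.opt_gates = list(
            set(itertools.chain.from_iterable(sequences)))
        self.exp.get_gates()
        pops = self.exp.evaluate(sequences)
        return self.exp.process(labels=self.state_labels[target],
                                populations=pops)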
Example #6
    def goal_run_with_grad(self, current_params):
        """
        Same as goal_run but with gradient. Very resource intensive. Unoptimized at the moment.
        """
        # display.plot_C3([self.logdir])
        exp_values = []
        sim_values = []
        exp_stds = []
        exp_shots = []
        goals = []
        grads = []
        seq_weigths = []
        count = 0
        seqs_pp = self.seqs_per_point

        for target, data in self.learn_data.items():

            self.learn_from = data['seqs_grouped_by_param_set']
            self.gateset_opt_map = data['opt_map']
            indeces = self.select_from_data(self.batch_sizes[target])

            for ipar in indeces:

                count += 1
                m = self.learn_from[ipar]
                gateset_params = m['params']
                gateset_opt_map = self.gateset_opt_map
                m_vals = m['results'][:seqs_pp]
                m_stds = np.array(m['results_std'][:seqs_pp])
                m_shots = m['shots'][:seqs_pp]
                sequences = m['seqs'][:seqs_pp]
                num_seqs = len(sequences)
                if target == 'all':
                    num_seqs = len(sequences) * 3

                with tf.GradientTape() as t:
                    t.watch(current_params)
                    self.exp.set_parameters(current_params, self.opt_map, scaled=True)
                    self.exp.gateset.set_parameters(
                        self.init_gateset_params,
                        self.init_gateset_opt_map,
                        scaled=False
                    )
                    self.exp.gateset.set_parameters(
                        gateset_params, gateset_opt_map, scaled=False
                    )
                    # We find the unique gates used in the sequences and
                    # compute only those.
                    self.exp.opt_gates = list(
                        set(itertools.chain.from_iterable(sequences))
                    )
                    self.exp.get_gates()
                    self.exp.evaluate(sequences)
                    sim_vals = self.exp.process(labels=self.state_labels[target])

                    exp_stds.extend(m_stds)
                    exp_shots.extend(m_shots)

                    if target == 'all':
                        g = neg_loglkh_multinom_norm(
                            m_vals,
                            tf.stack(sim_vals),
                            tf.constant(m_stds, dtype=tf.float64),
                            tf.constant(m_shots, dtype=tf.float64)
                        )
                    else:
                        g = g_LL_prime(
                            m_vals,
                            tf.stack(sim_vals),
                            tf.constant(m_stds, dtype=tf.float64),
                            tf.constant(m_shots, dtype=tf.float64)
                        )

                seq_weigths.append(num_seqs)
                goals.append(g.numpy())
                grads.append(t.gradient(g, current_params).numpy())

                sim_values.extend(sim_vals)
                exp_values.extend(m_vals)

                with open(self.logdir + self.logname, 'a') as logfile:
                    logfile.write(
                        "\n  Parameterset {}, #{} of {}:\n {}\n {}\n".format(
                            ipar + 1,
                            count,
                            len(indeces),
                            json.dumps(self.gateset_opt_map),
                            self.exp.gateset.get_parameters(
                                self.gateset_opt_map, to_str=True
                            ),
                        )
                    )
                    logfile.write(
                        "Sequence    Simulation  Experiment  Std         Shots"
                        "       Diff\n"
                    )

                for iseq in range(len(sequences)):
                    m_val = np.array(m_vals[iseq])
                    m_std = np.array(m_stds[iseq])
                    shots = np.array(m_shots[iseq])
                    sim_val = sim_vals[iseq].numpy()
                    with open(self.logdir + self.logname, 'a') as logfile:
                        for ii in range(len(sim_val)):
                            logfile.write(
                                f"{iseq + 1:8}    "
                                f"{float(sim_val[ii]):8.6f}    "
                                f"{float(m_val[ii]):8.6f}    "
                                f"{float(m_std[ii]):8.6f}    "
                                f"{float(shots[0]):8}    "
                                f"{float(m_val[ii]-sim_val[ii]):8.6f}\n"
                            )
                        logfile.flush()

        goal = g_LL_prime_combined(goals, seq_weigths)
        grad = dv_g_LL_prime(goals, grads, seq_weigths)

        with open(self.logdir + self.logname, 'a') as logfile:
            logfile.write("\nFinished batch with ")
            logfile.write("{}: {}\n".format(self.fom.__name__, goal))
            for cb_fom in self.callback_foms:
                val = float(
                    cb_fom(exp_values, sim_values, exp_stds, exp_shots).numpy()
                )
                logfile.write("{}: {}\n".format(cb_fom.__name__, val))
            logfile.flush()

        for cb_fig in self.callback_figs:
            # sim_values is a list of tensors; stack it before converting.
            fig = cb_fig(exp_values, tf.stack(sim_values).numpy(), exp_stds)
            fig.savefig(
                self.logdir
                + cb_fig.__name__ + '/'
                + 'eval:' + str(self.evaluation) + "__"
                + self.fom.__name__ + str(round(goal, 3))
                + '.png'
            )
            plt.close(fig)

        self.optim_status['params'] = [
            par.numpy().tolist()
            for par in self.exp.get_parameters(self.opt_map)
        ]
        self.optim_status['goal'] = goal
        self.optim_status['gradient'] = list(grad.flatten())
        self.optim_status['time'] = time.asctime()
        self.evaluation += 1
        return goal, grad
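
The callback figures of merit receive the pooled experiment and simulation values and must return a scalar tensor. A minimal compatible callback, assuming rectangular data (illustrative only; any function with this signature works):

    import tensorflow as tf

    def rms_dist(exp_values, sim_values, exp_stds, exp_shots):
        # Root-mean-square distance between measured and simulated
        # populations, pooled over the whole batch; assumes all sequences
        # report the same number of state labels.
        exp = tf.constant(exp_values, dtype=tf.float64)
        sim = tf.cast(tf.stack(sim_values), tf.float64)
        return tf.sqrt(tf.reduce_mean((exp - sim) ** 2))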