Example #1
 def get_cost(self, steps):
     learner = CircuitLearnerTF(hyperparams=self.hyperp)
     learner.train_circuit(X=self.X, Y=self.Y, steps=steps)
     evalu = learner.score_circuit(X=self.X, Y=self.Y)
     cost = evalu['loss']
     return cost
Example #2
 def get_circuit_params(self, steps):
     learner = CircuitLearnerTF(hyperparams=self.hyperp)
     learner.train_circuit(X=self.X, Y=self.Y, steps=steps)
     params = learner.get_circuit_parameters()
     param_value = params['regularized/dummy']
     return param_value
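
Both snippets above assume that self.hyperp, self.X and self.Y were prepared elsewhere. A minimal, hypothetical sketch of such inputs, modelled on the hyperparams dictionaries used in the later examples (the circuit and loss bodies here are placeholders only):

import numpy as np

# Placeholder circuit and loss; real versions build a Strawberry Fields circuit
# and a TensorFlow loss, as in the later examples on this page.
def circuit(X):
    raise NotImplementedError("variational circuit goes here")

def myloss(circuit_output, targets):
    raise NotImplementedError("loss on the circuit output goes here")

X = np.array([[0.1, 0.2], [0.2, 0.1]])
Y = np.array([0, 1])

# Hypothetical hyperparameter dictionary passed to CircuitLearnerTF(hyperparams=...).
hyperp = {
    'circuit': circuit,
    'task': 'supervised',        # X and Y are passed to train_circuit()
    'loss': myloss,
    'optimizer': 'SGD',
    'init_learning_rate': 0.1,
}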
Example #3
class MaxCutSolver():
    """
    The MaxCut solver algorithm.

    The MaxCut problem asks for a partition of the graph's vertices into two sets such that the total
    weight of the edges crossing the partition (the cut) is as large as possible.
    The decision version of the problem is NP-complete, hence the interest in methods that allow us to
    tackle larger graphs.
    This class allows embedding the graph as a quantum circuit and finding the maximum cut of the embedded graph.
    For reference, you can also calculate all solutions classically.

    Using this class:
        1) initialize with `solver = MaxCutSolver(learner_params, training_params, matrices, gates_structure, log=log)`, where
        `learner_params` is a dictionary of parameters that pertain to the optimization,
        `training_params` is a dictionary specifying the number of optimizer steps and the cutoff dimension of the simulation,
        `matrices` is a list containing the graph adjacency matrix and the interferometer matrix,
        `gates_structure` is a list of configuration options (parameters) for the variational gates, and
        `log` is a dictionary of settings for logging.
        A concrete instantiation sketch is given after this class definition.

        2) call `solver.train_and_evaluate_circuit()` to train and evaluate the circuit.

        3) call `solver.assess_all_solutions_clasically()` to get all solutions classically.

    :learner_params: (dict) dictionary of learner parameters. The expected fields are:
                    - `task`: (str) set to `optimization` to solve the MaxCut problem as an optimization problem.
                    - `regularization_strength`: (float) a small value (about 1e-5) for the regularization strength.
                    - `optimizer`: (str) the optimizer to use, such as `SGD` for stochastic gradient descent.
                    - `init_learning_rate`: (float) the initial learning rate of the optimizer, such as 0.1.
                    - `log_every`: (int) log the monitored tensors every `log_every` steps.
    :training_params: (dict) dictionary of settings that pertain to training. These settings control the amount of
                    resources that will be used to solve the problem and depend on the size of the graph. The expected fields are:
                    - `steps`: (int) the number of steps the optimizer is to run for.
                    - `cutoff_dim`: (int) the Fock-basis cutoff dimension used when simulating the circuit.
    :matrices: (list) list containing the adjacency matrix at index `0` and the interferometer matrix at index `1`.
    :gates_structure: (list) list of lists, where each inner list contains the configuration of one gate's parameters.
                    An example of such a list is `[Sgate, 0, {"constant": np.random.random() - 0.5, "name": 'squeeze_0', 'regularize': True, 'monitor': True}]`,
                    which configures a squeeze gate acting on qumode `0`.
    """
    def __init__(self,
                 learner_params,
                 training_params,
                 matrices,
                 gates_structure,
                 log=None):
        self.learner_params = learner_params
        self.learner_params["loss"] = self._loss_function
        self.learner_params["_regularizer"] = self._regularizer
        self.training_params = training_params
        self.gates_structure = gates_structure
        self.adj_matrix = matrices[0]
        self.interferometer_matrix = matrices[1]
        self.n_qumodes = self.adj_matrix.shape[0]
        self.cost_array = self._prepare_cost_array()
        self.learner = None

        if log is None:
            self.log = {}
        else:
            self.log = log

    def train_and_evaluate_circuit(self):
        """
        Training and evaluation of the circuit.

        %TODO: Give an explanation of how this training and evaluation happens for users.
        """
        self.learner_params['circuit'] = self._create_circuit_evaluator
        self.learner = CircuitLearner(hyperparams=self.learner_params)
        self.learner.train_circuit(steps=self.training_params["steps"],
                                   tensors_to_log=self.log)

        final_params = self.learner.get_circuit_parameters()

        for name, value in final_params.items():
            if "Variable" not in name:
                print("Parameter {} has the final value {}.".format(
                    name, value))

        for gate in self.gates_structure:
            gate_name = gate[2]["name"]
            for param_name in final_params:
                if gate_name in param_name:
                    final_value = final_params[param_name]
                    gate[2]["constant"] = final_value
                    break

        all_results = []
        circuit_output = self._get_circuit_output()
        cost_tensor = self._loss_function(circuit_output)
        init = tf.global_variables_initializer()
        with tf.Session() as sess:
            sess.run(init)
            circuit_output = sess.run(circuit_output)
            cost_value = sess.run(cost_tensor)

        print("Total cost:", cost_value)
        return cost_value

    def assess_all_solutions_clasically(self):
        """
        Classical (brute-force) assessment of all possible solutions.

        %TODO: Give an explanation of how this assessment happens for users.
        """
        all_possible_solutions = list(
            itertools.product([0, 1], repeat=len(self.adj_matrix)))
        for solution in all_possible_solutions:
            print(solution, self._calculate_cost_once(solution))

    def _create_circuit_evaluator(self):
        return self._get_circuit_output()

    def _build_circuit(self):
        params_counter = 0
        sgates = []
        dgates = []
        kgates = []
        for gate_structure in self.gates_structure:
            if gate_structure[0] is Sgate:
                sgates.append(
                    ParametrizedGate(gate_structure[0], gate_structure[1],
                                     [make_param(**gate_structure[2])]))
            if gate_structure[0] is Dgate:
                dgates.append(
                    ParametrizedGate(gate_structure[0], gate_structure[1],
                                     [make_param(**gate_structure[2])]))
            if gate_structure[0] is Kgate:
                kgates.append(
                    ParametrizedGate(gate_structure[0], gate_structure[1],
                                     [make_param(**gate_structure[2])]))

        eng, q = sf.Engine(self.n_qumodes)

        rl, U = takagi(self.adj_matrix)
        initial_squeezings = np.tanh(rl)

        with eng:
            for i, squeeze_value in enumerate(initial_squeezings):
                Sgate(squeeze_value) | i

            Interferometer(U) | q

            for gate in sgates:
                gate.gate(gate.params[0]) | gate.qumodes

            Interferometer(self.interferometer_matrix) | q

            for gate in dgates:
                gate.gate(gate.params[0]) | gate.qumodes

            Interferometer(self.interferometer_matrix) | q

            for gate in kgates:
                gate.gate(gate.params[0]) | gate.qumodes

        circuit = {}
        circuit["eng"] = eng
        circuit["q"] = q

        return circuit

    def _get_circuit_output(self, test=False):
        circuit = self._build_circuit()
        eng = circuit['eng']
        encoding = []
        state = eng.run('tf',
                        cutoff_dim=self.training_params["cutoff_dim"],
                        eval=False)
        all_probs = state.all_fock_probs()
        circuit_output = all_probs
        trace = tf.identity(state.trace(), name="trace")

        if test:
            init = tf.global_variables_initializer()
            with tf.Session() as sess:
                sess.run(init)
                all_probs_num = sess.run(all_probs)
            pdb.set_trace()

        return circuit_output

    def _loss_function(self, circuit_output):
        cost_tensor = tf.constant(self.cost_array,
                                  dtype=tf.float32,
                                  name="cost_tensor")
        weighted_cost_tensor = tf.multiply(cost_tensor, circuit_output)
        result = tf.reduce_sum(weighted_cost_tensor)
        result = tf.multiply(result, tf.constant(-1.0))
        return result

    def _regularizer(self, regularized_params):
        return tf.nn.l2_loss(regularized_params)

    def _calculate_cost_once(self, encoding):
        cost_value = 0
        for i in range(len(encoding)):
            for j in range(len(encoding)):
                cost_value += 0.5 * self.adj_matrix[i][j] * (encoding[i] -
                                                             encoding[j])**2
        return cost_value

    def _prepare_cost_array(self):
        cutoff = self.training_params["cutoff_dim"]
        cost_array = np.zeros([cutoff] * self.n_qumodes)
        for indices in np.ndindex(cost_array.shape):
            cost_array[indices] = self._calculate_cost_once(
                np.clip(indices, 0, 1))
        return cost_array
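
To make the usage steps in the docstring concrete, here is a hypothetical instantiation sketch; the 4-node graph, the identity interferometer and all numeric settings are chosen purely for illustration:

import numpy as np
from strawberryfields.ops import Sgate

# A small, hypothetical 4-node graph (symmetric adjacency matrix, zero diagonal)
# and an identity interferometer, purely for illustration.
adj_matrix = np.array([[0, 1, 1, 0],
                       [1, 0, 1, 1],
                       [1, 1, 0, 1],
                       [0, 1, 1, 0]], dtype=float)
interferometer_matrix = np.identity(4)

learner_params = {
    'task': 'optimization',
    'regularization_strength': 1e-5,
    'optimizer': 'SGD',
    'init_learning_rate': 0.1,
    'log_every': 10,
}
training_params = {'steps': 100, 'cutoff_dim': 7}

# One trainable squeeze gate on qumode 0, following the format from the docstring.
gates_structure = [
    [Sgate, 0, {"constant": np.random.random() - 0.5, "name": 'squeeze_0',
                'regularize': True, 'monitor': True}],
]

solver = MaxCutSolver(learner_params, training_params,
                      [adj_matrix, interferometer_matrix],
                      gates_structure, log={'Trace': 'trace'})
cost = solver.train_and_evaluate_circuit()
solver.assess_all_solutions_clasically()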
Example #4
class MaxCutSolver():
    """This method allows to embed graphs as """
    def __init__(self, learner_params, training_params, matrices, gates_structure, log=None):
        self.learner_params = learner_params
        self.learner_params['loss'] = self.loss_function
        self.learner_params['regularizer'] = self.regularizer
        self.training_params = training_params
        self.gates_structure = gates_structure
        self.adj_matrix = matrices[0]
        self.interferometer_matrix = matrices[1]
        self.n_qumodes = self.adj_matrix.shape[0]
        self.cost_array = self.prepare_cost_array()

        self.learner_params['circuit'] = self.create_circuit_evaluator
        self.learner = CircuitLearner(hyperparams=self.learner_params, model_dir=training_params['model_dir'])
        self.final_params = None

        if log is None:
            self.log = {}
        else:
            self.log = log

    def train_and_evaluate_circuit(self, verbose=False):
        self.learner.train_circuit(steps=self.training_params['steps'], tensors_to_log=self.log)
        final_params = self.learner.get_circuit_parameters()
        
        if verbose:
            for name, value in final_params.items():
                print("Parameter {} has the final value {}.".format(name, value))

        for gate in self.gates_structure:
            gate_name = gate[2]['name']
            for param_name in final_params:
                if gate_name in param_name:
                    final_value = final_params[param_name]
                    gate[2]['constant'] = final_value
                    break

        self.final_params = final_params
        all_results = []
        circuit_output = self.get_circuit_output()
        cost_tensor = self.loss_function(circuit_output)
        init = tf.global_variables_initializer()
        with tf.Session() as sess:
            sess.run(init)
            circuit_output = sess.run(circuit_output)
            cost_value = sess.run(cost_tensor)

        if verbose:
            print("Total cost:", cost_value)
        return cost_value, circuit_output
 
    def create_circuit_evaluator(self):
        return self.get_circuit_output()

    def build_circuit(self):
        params_counter = 0
        sgates = []
        dgates = []
        kgates = []
        for gate_structure in self.gates_structure:
            if gate_structure[0] is Sgate:
                sgates.append(ParametrizedGate(gate_structure[0], gate_structure[1], [make_param(**gate_structure[2]), make_param(**gate_structure[3])]))
            if gate_structure[0] is Dgate:
                dgates.append(ParametrizedGate(gate_structure[0], gate_structure[1], [make_param(**gate_structure[2]), make_param(**gate_structure[3])]))
            if gate_structure[0] is Kgate:
                kgates.append(ParametrizedGate(gate_structure[0], gate_structure[1], [make_param(**gate_structure[2])]))

        eng, q = sf.Engine(self.n_qumodes)

        rl, U = takagi(self.adj_matrix)
        initial_squeezings = np.tanh(rl)

        with eng:
            for i, squeeze_value in enumerate(initial_squeezings):
                Sgate(squeeze_value) | i

            Interferometer(U) | q

            for gate in sgates:
                gate.gate(gate.params[0], gate.params[1]) | gate.qumodes

            Interferometer(self.interferometer_matrix) | q

            for gate in dgates:
                gate.gate(gate.params[0], gate.params[1]) | gate.qumodes

            Interferometer(self.interferometer_matrix) | q

            for gate in kgates:
                gate.gate(gate.params[0]) | gate.qumodes

        circuit = {}
        circuit['eng'] = eng
        circuit['q'] = q

        return circuit

    def get_circuit_output(self, test=False):
        circuit = self.build_circuit()
        eng = circuit['eng']
        encoding = []
        state = eng.run('tf', cutoff_dim=self.training_params['cutoff_dim'], eval=False)
        all_probs = state.all_fock_probs()
        circuit_output = all_probs
        trace = tf.identity(tf.abs(state.trace()), name='trace')
        tf.summary.scalar(name='trace', tensor=trace)
        
        if test:
            init = tf.global_variables_initializer()
            with tf.Session() as sess:
                sess.run(init)
                all_probs_num = sess.run(all_probs)
            pdb.set_trace()

        return circuit_output

    def loss_function(self, circuit_output, use_reduced_probs=True):
        if use_reduced_probs:
            # Index with a tuple of slices; a plain list of slices is not valid NumPy indexing.
            cost_array = self.cost_array[tuple([slice(2)] * self.n_qumodes)]
            cost_tensor = tf.constant(cost_array, dtype=tf.float32, name='cost_tensor')
            circuit_output = tf.slice(circuit_output, [0]*self.n_qumodes, [2]*self.n_qumodes)
        else:
            cost_tensor = tf.constant(self.cost_array, dtype=tf.float32, name='cost_tensor')
        weighted_cost_tensor = tf.multiply(cost_tensor, circuit_output)
        total_probability = tf.reduce_sum(circuit_output)
        result = tf.reduce_sum(weighted_cost_tensor)
        result = tf.divide(result, total_probability)
        result = tf.multiply(result, tf.constant(-1.0))
        return result

    def regularizer(self, regularized_params):
        return tf.nn.l2_loss(regularized_params)

    def calculate_cost_once(self, encoding):
        cost_value = 0
        for i in range(len(encoding)):
            for j in range(len(encoding)):
                cost_value += 0.5 * self.adj_matrix[i][j] * (encoding[i] - encoding[j])**2
        return cost_value

    def assess_all_solutions_clasically(self):
        all_possible_solutions = list(itertools.product([0, 1], repeat=len(self.adj_matrix)))
        for solution in all_possible_solutions:
            print(solution, self.calculate_cost_once(solution))

    def prepare_cost_array(self):
        cutoff = self.training_params['cutoff_dim']
        cost_array = np.zeros([cutoff] * self.n_qumodes)
        for indices in np.ndindex(cost_array.shape):
            cost_array[indices] = self.calculate_cost_once(np.clip(indices, 0, 1))
        return cost_array
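
calculate_cost_once above implements the standard MaxCut objective, cost = 0.5 * sum_ij A_ij * (x_i - x_j)^2, which adds up the weight of every edge whose endpoints land in different partitions (each edge appears twice in the symmetric adjacency matrix, hence the factor 0.5). A small standalone check on a hypothetical triangle graph:

import numpy as np

# Triangle graph: every pair of the three vertices is connected.
adj = np.array([[0, 1, 1],
                [1, 0, 1],
                [1, 1, 0]], dtype=float)

def maxcut_cost(encoding, adj_matrix):
    # 0.5 * sum_ij A_ij * (x_i - x_j)^2 counts each cut edge's weight once,
    # because every edge appears twice in the symmetric adjacency matrix.
    cost = 0.0
    for i in range(len(encoding)):
        for j in range(len(encoding)):
            cost += 0.5 * adj_matrix[i][j] * (encoding[i] - encoding[j]) ** 2
    return cost

print(maxcut_cost([0, 1, 1], adj))  # cuts two of the three edges -> 2.0
print(maxcut_cost([0, 0, 0], adj))  # no edges cut -> 0.0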
Example #5
hyperparams = {
    'circuit': circuit,
    'task': 'optimization',
    'loss': myloss,
    'regularizer': myregularizer,
    'regularization_strength': 0.5,
    'optimizer': 'SGD',
    'init_learning_rate': 0.1
}

learner = CircuitLearner(hyperparams=hyperparams)

# Define the tensors we want displayed in the training log that gets printed,
# and the names under which to display them.
log = {'Prob': 'prob', 'Trace': 'trace'}

learner.train_circuit(steps=50, tensors_to_log=log)

# Print out the final parameters
final_params = learner.get_circuit_parameters()
# final_params is a dictionary
for name, value in final_params.items():
    print("Parameter {} has the final value {}.".format(name, value))

# To monitor the training, install tensorboard, navigate with a terminal to the directory that contains
# the newly created folder "logAUTO" and run "tensorboard --logdir=logAUTO". This should return a link
# which can be opened in a browser.

# You can track the changes in the variable alpha. Tensorboard gets live updated if you rerun this script.
# Play around with the 'regularization_strength' and see how a large value forces alpha to zero.
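
This snippet assumes `circuit`, `myloss` and `myregularizer` were defined above it. The regularizer used throughout this page is an L2 penalty on the parameters marked with 'regularize': True, for example:

import tensorflow as tf

def myregularizer(regularized_params):
    # L2 penalty on the trainable parameters that were marked for regularization.
    return tf.nn.l2_loss(regularized_params)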
Example #6
# Set 'warm_start' to True to continue training from a previous run, so
# that the global step starts where it ended the last time you ran the script.
hyperparams = {
    'circuit': circuit,
    'task': 'supervised',
    'loss': myloss,
    'optimizer': 'SGD',
    'init_learning_rate': 0.5,
    'decay': 0.01,
    'print_log': True,
    'log_every': 10,
    'warm_start': False
}

learner = CircuitLearner(hyperparams=hyperparams)

learner.train_circuit(X=X_train, Y=Y_train, steps=steps, batch_size=batch_size)

test_score = learner.score_circuit(
    X=X_test, Y=Y_test, outputs_to_predictions=outputs_to_predictions)
# The score_circuit() function returns a dictionary of different metrics.
print("\nPossible scores to print: {}".format(list(test_score.keys())))
# We select the accuracy and loss.
print("Accuracy on test set: ", test_score['accuracy'])
print("Loss on test set: ", test_score['loss'])

outcomes = learner.run_circuit(X=X_pred,
                               outputs_to_predictions=outputs_to_predictions)
# The run_circuit() function returns a dictionary of different outcomes.
print("\nPossible outcomes to print: {}".format(list(outcomes.keys())))
# We select the predictions
print("Predictions for new inputs: {}".format(outcomes['predictions']))
Example #7
    return tf.nn.l2_loss(regularized_params)


X_train = np.array([[0, 1], [0, 2], [0, 3], [0, 4]])

hyperparams = {
    'circuit': circuit,
    'task': 'unsupervised',
    'optimizer': 'SGD',
    'init_learning_rate': 0.1,
    'loss': myloss,
    'regularizer': myregularizer,
    'regularization_strength': 0.1
}

learner = CircuitLearner(hyperparams=hyperparams)

learner.train_circuit(X=X_train, steps=steps)

outcomes = learner.run_circuit()
final_distribution = outcomes['outputs']

# Use a helper function to sample Fock states from this distribution.
# They should show a distribution similar to the training data.
for i in range(10):
    sample = sample_from_distribution(distribution=final_distribution)
    print("Fock state sample {}:{} \n".format(i, sample))

# Print out the final circuit parameters
learner.get_circuit_parameters(only_print=True)
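
The `sample_from_distribution` helper is assumed to be imported from elsewhere. A minimal sketch of what such a sampler could look like, under the assumption that `final_distribution` is a (possibly multi-dimensional) array of Fock-state probabilities:

import numpy as np

def sample_from_distribution(distribution):
    # Draw one Fock state from a (possibly unnormalised) probability
    # distribution and return it as a multi-index into the original array.
    probs = np.asarray(distribution, dtype=float).flatten()
    probs = probs / probs.sum()
    flat_index = np.random.choice(len(probs), p=probs)
    return np.unravel_index(flat_index, np.shape(distribution))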
Example #8
Y_test = [1, 0]
X_pred = [[0.4, 0.5], [0.5, 0.4]]


def outputs_to_predictions(circuit_output):
    return tf.round(circuit_output)


hyperparams = {
    'circuit': circuit,
    'task': 'supervised',
    'loss': myloss,
    'optimizer': 'SGD',
    'init_learning_rate': 0.5
}

learner = CircuitLearner(hyperparams=hyperparams)

learner.train_circuit(X=X_train, Y=Y_train, steps=100)

test_score = learner.score_circuit(
    X=X_test, Y=Y_test, outputs_to_predictions=outputs_to_predictions)
print("\nPossible scores to print: {}".format(list(test_score.keys())))
print("Accuracy on test set: ", test_score['accuracy'])
print("Loss on test set: ", test_score['loss'])

outcomes = learner.run_circuit(X=X_pred,
                               outputs_to_predictions=outputs_to_predictions)

print("\nPossible outcomes to print: {}".format(list(outcomes.keys())))
print("Predictions for new inputs: {}".format(outcomes['predictions']))
Example #9
    state = eng.run('tf', cutoff_dim=7, eval=False)

    # As the output we take the probability of measuring one photon in the mode
    prob = state.fock_prob([1])
    circuit_output = tf.identity(prob, name="prob")
    return circuit_output


# Define a loss function on the outputs of circuit().
# We use the negative probability of measuring |1>
# so that minimization increases the probability.
def myloss(circuit_output):
    return -circuit_output


# Set the hyperparameters of the model and the training algorithm
hyperparams = {
    'circuit': circuit,
    'task': 'optimization',
    'optimizer': 'SGD',
    'init_learning_rate': 0.1,
    'loss': myloss
}

# Create the learner
learner = CircuitLearner(hyperparams=hyperparams)

# Train the learner
learner.train_circuit(steps=50)
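
Example #9 starts inside a `circuit()` function whose opening lines are not shown. A hypothetical completion, written in the same older Strawberry Fields/QMLT style used throughout this page (a single qumode with one trainable displacement; the parameter name and value are illustrative):

import strawberryfields as sf
import tensorflow as tf
from strawberryfields.ops import Dgate
from qmlt.tf.helpers import make_param  # QMLT helper used elsewhere on this page (import path assumed)


def circuit():
    # One trainable displacement parameter (hypothetical name and starting value).
    alpha = make_param(name='alpha', constant=0.1)

    eng, q = sf.Engine(1)
    with eng:
        Dgate(alpha) | q[0]

    state = eng.run('tf', cutoff_dim=7, eval=False)

    # As the output we take the probability of measuring one photon in the mode
    prob = state.fock_prob([1])
    circuit_output = tf.identity(prob, name="prob")
    return circuit_output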
Example #10
X_test = df_test[0:10, 0:4]
Y_test = df_test[0:10, 4]

hyperparams = {
    'circuit': circuit,
    'task': 'supervised',
    'loss': myloss,
    'optimizer': 'SGD',
    # 'regularizer': myregularizer,
    # 'regularization_strength': 0.1,
    'init_learning_rate': 0.1
}

learner = CircuitLearner(hyperparams=hyperparams)

num_train_inputs = X_train.shape[0]

learner.train_circuit(X=X_train, Y=Y_train, steps=50, batch_size=10)

test_score = learner.score_circuit(
    X=X_test, Y=Y_test, outputs_to_predictions=outputs_to_predictions)
print("\nPossible scores to print: {}".format(list(test_score.keys())))
print("Accuracy on test set: ", test_score['accuracy'])
print("Loss on test set: ", test_score['loss'])

outcomes = learner.run_circuit(X=X_test,
                               outputs_to_predictions=outputs_to_predictions)

print("\nPossible outcomes to print: {}".format(list(outcomes.keys())))
print("Predictions for new inputs: {}".format(outcomes['predictions']))
print("Real outputs for new inputs: {}".format(Y_test))
Example #11
class MaxCutSolver():
    """This method allows to embed graphs as """
    def __init__(self,
                 learner_params,
                 training_params,
                 adj_matrices,
                 gates_structure,
                 log=None):
        self.learner_params = learner_params
        self.learner_params['loss'] = self.loss_function
        self.learner_params['regularizer'] = self.regularizer
        self.training_params = training_params
        self.gates_structure = gates_structure
        self.adj_matrices = adj_matrices

        interferometer_matrix = np.array(
            [[1, -1, 1, -1],
             [1, 1, 1, 1],
             [-1, -1, 1, 1],
             [1, -1, -1, 1]]) / 2
        self.interferometer_matrix = interferometer_matrix

        self.n_qumodes = self.adj_matrices[0].shape[0]

        self.learner_params['circuit'] = self.create_circuit_evaluator
        self.learner = CircuitLearner(adj_matrices=self.adj_matrices,
                                      hyperparams=self.learner_params,
                                      model_dir=training_params['model_dir'])
        self.final_params = None

        if log is None:
            self.log = {}
        else:
            self.log = log

    def train_and_evaluate_circuit(self, verbose=True):
        self.learner.train_circuit(steps=self.training_params['steps'],
                                   tensors_to_log=self.log)
        final_params = self.learner.get_circuit_parameters()

        if verbose:
            for name, value in final_params.items():
                print("Parameter {} has the final value {}.".format(
                    name, value))

        for gate in self.gates_structure:
            for gate_element_id in range(len(gate)):
                if gate_element_id < 2:
                    continue
                gate_name = gate[gate_element_id]['name']
                for param_name in final_params:
                    if gate_name in param_name:
                        final_value = final_params[param_name]
                        gate[gate_element_id]['constant'] = final_value
                        break

        self.final_params = final_params
        all_results = []
        circuit_outputs = []
        cost_values = []
        for adj_matrix in self.adj_matrices:
            circuit_output = self.get_circuit_output(adj_matrix)

            cost_tensor = self.loss_function([circuit_output], [adj_matrix])
            init = tf.global_variables_initializer()
            with tf.Session() as sess:
                sess.run(init)
                circuit_output = sess.run(circuit_output)
                cost_value = sess.run(cost_tensor)
            circuit_outputs.append(circuit_output)
            cost_values.append(cost_value)
            if verbose:
                print("Total cost:", cost_value)
        return cost_values, circuit_outputs

    def create_circuit_evaluator(self, adj_matrix):
        return self.get_circuit_output(adj_matrix)

    def build_circuit(self, adj_matrix):
        params_counter = 0
        number_of_layers = 2
        # Build a separate list per layer; [[]] * n would alias a single list,
        # so appending to one layer would append to all of them.
        all_sgates = [[] for _ in range(number_of_layers)]
        all_dgates = [[] for _ in range(number_of_layers)]
        all_kgates = [[] for _ in range(number_of_layers)]
        all_vgates = [[] for _ in range(number_of_layers)]

        for gate_structure in self.gates_structure:
            current_layer = int(gate_structure[2]['name'].split('_')[-1][0])
            if gate_structure[0] is Sgate:
                current_gate = ParametrizedGate(
                    gate_structure[0], gate_structure[1], [
                        make_param(**gate_structure[2]),
                        make_param(**gate_structure[3])
                    ])
                all_sgates[current_layer].append(current_gate)
            if gate_structure[0] is Dgate:
                current_gate = ParametrizedGate(
                    gate_structure[0], gate_structure[1], [
                        make_param(**gate_structure[2]),
                        make_param(**gate_structure[3])
                    ])
                all_dgates[current_layer].append(current_gate)
            if gate_structure[0] is Kgate:
                current_gate = ParametrizedGate(
                    gate_structure[0], gate_structure[1],
                    [make_param(**gate_structure[2])])
                all_kgates[current_layer].append(current_gate)
            if gate_structure[0] is Vgate:
                current_gate = ParametrizedGate(
                    gate_structure[0], gate_structure[1],
                    [make_param(**gate_structure[2])])
                all_vgates[current_layer].append(current_gate)

        eng, q = sf.Engine(self.n_qumodes)
        rl, U = takagi(adj_matrix)
        initial_squeezings = np.arctanh(rl)

        with eng:
            for i, squeeze_value in enumerate(initial_squeezings):
                Sgate(squeeze_value) | i

            Interferometer(U) | q
            for layer in range(number_of_layers):
                sgates = all_sgates[layer]
                dgates = all_dgates[layer]
                kgates = all_kgates[layer]
                vgates = all_vgates[layer]

                if len(sgates) != 0:
                    Interferometer(self.interferometer_matrix) | q
                    for gate in sgates:
                        gate.gate(gate.params[0],
                                  gate.params[1]) | gate.qumodes

                if len(dgates) != 0:
                    Interferometer(self.interferometer_matrix) | q
                    for gate in dgates:
                        gate.gate(gate.params[0],
                                  gate.params[1]) | gate.qumodes

                for gate in kgates:
                    gate.gate(gate.params[0]) | gate.qumodes

                for gate in vgates:
                    gate.gate(gate.params[0]) | gate.qumodes

        circuit = {}
        circuit['eng'] = eng
        circuit['q'] = q

        return circuit

    def get_circuit_output(self, adj_matrix, test=False):
        circuit = self.build_circuit(adj_matrix)
        eng = circuit['eng']
        encoding = []
        state = eng.run('tf',
                        cutoff_dim=self.training_params['cutoff_dim'],
                        eval=False)
        all_probs = state.all_fock_probs()
        circuit_output = all_probs
        trace = tf.identity(tf.abs(state.trace()), name='trace')
        tf.summary.scalar(name='trace', tensor=trace)

        if test:
            init = tf.global_variables_initializer()
            with tf.Session() as sess:
                sess.run(init)
                all_probs_num = sess.run(all_probs)
            pdb.set_trace()

        return circuit_output

    def loss_function(self, circuit_outputs, adj_matrices):
        result = tf.constant(0, dtype=tf.float32)
        for circuit_output, adj_matrix in zip(circuit_outputs, adj_matrices):
            result = tf.add(
                result, self.single_loss_function(circuit_output, adj_matrix))
        return result

    def single_loss_function(self, circuit_output, adj_matrix):
        cost_tensor = tf.constant(self.prepare_cost_array(adj_matrix),
                                  dtype=tf.float32,
                                  name='cost_tensor')
        weighted_cost_tensor = tf.multiply(cost_tensor, circuit_output)
        result = tf.reduce_sum(weighted_cost_tensor)
        result = tf.multiply(result, tf.constant(-1.0))
        return result

    def regularizer(self, regularized_params):
        return tf.nn.l2_loss(regularized_params)

    def prepare_cost_array(self, adj_matrix):
        cutoff = self.training_params['cutoff_dim']
        cost_array = np.zeros([cutoff] * self.n_qumodes)
        for indices in np.ndindex(cost_array.shape):
            cost_array[indices] = calculate_cost_once(np.clip(indices, 0, 1),
                                                      adj_matrix)
        return cost_array
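
In this multi-graph variant the layer index is parsed from the parameter name (`name.split('_')[-1][0]`), and Sgate/Dgate entries carry two parameter dictionaries. A hypothetical squeeze-gate entry for layer 0 could therefore look like:

import numpy as np
from strawberryfields.ops import Sgate

# Hypothetical entry: gate class, target qumode, then one dict per trainable
# parameter; the trailing "_0" in each name assigns the gate to layer 0.
sgate_layer_0 = [
    Sgate,
    0,
    {"constant": np.random.random() - 0.5, "name": 'squeeze_magnitude_0',
     'regularize': True, 'monitor': True},
    {"constant": np.random.random() - 0.5, "name": 'squeeze_phase_0',
     'regularize': True, 'monitor': True},
]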