Example #1
def refinement(target,
               num_qubits,
               gate_size,
               fun_vals,
               loc_fixed,
               refinement_distance=1e-7,
               learning_rate=1e-6):
    """
    Refines a synthesized circuit to better implement the target unitary.
    This is achieved by fixing the circuit structure (gate locations)
    and using a more fine-grained optimizer.

    Args:
        target (np.ndarray): Target unitary

        num_qubits (int): The target unitary's number of qubits

        gate_size (int): number of active qubits in a gate

        fun_vals (List[List[float]]): Gate function values

        loc_fixed (List[Tuple[int]]): Gate locations

        refinement_distance (float): Refinement's goal distance

        learning_rate (float): Learning rate of the optimizer

    Returns:
        (List[List[float]]): Refined gate function values
    """
    # NOTE: n_cpus is assumed to be defined at module scope, e.g. parsed
    # from the command line as in "python3 synthesize_qft4_refinement.py -n 1".

    tf.reset_default_graph()
    reset_tensor_cache()

    with tf.device("/job:localhost/replica:0/task:0/device:CPU:0"):
        layers = [FixedGate("Gate0",
                            num_qubits,
                            gate_size,
                            loc=loc_fixed[0],
                            fun_vals=fun_vals[0])]
        tensor = layers[0].get_tensor()

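    # Distribute the remaining gates round-robin across the n_cpus CPU tasks.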
    for i in range(1, len(fun_vals)):
        with tf.device("/job:localhost/replica:0/task:%d/device:CPU:0" %
                       (i % n_cpus)):
            layers.append(FixedGate("Gate%d" % i,
                                    num_qubits,
                                    gate_size,
                                    loc=loc_fixed[i],
                                    fun_vals=fun_vals[i]))
            tensor = tf.matmul(layers[-1].get_tensor(), tensor)
    """
    layers = [ FixedGate( "Gate%d" % i, num_qubits, gate_size,
                          loc = loc_fixed[i], fun_vals = fun_vals[i] )
               for i in range( len( fun_vals ) ) ]

    tensor = layers[0].get_tensor()
    
    for layer in layers[1:]:
        tensor = tf.matmul( layer.get_tensor(), tensor )
    """
    with tf.device(tf.train.replica_device_setter(ps_tasks=n_cpus)):
        loss_fn = hilbert_schmidt_distance(target, tensor)
        optimizer = tf.train.AdamOptimizer(learning_rate)
        train_op = optimizer.minimize(loss_fn)
        init_op = tf.global_variables_initializer()

    loss_values = []

    run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
    run_metadata = tf.RunMetadata()

    with tf.Session(config=tf.ConfigProto(
            device_count={"CPU": n_cpus},
            inter_op_parallelism_threads=n_cpus,
            intra_op_parallelism_threads=1,
    )) as sess:
        # writer = tf.summary.FileWriter('./graphs', sess.graph)  # visualize the TF graph
        sess.run(init_op, options=run_options, run_metadata=run_metadata)
        loss = sess.run(loss_fn,
                        options=run_options,
                        run_metadata=run_metadata)
        logger.info("Starting refinement at %f distance." % loss)

        for i in range(10000):
            for j in range(50):
                loss = sess.run([train_op, loss_fn],
                                options=run_options,
                                run_metadata=run_metadata)[1]

            if loss < refinement_distance:
                break

            loss_values.append(loss)
            logger.debug("Loss: %f" % loss)

            if len(loss_values) > 100:
                min_value = np.min(loss_values[-100:])
                max_value = np.max(loss_values[-100:])

                # Plateau Detected
                if max_value - min_value < 1e-5:
                    break

            if len(loss_values) > 500:
                min_value = np.min(loss_values[-500:])
                max_value = np.max(loss_values[-500:])

                # Plateau Detected
                if max_value - min_value < 1e-3:
                    break

        logger.info("Ending refinement at %f distance." % loss)

        for device in run_metadata.step_stats.dev_stats:
            print(device.device)
            for node in device.node_stats:
                print("   ", node.node_name)

        return [l.get_fun_vals(sess) for l in layers]
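Every example on this page minimizes hilbert_schmidt_distance(target, tensor). That helper is not shown in these listings; the sketch below uses one common, phase-invariant definition and is only an assumption about what the project's loss computes.

import numpy as np
import tensorflow as tf  # TF1-style API, matching the listings above


def hs_distance_sketch(target, tensor):
    # Hypothetical stand-in for hilbert_schmidt_distance. Uses the
    # common phase-invariant form d(U, V) = 1 - |Tr(U^dagger V)| / dim,
    # which is 0 exactly when V equals U up to a global phase. The
    # project's own loss may differ in normalization or exponent.
    dim = target.shape[0]
    target_dag = tf.constant(target.conj().T, dtype=tensor.dtype)
    overlap = tf.linalg.trace(tf.matmul(target_dag, tensor))
    return 1.0 - tf.abs(overlap) / dim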
Example #2
def fixed_depth_exploration(target,
                            num_qubits,
                            gate_size,
                            fun_vals,
                            loc_vals,
                            lm,
                            exploration_distance=0.01,
                            learning_rate=0.01):
    """
    Attempts to synthesize the target unitary with a fixed number
    of gates of size gate_size.

    Args:
        target (np.ndarray): Target unitary

        num_qubits (int): The target unitary's number of qubits

        gate_size (int): number of active qubits in a gate

        fun_vals (List[List[float]]): Gate function values

        loc_vals (List[List[float]]): Gate location values

        lm (LocationModel): The model that maps loc_vals to locations

        exploration_distance (float): Exploration's goal distance

        learning_rate (float): Learning rate of the optimizer

    Returns:
        (Tuple[bool, List[List[float]], List[List[float]]]):
            True if the exploration distance was reached, plus the
            final fun_vals and loc_vals
    """

    tf.reset_default_graph()
    reset_tensor_cache()

    layers = [
        GenericGate("Gate%d" % i,
                    num_qubits,
                    gate_size,
                    lm,
                    fun_vals[i],
                    loc_vals[i],
                    parity=(i + 1) % lm.num_buckets)
        for i in range(len(fun_vals))
    ]

    tensor = layers[0].get_tensor()
    for layer in layers[1:]:
        tensor = tf.matmul(layer.get_tensor(), tensor)

    loss_fn = hilbert_schmidt_distance(target, tensor)
    optimizer = tf.train.AdamOptimizer(learning_rate)
    train_op = optimizer.minimize(loss_fn)
    init_op = tf.global_variables_initializer()

    loss_values = []

    with tf.Session() as sess:
        sess.run(init_op)

        while True:
            for i in range(20):
                loss = sess.run([train_op, loss_fn])[1]

            if loss < exploration_distance:
                logger.info("Ending exploration at %f distance." % loss)
                return (True, [l.get_fun_vals(sess) for l in layers],
                        [l.get_loc_vals(sess) for l in layers])

            loss_values.append(loss)
            logger.debug("Loss: %f" % loss)

            if len(loss_values) > 100:
                min_value = np.min(loss_values[-100:])
                max_value = np.max(loss_values[-100:])

                # Plateau Detected
                if max_value - min_value < learning_rate / 10:
                    logger.debug("Ending exploration at %f distance." % loss)
                    return (False, [l.get_fun_vals(sess) for l in layers],
                            [l.get_loc_vals(sess) for l in layers])

            if len(loss_values) > 500:
                min_value = np.min(loss_values[-500:])
                max_value = np.max(loss_values[-500:])

                # Plateau Detected
                if max_value - min_value < learning_rate:
                    logger.debug("Ending exploration at %f distance." % loss)
                    return (False, [l.get_fun_vals(sess) for l in layers],
                            [l.get_loc_vals(sess) for l in layers])
def refinement(target,
               num_qubits,
               gate_size,
               fun_vals,
               loc_fixed,
               refinement_distance=1e-7,
               learning_rate=1e-6):
    """
    Refines a synthesized circuit to better implement the target unitary.
    This is achieved by fixing the circuit structure (gate locations)
    and using a more fine-grained optimizer.

    Args:
        target (np.ndarray): Target unitary

        num_qubits (int): The target unitary's number of qubits

        gate_size (int): number of active qubits in a gate

        fun_vals (List[List[float]]): Gate function values

        loc_fixed (List[Tuple[int]]): Gate locations

        refinement_distance (float): Refinement's goal distance

        learning_rate (float): Learning rate of the optimizer

    Returns:
        (List[List[float]]): Refined gate function values
    """

    tf.reset_default_graph()
    reset_tensor_cache()

    layers = [
        FixedGate("Gate%d" % i,
                  num_qubits,
                  gate_size,
                  loc=loc_fixed[i],
                  fun_vals=fun_vals[i]) for i in range(len(fun_vals))
    ]

    tensor = layers[0].get_tensor()
    for layer in layers[1:]:
        tensor = tf.matmul(layer.get_tensor(), tensor)

    loss_fn = hilbert_schmidt_distance(target, tensor)
    optimizer = tf.train.AdamOptimizer(learning_rate)
    train_op = optimizer.minimize(loss_fn)
    init_op = tf.global_variables_initializer()

    loss_values = []

    with tf.Session() as sess:
        sess.run(init_op)
        loss = sess.run(loss_fn)
        logger.info("Starting refinement at %f distance." % loss)

        for i in range(10000):
            for j in range(50):
                loss = sess.run([train_op, loss_fn])[1]

            if loss < refinement_distance:
                break

            loss_values.append(loss)
            logger.debug("Loss: %f" % loss)

            if len(loss_values) > 100:
                min_value = np.min(loss_values[-100:])
                max_value = np.max(loss_values[-100:])

                # Plateau Detected
                if max_value - min_value < 1e-5:
                    break

            if len(loss_values) > 500:
                min_value = np.min(loss_values[-500:])
                max_value = np.max(loss_values[-500:])

                # Plateau Detected
                if max_value - min_value < 1e-3:
                    break

        logger.info("Ending refinement at %f distance." % loss)

        return [l.get_fun_vals(sess) for l in layers]
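All of these listings share the same windowed plateau test: record the loss after each chunk of optimization steps and stop once the spread of recent values falls below a tolerance. A standalone sketch of that test (the helper name is illustrative, not from the source):

import numpy as np


def plateau_detected(loss_values, window, tolerance):
    # True once more than `window` losses have been recorded and the
    # spread (max - min) over the last `window` of them is below
    # `tolerance`.
    if len(loss_values) <= window:
        return False
    recent = loss_values[-window:]
    return np.max(recent) - np.min(recent) < tolerance


# refinement's loop body is then equivalent to:
#     if plateau_detected(loss_values, 100, 1e-5) or \
#        plateau_detected(loss_values, 500, 1e-3):
#         break
# (fixed_depth_exploration uses learning_rate / 10 and learning_rate
# as the two tolerances instead.)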
Example #4
def refinement(target,
               num_qubits,
               gate_size,
               fun_vals,
               loc_fixed,
               refinement_distance=1e-7,
               learning_rate=1e-6):
    """
    Refines a synthesized circuit to better implement the target unitary.
    This is achieved by fixing the circuit structure (gate locations)
    and using a more fine-grained optimizer.

    Args:
        target (np.ndarray): Target unitary

        num_qubits (int): The target unitary's number of qubits

        gate_size (int): number of active qubits in a gate

        fun_vals (List[List[float]]): Gate function values

        loc_fixed (List[Tuple[int]]): Gate locations

        refinement_distance (float): Refinement's goal distance

        learning_rate (float): Learning rate of the optimizer

    Returns:
        (List[List[float]]): Refined gate function values
    """
    # NOTE: n_cpus, task_index, server, cluster, and create_done_queue are
    # assumed to be defined at module scope (n_cpus e.g. parsed from the
    # command line, as in "python3 synthesize_qft4_refinement.py -n 1").

    if task_index != 0:
        # 1. The server was already set up by the caller.
        # 2. Set up the session config.
        logger.info("Setting up ps session config for task index %d" %
                    task_index)
        sess = tf.Session(server.target,
                          config=tf.ConfigProto(
                              device_count={"CPU": n_cpus},
                              inter_op_parallelism_threads=n_cpus,
                              intra_op_parallelism_threads=1,
                          ))
        logger.info("Finished ps session config for task index %d" %
                    task_index)
        # 3. Create this task's done queue.
        queue = create_done_queue(task_index)

        # 4. Block until the chief signals completion.
        sess.run(queue.dequeue())
        logger.info("ps %d received done signal from chief" % task_index)
     

    else:
        logger.info("task index %f inside refinement:" %task_index)
        tf.reset_default_graph()
        reset_tensor_cache()
        layers = []
        
        
        with tf.device("/job:localhost/replica:0/task:0/device:CPU:0"):
            layers.append(FixedGate( "Gate%d" % 0, num_qubits, gate_size, loc = loc_fixed[0], fun_vals = fun_vals[0] ))
            tensor = layers[0].get_tensor()
       
        each_num = np.ceil(len(fun_vals)/n_cpus)
        for i in range(1, len(fun_vals)):
            with tf.device("/job:localhost/replica:0/task:%d/device:CPU:0" % (i // each_num)):
                layers.append( FixedGate( "Gate%d" % i, num_qubits, gate_size, loc = loc_fixed[i], fun_vals = fun_vals[i] ))
                tensor = tf.matmul(layers[-1].get_tensor(), tensor)
        
        """
        layers = [ FixedGate( "Gate%d" % i, num_qubits, gate_size,
                              loc = loc_fixed[i], fun_vals = fun_vals[i] )
                   for i in range( len( fun_vals ) ) ]

        tensor = layers[0].get_tensor()
        
        for layer in layers[1:]:
            tensor = tf.matmul( layer.get_tensor(), tensor )
        """

        with tf.device(tf.train.replica_device_setter(
                worker_device="/job:localhost/replica:0/task:%d/device:CPU:0"
                % task_index,
                cluster=cluster)):
            loss_fn = hilbert_schmidt_distance(target, tensor)
            optimizer = tf.train.AdamOptimizer(learning_rate)
            train_op = optimizer.minimize(loss_fn)
            init_op = tf.global_variables_initializer()

        loss_values = []
        
        with tf.Session(server.target,
                        config=tf.ConfigProto(
                            device_count={"CPU": n_cpus},
                            inter_op_parallelism_threads=n_cpus,
                            intra_op_parallelism_threads=1,
                        )) as sess:
            logger.info("server.target: %s" % server.target)
            sess.run(init_op)
            loss = sess.run(loss_fn)

            logger.info("Starting refinement at %f distance, task index %d" %
                        (loss, task_index))

            for i in range(10000):
                for j in range(50):
                    loss = sess.run([train_op, loss_fn])[1]
                
                if loss < refinement_distance:
                    break

                loss_values.append(loss)
                logger.debug("Loss: %f" % loss)

                if len(loss_values) > 100:
                    min_value = np.min(loss_values[-100:])
                    max_value = np.max(loss_values[-100:])

                    # Plateau Detected
                    if max_value - min_value < 1e-5:
                        break

                if len(loss_values) > 500:
                    min_value = np.min(loss_values[-500:])
                    max_value = np.max(loss_values[-500:])

                    # Plateau Detected
                    if max_value - min_value < 1e-3:
                        break

            logger.info( "Ending refinement at %f distance.task index %f" % (loss,task_index) )

            logger.info("reach the end in refinement")
            result = [l.get_fun_vals(sess) for l in layers]

            # Signal every non-chief task that refinement is done.
            for r in range(1, n_cpus):
                sess.run(create_done_queue(r).enqueue(1))
            return result
def fixed_depth_exploration(target,
                            num_qubits,
                            gate_size,
                            fun_vals,
                            loc_vals,
                            lm,
                            exploration_distance=0.01,
                            learning_rate=0.01):
    """
    Attempts to synthesize the target unitary with a fixed number
    of gates of size gate_size.

    Args:
        target (np.ndarray): Target unitary

        num_qubits (int): The target unitary's number of qubits

        gate_size (int): number of active qubits in a gate

        fun_vals (List[List[float]]): Gate function values

        loc_vals (List[List[float]]): Gate location values

        lm (LocationModel): The model that maps loc_vals to locations

        exploration_distance (float): Exploration's goal distance

        learning_rate (float): Learning rate of the optimizer

    Returns:
        (Tuple[bool, List[List[float]], List[List[float]]]):
            True if the exploration distance was reached, plus the
            final fun_vals and loc_vals
    """
    if rank != 0:
        # rank and comm are assumed to come from mpi4py at module scope;
        # task_index is assumed to match this process's rank.
        # 1. The server was already set up by the caller.
        # 2. Set up the session config.
        logger.info("Setting up ps session config for task index %d" %
                    task_index)
        sess = tf.Session(server.target,
                          config=tf.ConfigProto(
                              device_count={"CPU": n_cpus},
                              inter_op_parallelism_threads=n_cpus,
                              intra_op_parallelism_threads=1,
                          ))
        logger.info("Finished ps session config for task index %d" %
                    task_index)
        # 3. Create this task's done queue.
        queue = create_done_queue(task_index)

        # 4. Block until the chief signals completion.
        sess.run(queue.dequeue())
        logger.info("ps %d received done signal from chief" % task_index)
        req = comm.irecv(source=0, tag=12)
        result = req.wait()
        return result

    else:

        tf.reset_default_graph()
        reset_tensor_cache()
        layers = []

        with tf.device("/job:localhost/replica:0/task:0/device:CPU:0"):
            layers.append(
                GenericGate("Gate%d" % 0,
                            num_qubits,
                            gate_size,
                            lm,
                            fun_vals[0],
                            loc_vals[0],
                            parity=1 % lm.num_buckets))
            tensor = layers[0].get_tensor()

        # Place gates in contiguous blocks of each_num per CPU task;
        # e.g. 8 gates on 4 CPUs gives each_num = ceil(8/4) = 2 and
        # task ids 0, 0, 1, 1, 2, 2, 3, 3.
        each_num = int(np.ceil(len(fun_vals) / n_cpus))
        for i in range(1, len(fun_vals)):
            with tf.device("/job:localhost/replica:0/task:%d/device:CPU:0" %
                           (i // each_num)):
                layers.append(
                    GenericGate("Gate%d" % i,
                                num_qubits,
                                gate_size,
                                lm,
                                fun_vals[i],
                                loc_vals[i],
                                parity=(i + 1) % lm.num_buckets))
                tensor = tf.matmul(layers[-1].get_tensor(), tensor)
        """
        layers = [ GenericGate( "Gate%d" % i, num_qubits, gate_size, lm,
                                fun_vals[i], loc_vals[i],
                                parity = (i + 1) % lm.num_buckets )
                   for i in range( len( fun_vals ) ) ]

        tensor = layers[0].get_tensor()
        for layer in layers[1:]:
            tensor = tf.matmul( layer.get_tensor(), tensor )
        """
        with tf.device(tf.train.replica_device_setter(
                worker_device="/job:localhost/replica:0/task:%d/device:CPU:0"
                % task_index,
                cluster=cluster)):
            loss_fn = hilbert_schmidt_distance(target, tensor)
            optimizer = tf.train.AdamOptimizer(learning_rate)
            train_op = optimizer.minimize(loss_fn)
            init_op = tf.global_variables_initializer()

        loss_values = []

        with tf.Session(server.target,
                        config=tf.ConfigProto(
                            device_count={"CPU": n_cpus},
                            inter_op_parallelism_threads=n_cpus,
                            intra_op_parallelism_threads=1,
                        )) as sess:
            sess.run(init_op)

            while True:
                for i in range(20):
                    loss = sess.run([train_op, loss_fn])[1]

                if loss < exploration_distance:
                    logger.info("Ending exploration at %f distance." % loss)
                    result = (True, [l.get_fun_vals(sess) for l in layers],
                              [l.get_loc_vals(sess) for l in layers])
                    # Signal every non-chief task that exploration is done.
                    for r in range(1, n_cpus):
                        sess.run(create_done_queue(r).enqueue(1))

                    # Send the result to every non-chief task over MPI.
                    for rank_ in range(1, n_cpus):
                        comm.isend(result, dest=rank_, tag=12).wait()
                    return result

                loss_values.append(loss)
                logger.debug("Loss: %f" % loss)

                if len(loss_values) > 100:
                    min_value = np.min(loss_values[-100:])
                    max_value = np.max(loss_values[-100:])

                    # Plateau Detected
                    if max_value - min_value < learning_rate / 10:
                        logger.debug("Ending exploration at %f distance." %
                                     loss)
                        result = (False,
                                  [l.get_fun_vals(sess) for l in layers],
                                  [l.get_loc_vals(sess) for l in layers])

                        # Signal every non-chief task that exploration is done.
                        for r in range(1, n_cpus):
                            sess.run(create_done_queue(r).enqueue(1))

                        # Send the result to every non-chief task over MPI.
                        for rank_ in range(1, n_cpus):
                            comm.isend(result, dest=rank_, tag=12).wait()
                        return result

                if len(loss_values) > 500:
                    min_value = np.min(loss_values[-500:])
                    max_value = np.max(loss_values[-500:])

                    # Plateau Detected
                    if max_value - min_value < learning_rate:
                        logger.debug("Ending exploration at %f distance." %
                                     loss)
                        result = (False,
                                  [l.get_fun_vals(sess) for l in layers],
                                  [l.get_loc_vals(sess) for l in layers])

                        # Signal every non-chief task that exploration is done.
                        for r in range(1, n_cpus):
                            sess.run(create_done_queue(r).enqueue(1))

                        # Send the result to every non-chief task over MPI.
                        for rank_ in range(1, n_cpus):
                            comm.isend(result, dest=rank_, tag=12).wait()
                        return result
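Example #4 coordinates shutdown through per-task done queues: the chief enqueues one token per non-chief task, and each non-chief task blocks on dequeue() until its token arrives, then (in fixed_depth_exploration) receives the result over MPI. create_done_queue itself is not shown in these listings; the sketch below follows the standard distributed-TensorFlow pattern and is only an assumption about its shape.

import tensorflow as tf  # TF1-style API


def create_done_queue_sketch(task_index):
    # One shared FIFOQueue per non-chief task, pinned to that task's
    # device; shared_name lets the chief and the waiting task resolve
    # the same queue from different sessions. (Sketch only; the
    # project's create_done_queue may differ.)
    with tf.device("/job:localhost/replica:0/task:%d/device:CPU:0" %
                   task_index):
        return tf.FIFOQueue(1, tf.int32,
                            shared_name="done_queue_%d" % task_index)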