for sim_flag in range(sim_num):
        print "with %d sources and %d nodes at the %d-th simulation..."%(source_num_per_lane*lane_num,\
        relay_num_per_lane*lane_num, sim_flag)
        reward_array_temp2 = []           # per-relay rewards for this run
        residual_energy_array_temp2 = []  # per-node residual energies for this run
        # build the network and run one simulation instance
        net_info = Net_info(lane_num, source_num_per_lane, relay_num_per_lane)
        net_info.initialize_channel_parameters(assumption_flag="iid")
        net_info.initialize_power_level(p_max, initial_energy)
        scenario = Scenario(mobility, distribution, net_info, const,
                            lane_width, communication_delay, movement_delay,
                            total_sim_time)
        scenario.initialization()
        for epoch in range(epoch_num):
            #print "adaptation at %d-th epoch..."%(epoch,)
            scenario.adaptation()
            scenario.update_position()
            scenario.update()
        # record per-run results: residual energy of every node, reward of every relay
        for lane_pool in scenario.node_pool:
            for node in lane_pool:
                residual_energy_array_temp2.append(node.residual_energy)
        for player in scenario.relayNodes:
            reward_array_temp2.append(player.reward)

        reward_array_temp.append(np.mean(reward_array_temp2))
        reward_JFI_array_temp.append(JFI_func(reward_array_temp2))
        residual_energy_array_temp.append(np.mean(residual_energy_array_temp2))
        residual_energy_JFI_array_temp.append(
            JFI_func(residual_energy_array_temp2))
# reset power levels and residual energy before the standalone run below
net_info.initialize_power_level(p_max, initial_energy)

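#%% reference sketch of the fairness metric used above
# JFI_func is assumed to compute Jain's fairness index,
#   JFI(x) = (sum_i x_i)^2 / (n * sum_i x_i^2),
# which lies in (0, 1] and equals 1 when all values are identical.
# The hypothetical sketch below only illustrates that assumption; the
# actual JFI_func implementation may differ.
def JFI_func_sketch(values):
    """Jain's fairness index of a 1-D array-like of non-negative values."""
    x = np.asarray(values, dtype=float)
    return x.sum() ** 2 / (len(x) * np.square(x).sum())
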
#%% initialize the simulation scenario

from scenario import Scenario

scenario = Scenario(mobility, distribution, net_info, const,
                    lane_width, communication_delay, movement_delay,
                    total_sim_time)
scenario.initialization()
#%% run the simulation

for epoch in range(epoch_num):
    print "adaptation at %d-th epoch..."%(epoch,)
    scenario.adaptation()
    scenario.update_position()
    scenario.update()
   
#%% record the normalized rewards received by each player in the Nash Equilibrium state
reward_results_NE = np.array([scenario.relayNodes[relayID].normalized_reward
                              for relayID in range(scenario.net_info.RELAY_NUM)])
    
#%% unilateral testing experiments

import copy
from DLbSoRS_Alg import unilateral_update

reward_results_DE = np.array([unilateral_update(copy.deepcopy(scenario), playerID)
                              for playerID in range(scenario.net_info.RELAY_NUM)])
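
#%% compare equilibrium rewards against unilateral deviations
# Hypothetical sanity check (the tolerance `tol` and the reading that
# unilateral_update returns the reward a player can reach by deviating alone
# are assumptions, not taken from the original code): at a Nash Equilibrium,
# no player should improve its reward through a unilateral deviation.
tol = 1e-6
if np.all(reward_results_DE <= reward_results_NE + tol):
    print("NE check passed: no profitable unilateral deviation found.")
else:
    print("NE check failed: some player gains by deviating unilaterally.")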