# Example #1 (score: 0)
 def test_retrieve_parameter(self, m_boto3):
     """Fetching a stored parameter returns its value and reuses one boto3 client."""
     stubbed_response = {"Parameter": {"Value": "stored_parameter"}}
     m_boto3.client.return_value.get_parameter.return_value = stubbed_response
     fetched = parameters.get("stored_parameter")
     self.assertEqual(fetched, stubbed_response["Parameter"]["Value"])
     # A second fetch must not create another boto3 client.
     parameters.get("stored_parameter")
     self.assertEqual(m_boto3.client.call_count, 1)
        help="(str) Number of iterations",
    )
    parser.add_argument('-animate',
                        action='store_true',
                        help="(bool) Animate flag to true")
    parser.add_argument('-observe',
                        action='store_true',
                        help="(bool) Animate flag to true")
    parser.add_argument('-log',
                        action='store_true',
                        help="(bool) Animate flag to true")
    args = parser.parse_args()

    # Simulation parameters
    fitness, controller, agent, pr_states, pr_actions = \
      parameters.get(args.controller)

    # Load and build the simulator
    sim = sim(savefolder="data/%s/optimization_%i/" % (controller, args.id))

    # Load the transition models
    filelist_training = [f for f in os.listdir(args.folder_training) \
              if f.endswith('.npz')]
    ####################################################################

    ####################################################################
    # Policy optimization

    # First we iterate over each log to build the transition model
    for j, filename in enumerate(sorted(filelist_training)):
        # The first time, set up the model, then just update it
def main(args):
    """Load the latest optimization result for a controller, optionally plot
    and/or print it, and run formal verification on the optimized policy.

    Args:
        args: Command-line argument list (without the program name); parsed
            with argparse.
    """
    ####################################################################
    # Initialize

    # Argument parser
    parser = argparse.ArgumentParser(
        description='Simulate a task to gather the data for optimization')
    parser.add_argument('controller', type=str, help="(str) Controller to use")
    parser.add_argument('folder', type=str, help="(str) Folder to use")
    parser.add_argument('-format',
                        type=str,
                        default="pdf",
                        help="(str) Save figure format")
    parser.add_argument('-plot',
                        action='store_true',
                        help="(bool) Plot flag to true")
    parser.add_argument('-verbose',
                        action='store_true',
                        help="(bool) Verbose flag to true")
    args = parser.parse_args(args)

    # Load parameters for the requested controller
    fitness, controller, agent, pr_states, pr_actions = \
        parameters.get(args.controller)
    ####################################################################

    ####################################################################
    # Load optimization files
    files_train = [f for f in os.listdir(args.folder)
                   if f.startswith("optimization") and f.endswith('.npz')]

    # Unpack the last (most recent) optimization file
    data = np.load(args.folder + files_train[-1])
    H0 = data["H0"].astype(float)
    H1 = data["H1"].astype(float)
    # Fix rounding errors: clamp near-zero transition probabilities to zero
    H0[H0 < 0.01] = 0.0
    H1[H1 < 0.01] = 0.0
    E = matop.normalize_rows(data["E"])
    policy = data["policy"]
    des = data["des"]
    alpha = data["alpha"]
    ####################################################################

    ####################################################################
    # if -plot
    # Plot and display relevant results

    if args.plot:

        # Calculate parameters
        ## Calculate Google matrices
        G0 = np.diag(alpha).dot(H0) + np.diag(1 - alpha).dot(E)
        G1 = np.diag(alpha).dot(H1) + np.diag(1 - alpha).dot(E)

        ## PageRank scores
        prH0 = matop.pagerank(H0)
        prE = matop.pagerank(E)
        pr0 = matop.pagerank(G0)
        pr1 = matop.pagerank(G1)

        ## Initialize pagerank optimizer for evaluation
        ## Using dummy inputs, since init not needed
        p = propt.pagerank_evolve(des, np.array([H0, H1]), E)

        ## Get original fitness and new fitness
        ## NOTE(review): f0/f1 are currently only referenced by the
        ## commented-out stats printout in the -verbose branch below.
        f0 = p.pagerank_fitness(pr0, des)
        f1 = p.pagerank_fitness(pr1, des)

        # Make a folder to store the figures.
        # BUGFIX: makedirs was previously called on
        # os.path.dirname("figures/pagerank") == "figures", so the directory
        # that savefig actually writes into was never created.
        folder = "figures/pagerank"
        if not os.path.exists(folder):
            os.makedirs(folder)

        # Even x-ticks over the states.
        # (math.ceil on the integer .size was a no-op; removed with its import.)
        xint = range(0, pr1[0].size, 2)

        # Figure: Plot pagerank H and E
        # Raw strings avoid invalid-escape warnings in the LaTeX labels.
        plt = pp.setup()
        plt.bar(np.array(range(prH0[0].size)),
                prH0[0],
                alpha=0.5,
                label=r"$PR^\pi$, $\mathbf{H^\pi}$ only")
        plt.bar(np.array(range(prE[0].size)),
                prE[0],
                alpha=0.5,
                label=r"$PR^\pi$, $\mathbf{E}$ only")
        plt = pp.adjust(plt)
        plt.xlabel("State")
        plt.ylabel("PageRank [-]")
        matplotlib.pyplot.xticks(xint)
        plt.legend()
        plt.savefig("%s/pagerank_original_%s.%s"
                    % (folder, controller, args.format))
        plt.close()

        # Figure: Diff plot of pagerank values
        plt = pp.setup()
        c = ["blue", "green"]
        # Green for desired states (des > 0.01), blue for transitional ones
        color_list = list(map(lambda x: c[1] if x > 0.01 else c[0], des))
        if controller == "forage":
            # Scale up by 1000 for readability on the forage task
            plt.bar(range(pr1[0].size), (pr1[0] - pr0[0]) * 1000,
                    label=r"$PR^\pi-PR^{\pi^\star}$",
                    color=color_list)
            plt.ylabel(r"$\Delta$ PageRank ($\times$1000) [-]")
        else:
            plt.bar(range(pr1[0].size), (pr1[0] - pr0[0]),
                    label=r"$PR^\pi-PR^{\pi^\star}$",
                    color=color_list)
            plt.ylabel(r"$\Delta$ PageRank [-]")
        plt = pp.adjust(plt)
        plt.xlabel("State [-]")
        matplotlib.pyplot.xticks(xint)

        # Custom legend: blue = transitional states, green = desired states
        custom_lines = [
            matplotlib.lines.Line2D([0], [0], color="blue", lw=20),
            matplotlib.lines.Line2D([0], [0], color="green", lw=20)
        ]
        plt.legend(custom_lines, ['Transitional', 'Desired'])
        plt.savefig("%s/pagerank_diff_%s.%s" %
                    (folder, controller, args.format))
        plt.close()
        return
    ####################################################################

    ####################################################################
    # if -verbose
    # Display relevant results to terminal
    if args.verbose:
        print("\n------- MODEL -------\n")
        print("\nH0 matrix:\n", H0)
        print("\nH1 matrix:\n", H1)
        print("\nE matrix:\n", E)
        print("\nalpha vector:\n", alpha)
        print("\n------- POLICY -------\n", policy)
        # print("\n------- STATS -------\n")
        # print("Original fitness =", f0[0])
        # print("New fitness =", f1[0])

    # Check conditions on last file:
    # binarize the matrices (1 wherever a transition exists) and run the
    # formal verification of the policy against the desired states.
    eps = 1e-8
    H0[H0 > eps] = 1
    H1[H1 > eps] = 1
    E[E > eps] = 1
    H0 = H0.astype(int)
    H1 = H1.astype(int)
    E = E.astype(int)
    c = verification.verification(H0, H1, E, policy, des)
    c.verify()
# Example #4 (score: 0)
parser.add_argument('-n',
                    type=int,
                    default=30,
                    help="(int) Size of swarm, default = 30")
parser.add_argument('-runs',
                    type=int,
                    default=100,
                    help="(int) Evaluation runs per policy, default = 100")
parser.add_argument('-iterations',
                    type=int,
                    default=100,
                    help="(int) Evaluated random policies, default = 100")
args = parser.parse_args()

# Simulation parameters
fitness, controller, agent, pr_states, pr_actions = parameters.get(
    args.controller)

# Load and build simulator
sim = simulator.simulator()
sim.make(controller,
         agent,
         clean=True,
         animation=False,
         logger=False,
         verbose=False)

# Run it
f = []
for j in range(args.iterations):
    print("----------------------- %i ----------------------" % j)
    # Generate a random policy
def main(args):
	"""Run, resume, or evaluate an evolutionary optimization of a swarm policy.

	Args:
		args: Command-line argument list (without the program name); parsed
			with argparse.
	"""
	####################################################################
	# Initialize

	# Argument parser
	parser = argparse.ArgumentParser(
		description='Simulate a task to gather the data for optimization'
		)

	parser.add_argument('controller', type=str,
		help="Controller to use")
	parser.add_argument('-generations', type=int, default=50,
		help="Max generations after which the evolution quits, default = 50")
	parser.add_argument('-pop', type=int, default=100,
		help="Population size used in the evolution, default = 100")
	parser.add_argument('-t', type=int, default=200,
		help="Time for which each simulation is executed, default = 200s")
	parser.add_argument('-nmin', type=int, default=10,
		help="Minimum number of robots simulated, default = 10")
	parser.add_argument('-nmax', type=int, default=20,
		help="Maximum number of robots simulated, default = 20")
	parser.add_argument('-reruns', type=int, default=5,
		help="Number of policy re-evaluations, default = 5")
	parser.add_argument('-plot', type=str, default=None,
		help="Specify the relative path to a pkl \
			evolution file to plot the evolution.")
	parser.add_argument('-environment', type=str, default="square20",
		help=" Environment used in the simulations, \
			default is a square room of size 20 by 20.")
	parser.add_argument('-id', type=int, default=np.random.randint(0,10000),
		help="ID of evolutionary run, default = random integer")
	parser.add_argument('-resume', type=str, default=None,
		help="If specified, it will resume the evolution \
			from a previous pkl checkpoint file")
	parser.add_argument('-evaluate', type=str, default=None,
		help="If specified, it will evaluate the best result \
			in an evolution pkl file")

	args = parser.parse_args(args)

	# Load parameters for the requested controller
	fitness, controller, agent, pr_states, pr_actions = \
								parameters.get(args.controller)

	# Set up path to filename to save the evolution
	folder = "data/%s/evolution/" % (controller)
	directory = os.path.dirname(folder)
	if not os.path.exists(directory):
		os.makedirs(directory)
	filename = folder + "evolution_standard_%s_t%i_%i" % (controller, args.t, args.id)

	# Evolution API setup
	e = evolution.evolution()
	e.setup(fitnessfunction,
		GENOME_LENGTH=pr_states*pr_actions,
		POPULATION_SIZE=args.pop)
	####################################################################

	####################################################################
	# Plot file from file args.plot if indicated
	####################################################################
	if args.plot is not None:
		# BUGFIX: this call was previously indented with a mix of spaces and
		# tabs inside a tab-indented function, which raises a TabError.
		plot(args.plot)
		# NOTE(review): unlike the verification entry point, this does not
		# return after plotting — confirm whether the evolution should still
		# run when -plot is given.

	####################################################################
	# Evolve or evaluate
	# Swarmulator API set up
	sim = simulator()
	sim.sim.runtime_setting("time_limit", str(args.t))
	sim.sim.runtime_setting("simulation_realtimefactor", str("300"))
	sim.sim.runtime_setting("environment", args.environment)
	sim.sim.runtime_setting("fitness", fitness)
	sim.sim.runtime_setting("pr_states", str(pr_states))
	sim.sim.runtime_setting("pr_actions", str(pr_actions))

	# if -evaluate <path_to_evolution_savefile>
	# Evaluate the performance of the best individual from the evolution file
	if args.evaluate is not None:
		sim.make(controller=controller, agent=agent,
			clean=True, animation=False, logger=True, verbose=False)

		# Load the evolution parameters
		e.load(args.evaluate)
		individual = e.get_best()

		# Evaluate and save the log.
		# Temporarily force reruns to 1 so each call to fitnessfunction
		# performs a single run that we log individually.
		import copy
		runs = copy.deepcopy(args.reruns)
		args.reruns = 1
		f = []
		for i in range(runs):
			f.append(fitnessfunction(individual))
			sim.save_log(filename_ext="%s/evolution/evo_log_%i"%(controller,i))

		# Save evaluation data
		fh.save_pkl(f,"data/%s/benchmark_evolution_%s_t%i_r%i_%i_runs%i_%i.pkl"
			%(controller,controller,args.t,args.nmin,args.nmax,runs,args.id))

	# if -resume <path_to_evolution_savefile>
	# Resume evolution from file args.resume
	elif args.resume is not None:
		sim.make(controller=controller, agent=agent,
			clean=True, animation=False, logger=False, verbose=False)

		# Load the evolution from the file
		e.load(args.resume)

		# Evolve starting from there
		p = e.evolve(generations=args.generations,
			checkpoint=filename,
			population=e.pop,
			verbose=True)

		# Save the evolution
		e.save(filename)

	# Otherwise, just run normally and start a new evolution from scratch
	else:
		sim.make(controller=controller, agent=agent,
			clean=True, animation=False, logger=False, verbose=False)

		p = e.evolve(generations=args.generations,
			checkpoint=filename,
			verbose=True)

		# Save the evolution
		e.save(filename)
	####################################################################

if __name__ == "__main__":
    # BUGFIX: pass sys.argv[1:] — main() forwards its argument straight to
    # argparse's parse_args(), so including sys.argv[0] (the program name)
    # would make argparse consume the script path as the 'controller'
    # positional argument.
    main(sys.argv[1:])