def extract_states(self, file, load_pkl=False, store_pkl=True):
        '''Extract the time, normalized local states, and fitness needed by the optimization'''

        # If we are not asked to load a pkl file, or if no pkl file exists yet,
        # we still need to do the dirty work and load everything from the raw
        # log file. We can also store a pkl version to save time in future runs.
        if not load_pkl or not os.path.exists(file + ".pkl"):
            # Pre-process data
            sim = simulator.simulator()  # Environment
            sim.load(file, verbose=False)  # Load npz log file
            time, local_states, fitness = sim.extract()  # Extract time, states, and fitness
            s = matop.normalize_rows(local_states)  # Normalize rows

            # Save a pkl file with the pre-processed data
            # so that we can be faster later
            # if we want to reuse the same logfile
            if store_pkl:
                fh.save_pkl([time, s, fitness], file + ".pkl")

        # Otherwise (load_pkl is True and the pkl file exists), we are in luck:
        # we can just use the pre-processed data directly.
        else:
            data = fh.load_pkl(file + ".pkl")
            time = data[0]
            s = data[1]
            fitness = data[2]

        # Set dimensions of state vector
        self.dim = s.shape[1]

        # Return tuple with data
        return time, s, fitness
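
# Usage sketch (hypothetical path; not part of the original file). "dse" is an
# instance of the desired_states_extractor class that defines extract_states
# above. The first call parses the npz log and caches a .pkl next to it, so
# later calls with load_pkl=True can reuse the cache.
#
#   dse = desired_states_extractor.desired_states_extractor()
#   time, s, fitness = dse.extract_states("data/sample_log.npz", load_pkl=True)
#   print(s.shape[1])  # state dimension, also stored in dse.dim
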
                 logger=False)

        ## Run
        settings["time_limit"] = 0  # Infinite time
        settings["policy_filename"] = sim.save_policy(policy, pr_actions)
        sim.run(**settings)

    elif args.log:
        # Build the simulator with the desired settings
        sim.make(controller,
                 agent,
                 animation=args.animate,
                 verbose=False,
                 logger=True)

        ## Run
        settings["policy_filename"] = sim.save_policy(policy, pr_actions)
        sim.run(**settings)

        # Save the log files
        sim.save_log(filename_ext="sample_log")

    else:
        # Run a benchmark
        f = sim.benchmark(policy, make=True, **settings)

        # Save all received fitnesses
        fh.save_pkl(
            f, "data/%s/benchmark_optimized_%s_t%i_r%i_runs%i_id%i.pkl" %
            (controller, controller, args.t, args.n, args.runs, args.id))
    ####################################################################
Example #3
		sim.make(controller=controller, agent=agent, 
			clean=True, animation=False, logger=True, verbose=False)

		# Load the evolution parameters
		e.load(args.evaluate)
		individual = e.get_best()

		# Evaluate and save the log
		runs = args.reruns
		args.reruns = 1
		f = []
		for i in range(runs):
			f.append(fitnessfunction(individual))
			sim.save_log(filename_ext="%s/evolution_mbe/evo_log_%i"%(controller,i))

		# Save evaluation data
		fh.save_pkl(f,"data/%s/benchmark_evolution_mbe_%s_t%i_r%i_runs%i.pkl"
			%(controller,controller,args.t,args.nmax,runs))
		
	# if -resume <path_to_evolution_savefile>
	# Resume evolution from file args.resume
	elif args.resume is not None:
		sim.make(controller=controller, agent=agent, 
			clean=True, animation=False, logger=True, verbose=False)

		# Load the evolution from the file
		e.load(args.resume)

		# Evolve starting from there
		p = e.evolve(generations=args.generations, 
			checkpoint=filename, 
			population=e.pop,
			verbose=True)
Example #4
def main(args):
    ####################################################################
    # Initialize
    # Input arguments
    parser = argparse.ArgumentParser(
        description='Train and validate a model that predicts the fitness '
        'from the states in the simulation logs')
    parser.add_argument('folder_train',
                        type=str,
                        help="(str) Training data folder",
                        default=None)
    parser.add_argument('folder_test',
                        type=str,
                        help="(str) Validation data folder",
                        default=None)
    parser.add_argument('savefolder',
                        type=str,
                        help="(str) Save folder",
                        default=None)
    parser.add_argument('-id',
                        type=int,
                        help="Model ID (for save/load)",
                        default=np.random.randint(0, 10000))
    parser.add_argument('-train',
                        action='store_true',
                        help="(bool) Train flag to true")
    parser.add_argument(
        '-validate',
        action='store_true',
        help="(bool) Validate flag to true (checks all models)")
    parser.add_argument(
        '-evaluate',
        action='store_true',
        help="(bool) Evaluate flag to true (checks last model only)")
    parser.add_argument(
        '-plot',
        action='store_true',
        help="(bool) Plot all validation sets (checks last model only)")
    parser.add_argument('-layer_size',
                        type=int,
                        help="Nodes in hidden layers",
                        default=100)
    parser.add_argument('-layers',
                        type=int,
                        help="Number of hiddent layers",
                        default=3)
    parser.add_argument('-lr',
                        type=float,
                        help="Number of hiddent layers",
                        default=1e-6)

    args = parser.parse_args(args)
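
    # Example invocation (hypothetical script name and paths):
    #   python train_model.py data/train/ data/test/ output/models/ -train \
    #          -layer_size 100 -layers 3 -lr 1e-6
    # On later runs, -validate or -evaluate (optionally with -plot) reuse the
    # models.pkl stored in the save folder instead of retraining.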

    # Load files
    folder_train = args.folder_train
    folder_test = args.folder_test
    save_folder = args.savefolder

    # Make the save_folder if it does not exist
    if not os.path.exists(os.path.dirname(save_folder)):
        os.makedirs(os.path.dirname(save_folder))

    files_train = [f for f in os.listdir(folder_train) if f.endswith('.npz')]
    files_test = [
        f for f in os.listdir(folder_test + "/") if f.endswith('.npz')
    ]

    # Initialize desired states extractor
    dse = desired_states_extractor.desired_states_extractor()
    ####################################################################

    ####################################################################
    # if -train
    # Else try to load pre-trained sets in a file called "models.pkl"
    if args.train:
        nets = []
        for filename in tqdm(sorted(files_train)):
            model = dse.train(os.path.join(folder_train, filename),
                              layer_size=args.layer_size,
                              layers=args.layers,
                              lr=args.lr)
            print(model[0].network)
            nets.append(copy.deepcopy(model))
        fh.save_pkl(nets, "%s/models.pkl" % (save_folder))
    else:
        nets = fh.load_pkl("%s/models.pkl" % (save_folder))
    ####################################################################

    ####################################################################
    # If -validate
    # Crosscheck all models against all validation files
    if args.validate:
        v = []
        for model in tqdm(nets):
            e = []
            for filename in sorted(files_test):
                _, s, f = dse.extract_states(folder_test + "/" + filename,
                                             load_pkl=True)
                _, corr, _ = dse.evaluate_model(model[0], s, f)
                e.append(corr)
            v.append(e)
            print(np.mean(e))  # Display progress

        # Save to file
        vname = os.path.basename(os.path.dirname(folder_test))
        fh.save_pkl(
            v, "%s/validation_%s_id%i.pkl" % (save_folder, vname, args.id))
    ####################################################################

    ####################################################################
    # If -evaluate
    # Crosscheck the correlation of the last model against validation set
    # This is mainly for debugging purposes on the last model
    if args.evaluate:
        # Get the most recent network
        model = nets[-1]

        # Evaluate the correlation for the most recent network
        # to the validation dataset
        e = []
        y_pred = []
        for i, filename in enumerate(sorted(files_test)):
            t, s, f = dse.extract_states(args.folder_test + "/" + filename,
                                         load_pkl=True)
            _, corr, y_pred_i = dse.evaluate_model(model[0], s, f)
            e.append(corr)
            if args.plot:
                fname = "nn_test_%s_%i.%s" % (os.path.dirname(save_folder), i,
                                              "pdf")
                folder = "figures/nn/"
                if not os.path.exists(os.path.dirname(folder)):
                    os.makedirs(os.path.dirname(folder))
                filename_raw = os.path.splitext(os.path.basename(fname))[0]
                plt = pp.setup()
                plt.plot(t, f, color="blue", label="Real")
                plt.plot(t, y_pred_i, color="red", label="Predicted")
                plt.ylabel("Fitness")
                plt.xlabel("Time [s]")
                plt.legend(loc="upper left", ncol=2)
                plt = pp.adjust(plt)
                plt.savefig(folder + "%s.%s" % (filename_raw, "pdf"))
                plt.close()

        # Display some data
        if not args.plot:
            print(np.mean(e))  # Mean correlation over the validation files
            print(model[0].optimizer)  # Optimizer parameters
            print(model[0].network)  # Network parameters
Example #5
sim.make(controller,
         agent,
         clean=True,
         animation=False,
         logger=False,
         verbose=False)

# Run it
f = []
for j in range(args.iterations):
    print("----------------------- %i ----------------------" % j)
    # Generate a random policy
    policy = np.random.rand(pr_states, pr_actions)
    policy = np.reshape(policy,
                        (policy.size // pr_actions, pr_actions))  # Resize policy
    if pr_actions > 1:
        policy = matop.normalize_rows(policy)  # Normalize rows
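
    # Sketch (assumption about matop.normalize_rows): each row of the policy
    # is presumably rescaled to sum to 1, so that every state maps to a
    # probability distribution over the pr_actions actions, roughly:
    #   policy = policy / policy.sum(axis=1, keepdims=True)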

    # Benchmark its performance
    f.append(
        sim.benchmark(controller,
                      agent,
                      policy,
                      fitness,
                      robots=args.n,
                      runs=args.runs,
                      time_limit=args.t,
                      make=False))

fh.save_pkl(
    f, "data/%s/benchmark_random_%s_t%i_r%i_runs%i.pkl" %
    (controller, controller, args.t, args.n, args.runs))
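
# Reading the benchmark back (sketch; the layout of "f" depends on what
# sim.benchmark returns, here assumed to be one set of fitnesses per policy;
# the placeholders mirror the format string above):
#   f = fh.load_pkl("data/<controller>/benchmark_random_<controller>_t<t>_r<n>_runs<runs>.pkl")
#   print([np.mean(fi) for fi in f])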
Example #6
def main(args):
	####################################################################		
	# Initialize

	# Argument parser
	parser = argparse.ArgumentParser(
		description='Simulate a task to gather the data for optimization'
		)

	parser.add_argument('controller', type=str, 
		help="Controller to use")
	parser.add_argument('-generations', type=int, default=50,
		help="Max generations after which the evolution quits, default = 50")
	parser.add_argument('-pop', type=int, default=100,
		help="Population size used in the evolution, default = 100")
	parser.add_argument('-t', type=int, default=200,
		help="Time for which each simulation is executed, deafult = 200s")
	parser.add_argument('-nmin', type=int, default=10,
		help="Minimum number of robots simulated, default = 10")
	parser.add_argument('-nmax', type=int, default=20,
		help="Maximum number of robots simulated, default = 20")
	parser.add_argument('-reruns', type=int, default=5,
		help="Number of policy re-evaluations, default = 5")
	parser.add_argument('-plot', type=str, default=None,
		help="Specify the relative path to a pkl \
			evolution file to plot the evolution.")
	parser.add_argument('-environment', type=str, default="square20",
		help=" Environment used in the simulations, \
			default is a square room of size 20 by 20.")
	parser.add_argument('-id', type=int, default=np.random.randint(0,10000),
		help="ID of evolutionary run, default = random integer")
	parser.add_argument('-resume', type=str, default=None,
		help="If specified, it will resume the evolution \
			from a previous pkl checkpoint file")
	parser.add_argument('-evaluate', type=str, default=None,
		help="If specified, it will evaluate the best result \
			in an evolution pkl file")

	args = parser.parse_args(args)
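
	# Example invocation (hypothetical script name; "aggregation" stands in for
	# any controller name known to parameters.get):
	#   python main_evolution.py aggregation -generations 50 -pop 100 -t 200
	# Add -resume <checkpoint.pkl> to continue a saved evolution, or
	# -evaluate <checkpoint.pkl> to re-run the best individual from one.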

	# Load parameters
	fitness, controller, agent, pr_states, pr_actions = \
								parameters.get(args.controller)

	# Set up path to filename to save the evolution
	folder = "data/%s/evolution/" % (controller)
	directory = os.path.dirname(folder)
	if not os.path.exists(directory):
		os.makedirs(directory)
	filename = folder + "evolution_standard_%s_t%i_%i" % (controller, args.t, args.id)

	# Evolution API setup
	e = evolution.evolution()
	e.setup(fitnessfunction, 
		GENOME_LENGTH=pr_states*pr_actions, 
		POPULATION_SIZE=args.pop)
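
	# Note (assumption): with GENOME_LENGTH = pr_states*pr_actions, each genome
	# is presumably reshaped into a (pr_states, pr_actions) policy matrix inside
	# fitnessfunction, mirroring the reshape used in Example #5, e.g.:
	#   policy = np.reshape(np.asarray(individual), (pr_states, pr_actions))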
	####################################################################
	

	####################################################################
	# Plot file from file args.plot if indicated
	####################################################################
	if args.plot is not None:
		plot(args.plot)
	
	####################################################################
	# Evolve or evaluate
	# Swarmulator API set up
	sim = simulator()
	sim.sim.runtime_setting("time_limit", str(args.t))
	sim.sim.runtime_setting("simulation_realtimefactor", str("300"))
	sim.sim.runtime_setting("environment", args.environment)
	sim.sim.runtime_setting("fitness", fitness)
	sim.sim.runtime_setting("pr_states", str(pr_states))
	sim.sim.runtime_setting("pr_actions", str(pr_actions))

	# if -evaluate <path_to_evolution_savefile>
	# Evaluate the performance of the best individual from the evolution file
	if args.evaluate is not None:
		sim.make(controller=controller, agent=agent, 
			clean=True, animation=False, logger=True, verbose=False)

		# Load the evolution parameters
		e.load(args.evaluate)
		individual = e.get_best()

		# Evaluate and save the log
		runs = args.reruns  # an int, so no deep copy is needed
		args.reruns = 1
		f = []
		for i in range(runs):
			f.append(fitnessfunction(individual))
			sim.save_log(filename_ext="%s/evolution/evo_log_%i"%(controller,i))

		# Save evaluation data
		fh.save_pkl(f,"data/%s/benchmark_evolution_%s_t%i_r%i_%i_runs%i_%i.pkl"
			%(controller,controller,args.t,args.nmin,args.nmax,runs,args.id))
		
	# if -resume <path_to_evolution_savefile>
	# Resume evolution from file args.resume
	elif args.resume is not None:
		sim.make(controller=controller, agent=agent, 
			clean=True, animation=False, logger=False, verbose=False)

		# Load the evolution from the file
		e.load(args.resume)

		# Evolve starting from there
		p = e.evolve(generations=args.generations, 
			checkpoint=filename, 
			population=e.pop,
			verbose=True)

		# Save the evolution
		e.save(filename)

	# Otherwise, just run normally and start a new evolution from scratch
	else:
		sim.make(controller=controller, agent=agent, 
			clean=True, animation=False, logger=False, verbose=False)

		p = e.evolve(generations=args.generations, 
			checkpoint=filename, 
			verbose=True)

		# Save the evolution
		e.save(filename)
	####################################################################

if __name__ == "__main__":
	main(sys.argv[1:])  # Drop the program name before parsing the arguments