def get_functions_borders(num_vars=3, grid_size=1000000):

    grid = sobol_grid.generate(num_vars, grid_size)

    # Scale grid.

    grid[:, 0] = grid[:, 0] * (0.01 - 0.0001) + 0.0001
    grid[:, 1] = grid[:, 1] * (0.01 - 0.0001) + 0.0001
    grid[:, 2] = grid[:, 2] * (3.0 - 1.0) + 1.0

    print("Statistics over the objectives and constraints")
    print("==============================================")
    first_obj_observations = obj1(grid)
    second_obj_observations = obj2(grid)
    first_con_observations = c1(grid)
    max_first_obj = np.max(first_obj_observations)
    min_first_obj = np.min(first_obj_observations)
    max_second_obj = np.max(second_obj_observations)
    min_second_obj = np.min(second_obj_observations)
    max_first_con = np.max(first_con_observations)
    min_first_con = np.min(first_con_observations)
    print("Maximum observation of the first objective")
    print(max_first_obj)
    print("Minimum observation of the first objective")
    print(min_first_obj)
    print("Maximum observation of the second objective")
    print(max_second_obj)
    print("Minimum observation of the second objective")
    print(min_second_obj)
    print("Maximum observation of the first constraint")
    print(max_first_con)
    print("Minimum observation of the first constraint")
    print(min_first_con)
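
The scaling lines above map unit-cube Sobol points to the box bounds of each input via `x * (high - low) + low`. The same pattern as a small helper (a sketch; the helper name and the `bounds` argument are illustrative, not from the original source):

import numpy as np
import sobol_grid  # module used by all of these snippets

def scaled_sobol_grid(bounds, grid_size=1000000):
    # bounds: one (low, high) pair per input dimension.
    grid = sobol_grid.generate(len(bounds), grid_size)
    for d, (low, high) in enumerate(bounds):
        grid[:, d] = grid[:, d] * (high - low) + low
    return grid

# Equivalent to the scaling above:
# grid = scaled_sobol_grid([(0.0001, 0.01), (0.0001, 0.01), (1.0, 3.0)])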
Example 2
    def solve_using_grid(self, grid=None):

        if grid is None:
            grid = sobol_grid.generate(self.num_dims, 20000)

        values = np.ones((grid.shape[0], len(self.funs)))

        for i in range(len(self.funs)):
            values[:, i] = self.funs[i](grid, gradient=False)

        # Values must satisfy the constraints, if the problem has any.

        if len(self.constraints) > 0:

            con_evals = np.ones(grid.shape[0]).astype('bool')

            for con_fun in self.constraints:
                con_evals = np.logical_and(con_evals, con_fun(grid, gradient=False) >= 0)

            values_satisfying_constraints = values[con_evals, :]

        else:
            con_evals = np.ones(grid.shape[0]).astype('bool')
            values_satisfying_constraints = values

        pareto_indices = _cull_algorithm(values_satisfying_constraints)

        grid_points_satisfying_constraints = grid[con_evals, :]
        grid_to_add = grid_points_satisfying_constraints[pareto_indices, :]
        self.pop = population(self, 0)

        for i in range(grid_to_add.shape[0]):
            self.pop.push_back(grid_to_add[i, :].tolist())
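
`_cull_algorithm` is called throughout these examples but not defined in them; it returns a boolean mask selecting the non-dominated rows. A minimal sketch with the same interface (assuming minimization; O(n^2), for illustration only):

import numpy as np

def cull_non_dominated(values):
    # keep[i] is True unless some other row is no worse in every column
    # and strictly better in at least one (i.e. it dominates row i).
    keep = np.ones(values.shape[0], dtype=bool)
    for i in range(values.shape[0]):
        dominators = np.all(values <= values[i], axis=1) & np.any(values < values[i], axis=1)
        keep[i] = not dominators.any()
    return keep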
Example 3
def get_functions_borders(num_vars=4, grid_size=1000000):

    grid = sobol_grid.generate(num_vars, grid_size)

    # Scale grid.

    grid[:, 0] = grid[:, 0] * (5.0 - 0.125) + 0.125
    grid[:, 1] = grid[:, 1] * (5.0 - 0.125) + 0.125
    grid[:, 2] = grid[:, 2] * (10.0 - 0.1) + 0.1
    grid[:, 3] = grid[:, 3] * (10.0 - 0.1) + 0.1

    print("Statistics over the objectives and constraints")
    print("==============================================")
    first_obj_observations = obj1(grid)
    second_obj_observations = obj2(grid)
    first_con_observations = c1(grid)
    second_con_observations = c2(grid)
    third_con_observations = c3(grid)
    fourth_con_observations = c4(grid)
    max_first_obj = np.max(first_obj_observations)
    min_first_obj = np.min(first_obj_observations)
    max_second_obj = np.max(second_obj_observations)
    min_second_obj = np.min(second_obj_observations)
    max_first_con = np.max(first_con_observations)
    min_first_con = np.min(first_con_observations)
    max_second_con = np.max(second_con_observations)
    min_second_con = np.min(second_con_observations)
    max_third_con = np.max(third_con_observations)
    min_third_con = np.min(third_con_observations)
    max_fourth_con = np.max(fourth_con_observations)
    min_fourth_con = np.min(fourth_con_observations)
    print("Maximum observation of the first objective")
    print(max_first_obj)
    print("Minimum observation of the first objective")
    print(min_first_obj)
    print("Maximum observation of the second objective")
    print(max_second_obj)
    print("Minimum observation of the second objective")
    print(min_second_obj)
    print("Maximum observation of the first constraint")
    print(max_first_con)
    print("Minimum observation of the first constraint")
    print(min_first_con)
    print("Maximum observation of the second constraint")
    print(max_second_con)
    print("Minimum observation of the second constraint")
    print(min_second_con)
    print("Maximum observation of the third constraint")
    print(max_third_con)
    print("Minimum observation of the third constraint")
    print(min_third_con)
    print("Maximum observation of the fourth constraint")
    print(max_fourth_con)
    print("Minimum observation of the fourth constraint")
    print(min_fourth_con)
Example 4
def get_functions_borders(num_vars=2, grid_size=1000000, noise=0.1):

    grid = sobol_grid.generate(num_vars, grid_size)

    # Scale grid.

    grid[:, 0] = grid[:, 0] * (3.141592 - 0.000001) + 0.000001
    grid[:, 1] = grid[:, 1] * (3.141592 - 0.000001) + 0.000001

    print("Statistics over the objectives and constraints")
    print("==============================================")
    first_obj_observations = obj1(grid)
    second_obj_observations = obj2(grid)
    first_con_observations = c1(grid)
    second_con_observations = c2(grid)
    max_first_obj = np.max(first_obj_observations)
    min_first_obj = np.min(first_obj_observations)
    max_second_obj = np.max(second_obj_observations)
    min_second_obj = np.min(second_obj_observations)
    max_first_con = np.max(first_con_observations)
    min_first_con = np.min(first_con_observations)
    max_second_con = np.max(second_con_observations)
    min_second_con = np.min(second_con_observations)
    print("Maximum observation of the first objective")
    print(max_first_obj)
    print("Minimum observation of the first objective")
    print(min_first_obj)
    print("Noise factor")
    print((max_first_obj - min_first_obj) * noise)
    print("Maximum observation of the second objective")
    print(max_second_obj)
    print("Minimum observation of the second objective")
    print(min_second_obj)
    print("Noise factor")
    print((max_second_obj - min_second_obj) * noise)
    print("Maximum observation of the first constraint")
    print(max_first_con)
    print("Minimum observation of the first constraint")
    print(min_first_con)
    print("Noise factor")
    print((max_first_con - min_first_con) * noise)
    print("Maximum observation of the second constraint")
    print(max_second_con)
    print("Minimum observation of the second constraint")
    print(min_second_con)
    print("Noise factor")
    print((max_second_con - min_second_con) * noise)
Example 5
    def solve_using_grid(self, grid=None):

        if grid is None or len(grid) == 0:
            grid = sobol_grid.generate(self.input_space.num_dims, 20000)

        values = np.ones((grid.shape[0], len(self.models.keys())))
        n_task = 0
        for key in self.tasks:

            if self.avg_over_hypers:
                values[:, n_task] = self.models[key].function_over_hypers(self.models[key].predict, grid)[0]
            else:
                values[:, n_task] = self.models[key].predict(grid)[0]

            n_task += 1

        if self.extended_domination_rule:
            values = self.apply_extended_domination_rule_to_values(values)

        pareto_indices = _cull_algorithm(values)

        values = values[pareto_indices, :]
        grid = grid[pareto_indices, :]

        # We remove repeated entries from the pareto front and the pareto set.

        frontier = values

        X = frontier[0:1, :]
        pareto_set = grid[0:1, :]

        for i in range(frontier.shape[0]):
            if np.min(cdist(frontier[i:(i + 1), :], X)) > 1e-8:
                X = np.vstack((X, frontier[i, :]))
                pareto_set = np.vstack((pareto_set, grid[i, :]))

        grid = pareto_set
        frontier = X

        self.pop = population(self, 0)

        for i in range(grid.shape[0]):
            self.pop.push_back(grid[i, :].tolist())
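
The duplicate-removal loop above (keep a row only if it is farther than 1e-8 from every row kept so far) recurs in several snippets below. The same idea as a self-contained helper (hypothetical name, not part of the original source):

import numpy as np
from scipy.spatial.distance import cdist

def unique_rows(A, tol=1e-8):
    # Keep each row whose distance to all previously kept rows exceeds tol.
    keep = [0]
    for i in range(1, A.shape[0]):
        if np.min(cdist(A[i:i + 1, :], A[keep, :])) > tol:
            keep.append(i)
    return A[keep, :]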
Example 6
    def solve_using_grid(self, grid=None):

        if grid is None:
            grid = sobol_grid.generate(self.num_dims, 20000)

        values = np.ones((grid.shape[0], len(self.funs)))

        for i in range(len(self.funs)):
            values[:, i] = self.funs[i](grid, gradient=False)

        pareto_indices = _cull_algorithm(values)

        grid_to_add = grid[pareto_indices, :]
        values = values[pareto_indices, :]

        self.pop = population(self, 0)

        for i in range(grid_to_add.shape[0]):
            self.pop.push_back(grid_to_add[i, :].tolist())
Example 7
def main(expt_dir):

    os.chdir(expt_dir)
    sys.path.append(expt_dir)

    options = parse_config_file(expt_dir, 'config.json')
    experiment_name = options["experiment-name"]

    # main_file = options['main_file']
    main_file = 'OSY_no_noisy'
    if main_file[-3:] == '.py':
        main_file = main_file[:-3]
    module = __import__(main_file)

    input_space = InputSpace(options["variables"])
    chooser_module = importlib.import_module('spearmint.choosers.' +
                                             options['chooser'])
    chooser = chooser_module.init(input_space, options)
    db = MongoDB(database_address=options['database']['address'])
    jobs = load_jobs(db, experiment_name)
    hypers = db.load(experiment_name, 'hypers')
    tasks = parse_tasks_from_jobs(jobs, experiment_name, options, input_space)

    if len(tasks) < 2:
        print 'Not a multi-objective problem!'
        return -1

    if options['language'] != "PYTHON":
        print 'Only python programs supported!'
        return -1

    objectives = dict()
    constraints = dict()

    for task in tasks:
        if tasks[task].type == 'objective':
            objectives[task] = tasks[task]
        else:
            constraints[task] = tasks[task]

    assert len(objectives) >= 2 and len(constraints) >= 1

    def create_fun(task):
        def fun(params, gradient=False):

            if len(params.shape) > 1 and params.shape[1] > 1:

                values = np.zeros(params.shape[0])
                params_orig = params

                for i in range(params_orig.shape[0]):
                    param = params[i, :]
                    param = param.flatten()
                    param = input_space.from_unit(np.array([param])).flatten()

                    values[i] = module.main(
                        0,
                        paramify_no_types(input_space.paramify(param)))[task]

            else:
                return module.main(
                    0, paramify_no_types(input_space.paramify(params)))[task]

            return values

        return fun

    funs_o = [create_fun(task) for task in objectives]
    funs_c = [create_fun(task) for task in constraints]

    moop = MOOP_basis_functions(funs_o,
                                input_space.num_dims,
                                constraints=funs_c)

    grid = sobol_grid.generate(input_space.num_dims,
                               grid_size=1000 * input_space.num_dims)

    # We only retain the feasible points

    moop.solve_using_grid(grid)

    reference = np.ones(len(objectives)) * 1e3

    hyper_volume_solution = moop.get_hypervolume(reference.tolist())

    result = moop.compute_pareto_front_and_set()

    front = result['frontier']
    pareto_set = result['pareto_set']

    with open('hypervolume_solution.txt', 'a') as f:
        print >> f, "%lf" % (hyper_volume_solution)

    # We iterate through each recommendation made

    i = 0
    more_recommendations = True
    while more_recommendations:

        recommendation = db.load(experiment_name, 'recommendations',
                                 {'id': i + 1})

        if recommendation is None:
            more_recommendations = False
        else:

            solution = input_space.to_unit(
                input_space.vectorify(recommendation['params']))

            if len(solution.shape) == 1:
                solution = solution.reshape((1, len(solution)))

            # We compute the objective values associated to this recommendation

            values_solution = np.zeros((solution.shape[0], len(objectives)))

            for j in range(values_solution.shape[0]):
                for k in range(values_solution.shape[1]):
                    values_solution[j, k] = funs_o[k](solution[j:(j + 1), :])

            moop = MOOP_basis_functions(funs_o, input_space.num_dims)

            moop.set_population(solution)

            hyper_volume = moop.get_hypervolume(reference.tolist())

            # We make sure that there are no infeasible points recommended
            # If there are infeasible recommendations we return 0 as the hypervolume

            all_feasible = True

            for k in range(len(funs_c)):
                all_feasible = all_feasible and not np.any(
                    funs_c[k](solution) < 0)

            if not all_feasible:
                hyper_volume = 0.0

            with open('hypervolumes.txt', 'a') as f:
                print >> f, "%lf" % (hyper_volume)

            with open('evaluations.txt', 'a') as f_handle:
                np.savetxt(
                    f_handle,
                    np.array([recommendation['num_complete_tasks'].values()]),
                    delimiter=' ',
                    newline='\n')

        i += 1
Example 8
def test_generate():
    grid = sobol_grid.generate(10, grid_size=100, grid_seed=1)

    assert grid.shape == (100, 10)
    assert np.all(grid[0] == 0.5)
Example 9
	def compute_cell_information(self, obj_model_dict):

		cached_information = dict()

		# First we obtain a sample from the Pareto Frontier of NUM_POINTS_FRONTIER

		moop = MOOP(obj_model_dict, obj_model_dict, self.input_space, False)
		
		grid = sobol_grid.generate(self.input_space.num_dims, self.input_space.num_dims * GRID_SIZE)

		if USE_GRID_ONLY:

			moop.solve_using_grid(grid)

			for i in range(len(obj_model_dict.keys())):
				result = self.find_optimum_gp(obj_model_dict[ obj_model_dict.keys()[ i ] ], grid)
				moop.append_to_population(result)
		else:

			assert NSGA_POP > len(obj_model_dict.keys()) + 1

			moop.solve_using_grid(grid)

			for i in range(len(obj_model_dict.keys())):
				result = self.find_optimum_gp(obj_model_dict[ obj_model_dict.keys()[ i ] ], grid)
				moop.append_to_population(result)

			pareto_set = moop.compute_pareto_front_and_set_summary(NSGA_POP)['pareto_set']

			moop.initialize_population(np.maximum(NSGA_POP - pareto_set.shape[ 0 ], 0))

			for i in range(pareto_set.shape[ 0 ]):
				moop.append_to_population(pareto_set[ i, : ])

			moop.evolve_population_only(NSGA_EPOCHS)

			for i in range(pareto_set.shape[ 0 ]):
				moop.append_to_population(pareto_set[ i, : ])

		result = moop.compute_pareto_front_and_set_summary(NUM_POINTS_FRONTIER)

		print 'Inner multi-objective problem solved!'

		means_objectives = np.zeros((obj_model_dict[ obj_model_dict.keys()[ 0 ] ].inputs.shape[ 0 ], len(obj_model_dict)))

		k = 0
		for obj in obj_model_dict:
			means_objectives[ :, k ] = obj_model_dict[ obj ].predict(obj_model_dict[ obj ].inputs)[ 0 ]
			k += 1

		v_inf = np.ones((1, len(obj_model_dict))) * np.inf
		v_ref = np.ones((1, len(obj_model_dict))) * 1e3

		# We add the non-dominated prediction and the observed inputs to the frontier

		frontier = result['frontier']
		frontier = np.vstack((frontier, means_objectives))
		frontier = frontier[ _cull_algorithm(frontier), ]

		# We remove repeated entries from the pareto front

		X = frontier[ 0 : 1, : ]

		for i in range(frontier.shape[ 0 ]):
			if np.min(cdist(frontier[ i : (i + 1), : ], X)) > 1e-8:
				X = np.vstack((X, frontier[ i, : ]))

		frontier = X

		cached_information['frontier'] = frontier

		# We sort the entries in the pareto frontier

		frontier_sorted = np.vstack((-v_inf, cached_information['frontier'], v_ref, v_inf))

		for i in range(len(obj_model_dict)):
			frontier_sorted[ :, i ] = np.sort(frontier_sorted[ :, i ])

		# Now we build the info associated to each cell

		n_repeat = (frontier_sorted.shape[ 0 ] - 2) ** frontier_sorted.shape[ 1 ]

		cached_information['cells'] = dict()

		added_cells = 0
		for i in range(n_repeat):

			cell = dict()

			indices = np.zeros(len(obj_model_dict)).astype(int)

			j = i

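			# Decode the linear cell index i into one index per objective:
			# the digits of i in base (frontier_sorted.shape[0] - 2).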
			for k in range(len(obj_model_dict)):
				indices[ k ] = int(j % (frontier_sorted.shape[ 0 ] - 2))
				j = np.floor(j / (frontier_sorted.shape[ 0 ] - 2))

			u = np.zeros(len(obj_model_dict))

			for k in range(len(obj_model_dict)):
				u[ k ] = frontier_sorted[ int(indices[ k ] + 1), k ]

			l = np.zeros(len(obj_model_dict))
				
			for k in range(len(obj_model_dict)):
				l[ k ] = frontier_sorted[ indices[ k ], k ]

			# If the cell is dominated we discard it

			is_dominated = False
			for k in range(frontier.shape[ 0 ]):
				if np.all(l >= frontier[ k, : ]):
					is_dominated = True

			if is_dominated:
				continue

			# We find the vector v

			v = np.zeros(len(obj_model_dict))

			for k in range(len(obj_model_dict)):

				l_tmp = np.copy(l)

				for j in range(int(frontier_sorted.shape[ 0 ] - indices[ k ] - 1)):
					l_tmp[ k ] = frontier_sorted[ indices[ k ] + j, k ]

					dominates_all = True
					for h in range(frontier.shape[ 0 ]):
						if np.all(frontier[ h, : ] <= l_tmp):
							dominates_all = False
							break

					if not dominates_all:
						break

				if not dominates_all:
					v[ k ] = l_tmp[ k ]
				else:
					v[ k ] = v_ref[ 0, k ]

			# We compute the quantities required for evaluating the gain in hyper-volume

			# We find the points dominated by u

			dominated_by_u = frontier
			h = 0
			while (h < dominated_by_u.shape[ 0 ]):
				if (not np.any(u < dominated_by_u[ h, : ])) and (not np.all(u == dominated_by_u[ h, : ])):
					dominated_by_u = np.delete(dominated_by_u, (h), axis = 0)
				else:
					h += 1

			# The value of minusQ2plusQ3 is given by the hypervolume of the dominated points with reference v

			if dominated_by_u.shape[ 0 ] == 0:
				minusQ2plusQ3 = 0.0
			else:
				hv = HyperVolume(v.tolist())
				minusQ2plusQ3 = -hv.compute(dominated_by_u.tolist())
			
			cell['u'] = u
			cell['l'] = l
			cell['v'] = v
			cell['dominated_by_u'] = dominated_by_u
			cell['minusQ2plusQ3'] = minusQ2plusQ3
			
			cached_information['cells'][ str(added_cells) ] = cell
			added_cells += 1
			
		n_cells = added_cells

		cached_information['n_cells'] = n_cells
		cached_information['v_ref'] = v_ref[ 0, : ]
		cached_information['n_objectives'] = len(obj_model_dict)

#		self.print_cell_info(cached_information)

		return cached_information
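
A minimal usage sketch of the HyperVolume helper invoked above (assuming the same HyperVolume class with a compute(front) method, as the calls above suggest; the numbers are purely illustrative):

reference = [11.0, 11.0]                      # reference point, worse than every front point
front = [[1.0, 9.0], [5.0, 5.0], [9.0, 1.0]]  # toy 2-D Pareto front (minimization)

hv = HyperVolume(reference)
volume = hv.compute(front)  # volume dominated by the front, up to the reference point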
Example 10
    def compute_information(self, obj_model_dict):

        cached_information = dict()

        # First we obtain a sample from the Pareto Frontier of NUM_POINTS_FRONTIER

        moop = MOOP(obj_model_dict, obj_model_dict, self.input_space, False)

        grid = sobol_grid.generate(self.input_space.num_dims,
                                   self.input_space.num_dims * GRID_SIZE)

        if USE_GRID_ONLY:

            moop.solve_using_grid(grid)

            for i in range(len(obj_model_dict.keys())):
                result = self.find_optimum_gp(
                    obj_model_dict[obj_model_dict.keys()[i]], grid)
                moop.append_to_population(result)

        else:

            assert NSGA_POP > len(obj_model_dict.keys()) + 1

            moop.solve_using_grid(grid)

            for i in range(len(obj_model_dict.keys())):
                result = self.find_optimum_gp(
                    obj_model_dict[obj_model_dict.keys()[i]], grid)
                moop.append_to_population(result)

            pareto_set = moop.compute_pareto_front_and_set_summary(
                NSGA_POP)['pareto_set']

            moop.initialize_population(
                np.maximum(NSGA_POP - pareto_set.shape[0], 0))

            for i in range(pareto_set.shape[0]):
                moop.append_to_population(pareto_set[i, :])

            moop.evolve_population_only(NSGA_EPOCHS)

            for i in range(pareto_set.shape[0]):
                moop.append_to_population(pareto_set[i, :])

        result = moop.compute_pareto_front_and_set_summary(NUM_POINTS_FRONTIER)

        print 'Internal optimization finished'

        # We remove repeated entries from the pareto front

        frontier = result['frontier']

        X = frontier[0:1, :]

        for i in range(frontier.shape[0]):
            if np.min(cdist(frontier[i:(i + 1), :], X)) > 1e-8:
                X = np.vstack((X, frontier[i, :]))

        frontier = X

        cached_information['frontier'] = frontier
        cached_information['v_ref'] = np.ones(len(obj_model_dict))

        for k in range(frontier.shape[1]):
            cached_information['v_ref'][k] = np.max(frontier[:, k]) + 1.0

        return cached_information
Example 11
	def compute_cell_information_2_objectives(self, obj_model_dict, cand):

		assert len(obj_model_dict) == 2

		cached_information = dict()

		# First we obtain a sample from the Pareto Frontier of NUM_POINTS_FRONTIER

		moop = MOOP(obj_model_dict, obj_model_dict, self.input_space, False)
		
		grid = sobol_grid.generate(self.input_space.num_dims, self.input_space.num_dims * GRID_SIZE)

		if USE_GRID_ONLY:

			moop.solve_using_grid(grid)

			for i in range(len(obj_model_dict.keys())):
				result = self.find_optimum_gp(obj_model_dict[ obj_model_dict.keys()[ i ] ], grid)
				moop.append_to_population(result)
		else:

			assert NSGA_POP > len(obj_model_dict.keys()) + 1

			moop.solve_using_grid(grid)

			for i in range(len(obj_model_dict.keys())):
				result = self.find_optimum_gp(obj_model_dict[ obj_model_dict.keys()[ i ] ], grid)
				moop.append_to_population(result)

			pareto_set = moop.compute_pareto_front_and_set_summary(NSGA_POP)['pareto_set']

			moop.initialize_population(np.maximum(NSGA_POP - pareto_set.shape[ 0 ], 0))

			for i in range(pareto_set.shape[ 0 ]):
				moop.append_to_population(pareto_set[ i, : ])

			moop.evolve_population_only(NSGA_EPOCHS)

			for i in range(pareto_set.shape[ 0 ]):
				moop.append_to_population(pareto_set[ i, : ])

		result = moop.compute_pareto_front_and_set_summary(NUM_POINTS_FRONTIER)

		# We compute the posterior means of the objectives at the observed inputs

		means_objectives = np.zeros((obj_model_dict[ obj_model_dict.keys()[ 0 ] ].inputs.shape[ 0 ], len(obj_model_dict)))

		k = 0
		for obj in obj_model_dict:
			means_objectives[ :, k ] = obj_model_dict[ obj ].predict(obj_model_dict[ obj ].inputs)[ 0 ]
			k += 1


		v_inf = np.ones((1, len(obj_model_dict))) * 1e3

		# We obtain the pareto frontier approximation which is added the observations

		frontier = result['frontier']
		frontier = np.vstack((frontier, means_objectives))
		frontier = frontier[ _cull_algorithm(frontier), ]

		# We remove repeated entries from the pareto front

		X = frontier[ 0 : 1, : ]


		for i in range(frontier.shape[ 0 ]):
			if np.min(cdist(frontier[ i : (i + 1), : ], X)) > 1e-8:
				X = np.vstack((X, frontier[ i, : ]))
	
		frontier = X

		cached_information['frontier'] = frontier

		# We sort the entries in the pareto frontier

		frontier_sorted = np.vstack((-v_inf, cached_information['frontier'], v_inf))

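		# In two dimensions the Pareto front is a staircase: sorting the first
		# objective ascending and the second descending aligns the cell grid with it.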
		frontier_sorted[ :, 0 ] = np.sort(frontier_sorted[ :, 0 ])
		frontier_sorted[ :, 1 ] = np.sort(frontier_sorted[ :, 1 ] * -1.0) * -1.0

		# Now we build the info associated to each cell

		n_cells = (frontier_sorted.shape[ 0 ] - 1) + (frontier_sorted.shape[ 0 ] - 2)

		# We add first the non dominated cells

		cached_information['cells'] = dict()
		added_cells = 0

		for i in range(frontier_sorted.shape[ 0 ] - 1):

			cell = dict()
			cell['l'] = np.array([ frontier_sorted[ i, 0 ], frontier_sorted[ 0, 0 ] ])
			cell['u'] = np.array([ frontier_sorted[ i + 1, 0 ], frontier_sorted[ i, 1 ] ])
			cell['is_dominated'] = False

			cached_information['cells'][ str(added_cells) ] = cell
			added_cells += 1
	
#		# Now the dominated cells
#
#		for i in range(frontier_sorted.shape[ 0 ] - 2):
#
#			cell = dict()
#			cell['l'] = np.array([ frontier_sorted[ i + 1, 0 ], frontier_sorted[ i + 1, 1 ] ])
#			cell['u'] = np.array([ frontier_sorted[ frontier_sorted.shape[ 0 ] - 1, 0 ], frontier_sorted[ i, 1 ] ])
#			cell['is_dominated'] = True
#			cached_information['cells'][ str(added_cells) ] = cell
#			added_cells += 1

#		cached_information['n_cells'] = n_cells

		cached_information['n_cells'] = added_cells
		cached_information['n_objectives'] = len(obj_model_dict)

		# We compute a sobol grid to approximate the integral over x. By default we use
		# 100 points per dimension (too much?)
		
		if 'sur_points_per_dimension' not in self.options:
#			cached_information['grid'] = np.array(sobol_grid.generate(self.num_dims, 100 * self.num_dims))
			cached_information['grid'] = np.random.uniform(size = (100 * self.num_dims, self.num_dims))
		else:
#			cached_information['grid'] = np.array(sobol_grid.generate(self.num_dims, \
#				int(self.options['sur_points_per_dimension']) * self.num_dims))
			cached_information['grid'] = np.random.uniform(size = (int(self.options['sur_points_per_dimension']) \
				* self.num_dims, self.num_dims))

		# We obtain the samples needed for a Monte Carlo approximation of the objective

		cached_information['gauss_samples_grid'] = np.random.normal(size = (cached_information['grid'].shape[ 0 ], \
			len(obj_model_dict), N_SAMPLES))

		cached_information['gauss_sample_cand'] = np.random.normal(size = N_SAMPLES)

#		self.print_cell_info(cached_information)

		return cached_information
Example 12
def get_functions_borders(num_vars=6, grid_size=1000000, noise=0.1):

    grid = sobol_grid.generate(num_vars, grid_size)

    # Scale grid.

    grid[:, 0] = grid[:, 0] * (10.0 - 0.0) + 0.0
    grid[:, 1] = grid[:, 1] * (10.0 - 0.0) + 0.0
    grid[:, 2] = grid[:, 2] * (5.0 - 1.0) + 1.0
    grid[:, 3] = grid[:, 3] * (6.0 - 0.0) + 0.0
    grid[:, 4] = grid[:, 4] * (5.0 - 1.0) + 1.0
    grid[:, 5] = grid[:, 5] * (10.0 - 0.0) + 0.0

    print("Statistics over the objectives and constraints")
    print("==============================================")
    first_obj_observations = obj1(grid)
    second_obj_observations = obj2(grid)
    first_con_observations = c1(grid)
    second_con_observations = c2(grid)
    third_con_observations = c3(grid)
    fourth_con_observations = c4(grid)
    fifth_con_observations = c5(grid)
    sixth_con_observations = c6(grid)
    max_first_obj = np.max(first_obj_observations)
    min_first_obj = np.min(first_obj_observations)
    max_second_obj = np.max(second_obj_observations)
    min_second_obj = np.min(second_obj_observations)
    max_first_con = np.max(first_con_observations)
    min_first_con = np.min(first_con_observations)
    max_second_con = np.max(second_con_observations)
    min_second_con = np.min(second_con_observations)
    max_third_con = np.max(third_con_observations)
    min_third_con = np.min(third_con_observations)
    max_fourth_con = np.max(fourth_con_observations)
    min_fourth_con = np.min(fourth_con_observations)
    max_fifth_con = np.max(fifth_con_observations)
    min_fifth_con = np.min(fifth_con_observations)
    max_sixth_con = np.max(sixth_con_observations)
    min_sixth_con = np.min(sixth_con_observations)
    print("Maximum observation of the first objective")
    print(max_first_obj)
    print("Minimum observation of the first objective")
    print(min_first_obj)
    print("Noise factor")
    print((max_first_obj - min_first_obj) * noise)
    print("Maximum observation of the second objective")
    print(max_second_obj)
    print("Minimum observation of the second objective")
    print(min_second_obj)
    print("Noise factor")
    print((max_second_obj - min_second_obj) * noise)
    print("Maximum observation of the first constraint")
    print(max_first_con)
    print("Minimum observation of the first constraint")
    print(min_first_con)
    print("Noise factor")
    print((max_first_con - min_first_con) * noise)
    print("Maximum observation of the second constraint")
    print(max_second_con)
    print("Minimum observation of the second constraint")
    print(min_second_con)
    print("Noise factor")
    print((max_second_con - min_second_con) * noise)
    print("Maximum observation of the third constraint")
    print(max_third_con)
    print("Minimum observation of the third constraint")
    print(min_third_con)
    print("Noise factor")
    print((max_third_con - min_third_con) * noise)
    print("Maximum observation of the fourth constraint")
    print(max_fourth_con)
    print("Minimum observation of the fourth constraint")
    print(min_fourth_con)
    print("Noise factor")
    print((max_fourth_con - min_fourth_con) * noise)
    print("Maximum observation of the fifth constraint")
    print(max_fifth_con)
    print("Minimum observation of the fifth constraint")
    print(min_fifth_con)
    print("Noise factor")
    print((max_fifth_con - min_fifth_con) * noise)
    print("Maximum observation of the sixth constraint")
    print(max_sixth_con)
    print("Minimum observation of the sixth constraint")
    print(min_sixth_con)
    print("Noise factor")
    print((max_sixth_con - min_sixth_con) * noise)
Example 13
def test_generate():
    grid = sobol_grid.generate(10, grid_size=100, grid_seed=1)

    assert grid.shape == (100, 10)
    assert np.all(grid[0] == 0.5)
Example 14
def find_reference_point(tasks, module, input_space, grid_size = 20000):

	def create_fun_neg(task):
		def fun(params, gradient = False):

			if len(params.shape) > 1 and params.shape[ 1 ] > 1:
				params = params.flatten()

			params = input_space.from_unit(np.array([ params ])).flatten()

			return -1.0 * module.main(0, paramify_no_types(input_space.paramify(params)))[ task ]

		return fun

	funs_neg = [ create_fun_neg(task) for task in tasks ]


	moop_neg = MOOP_basis_functions(funs_neg, input_space.num_dims)
	moop_neg.evolve(400, 400)
	result = moop_neg.compute_pareto_front_and_set()
	front = result['frontier']
	pareto_set = result['pareto_set']

	grid = sobol_grid.generate(input_space.num_dims, grid_size = grid_size, grid_seed = npr.randint(0, grid_size))
	grid = np.vstack((grid, pareto_set))

	# We add the corners of the hyper-cube to the grid, since the optimum is often attained there

	for i in range(2**input_space.num_dims):

		vector = np.zeros(input_space.num_dims)
		
		for j in range(input_space.num_dims):
			if (i & 2**j) != 0:
				vector[ j ] = 1.0

		grid = np.vstack((grid, vector.reshape((1, input_space.num_dims))))

	reference_point = np.zeros(len(funs_neg))

	for i in range(len(funs_neg)):

		grid_values = np.zeros(grid.shape[ 0 ])

		for j in range(grid.shape[ 0 ]):
			grid_values[ j ] = funs_neg[ i ](grid[ j, : ])

		best = grid[ np.argmin(grid_values), : ]

		def f(x):
			if x.ndim == 1:
				x = x[None, :]
			value = funs_neg[ i ](x)

			return value

		bounds = [ (0.0, 1.0) ] * input_space.num_dims

		x_opt, y_opt, opt_info = spo.fmin_l_bfgs_b(f, best, bounds = bounds, disp = 0, approx_grad = True)

		reference_point[ i ] = -1.0 * y_opt + np.abs(-1.0 * y_opt * 0.01)

	return reference_point
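
The bit-mask loop in find_reference_point above enumerates the 2**num_dims corners of the unit hyper-cube; an equivalent, more compact formulation (a sketch reusing the same input_space and grid names):

import itertools
import numpy as np

corners = np.array(list(itertools.product([0.0, 1.0], repeat=input_space.num_dims)))
grid = np.vstack((grid, corners))  # same effect as the corner loop above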
Example 15
def main(expt_dir):

	os.chdir(expt_dir)
	sys.path.append(expt_dir)

	options         = parse_config_file(expt_dir, 'config.json')
	experiment_name = options["experiment-name"]
	options['main_file'] = 'prog_no_noisy'

	main_file = options['main_file']
	if main_file[-3:] == '.py':
		main_file = main_file[:-3]
	module  = __import__(main_file)

	input_space     = InputSpace(options["variables"])
	chooser_module  = importlib.import_module('spearmint.choosers.' + options['chooser'])
	chooser         = chooser_module.init(input_space, options)
	db              = MongoDB(database_address=options['database']['address'])
	jobs            = load_jobs(db, experiment_name)
	hypers          = db.load(experiment_name, 'hypers')
	tasks           = parse_tasks_from_jobs(jobs, experiment_name, options, input_space)

	if len(tasks) < 2:
		print 'Not a multi-objective problem!'
		return -1

	if options['language'] != "PYTHON":
		print 'Only python programs supported!'
		return -1

	for task in tasks:
		if tasks[ task ].type != 'objective':
			print 'Not a multi-objective problem!'
			return -1

	def create_fun(task):
		def fun(params, gradient = False):

			if len(params.shape) > 1 and params.shape[ 1 ] > 1:
				params = params.flatten()

			params = input_space.from_unit(np.array([ params ])).flatten()

			return module.main(0, paramify_no_types(input_space.paramify(params)))[ task ]

		return fun

	funs = [ create_fun(task) for task in tasks ]

	moop = MOOP_basis_functions(funs, input_space.num_dims)

#	moop.evolve(1, 8)

	grid = sobol_grid.generate(input_space.num_dims, grid_size = 1000 * input_space.num_dims)

	moop.solve_using_grid(grid)

#	reference = find_reference_point_using_direct(tasks, module, input_space)

#	reference = reference + np.abs(reference) * 0.1

	reference = np.ones(len(tasks)) * 7

	hyper_volume_solution = moop.get_hypervolume(reference.tolist())

	result = moop.compute_pareto_front_and_set()
	front = result['frontier']
	pareto_set = result['pareto_set']

#	os.remove('hypervolume_solution.txt')

	with open('hypervolume_solution.txt', 'a') as f:
		print >> f, "%lf" % (hyper_volume_solution)

#	os.remove('hypervolumes.txt')

	# We iterate through each recommendation made

	i = 0
	more_recommendations = True
	while more_recommendations:

		recommendation = db.load(experiment_name, 'recommendations', {'id' : i + 1})

		if recommendation is None:
			more_recommendations = False
		else:

			solution = input_space.to_unit(input_space.vectorify(recommendation[ 'params' ]))

			if len(solution.shape) == 1:
				solution = solution.reshape((1, len(solution)))
			
			# We compute the objective values associated to this recommendation
	
			values_solution = np.zeros((solution.shape[ 0 ], len(tasks)))
		
			for j in range(values_solution.shape[ 0 ]):
				for k in range(values_solution.shape[ 1 ]):
					values_solution[ j, k ] = funs[ k ](solution[ j : (j + 1), : ])

			moop = MOOP_basis_functions(funs, input_space.num_dims)

			moop.set_population(solution)

			hyper_volume = moop.get_hypervolume(reference.tolist())

			with open('hypervolumes.txt', 'a') as f:
				print >> f, "%lf" % (hyper_volume)

			with open('mean_min_distance_to_frontier.txt', 'a') as f: 
				print >> f, "%lf" % (average_min_distance(values_solution, front))

			with open('mean_min_distance_from_frontier.txt', 'a') as f: 
				print >> f, "%lf" % (average_min_distance(front, values_solution))

			with open('mean_min_distance_to_pareto_set.txt', 'a') as f: 
				print >> f, "%lf" % (average_min_distance(input_space.from_unit(solution), \
				input_space.from_unit(pareto_set)))

			with open('mean_min_distance_from_pareto_set.txt', 'a') as f: 
				print >> f, "%lf" % (average_min_distance(input_space.from_unit(pareto_set), \
				input_space.from_unit(solution)))

			with open('evaluations.txt','a') as f_handle: 
				np.savetxt(f_handle, np.array([recommendation['num_complete_tasks'].values()]), delimiter = ' ', newline = '\n')

		i += 1