Example #1
def generate_points(NMbudget, omodel):
    points = []  # Main holder
    new_points = genfromelites()
    random_points = genrandom(NMbudget)
    try:
        points = np.vstack([new_points, random_points, mp.saved_points])
        print('succeeded')
    except Exception:  # mp.saved_points may not exist yet
        points = np.vstack([new_points, random_points])
    # Sort the candidate points by their niche index
    indexlist = []
    for point in points:
        index = tuple(mp.map.nichefinder(mp.feature_fun(point)))
        indexlist.append(index)
    order = sorted(range(len(indexlist)), key=indexlist.__getitem__)
    points = points[order]
    # Evaluate the acquisition function twice and keep the second pass;
    # this only matters if myacqMINUS is stochastic or has side effects
    point_values = [myacqMINUS(x, omodel) for x in points]
    point_values = [myacqMINUS(x, omodel) for x in points]
    # Order by acquisition value (descending) and keep the top performers
    pv = np.array(point_values)
    points = points[pv.argsort()[::-1]]
    # mp.saved_points = points[:NMbudget]
    return points[:NMbudget]
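
A minimal usage sketch, assuming the module state (mp, genfromelites, genrandom, myacqMINUS) is already configured; `surrogate` is a hypothetical fitted model:

# Hypothetical usage: pick the 16 most promising candidates for the next batch
candidates = generate_points(NMbudget=16, omodel=surrogate)
print(len(candidates))  # at most 16 points, best acquisition value first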
Example #2
def myacqKG(x, omodel, observations, net, basefit, fmodel=None):
    modifier = 1
    valid_ranges = mp.domain.valid_ranges
    mymap = copy.deepcopy(mp.map)

    if not validatepoint(x):
        x = conformpoint(x)
        modifier = 0  #violation_cost( x )

    # Posterior mean of the GP at the current point
    posterior_mean = np.double(
        omodel(torch.Tensor(x).reshape(
            -1, len(valid_ranges))).mean.detach().numpy())

    # Build the one-step-lookahead model on a copy of the observation list,
    # so repeated acquisition calls do not pollute the caller's data
    new_point = [x, posterior_mean, mp.feature_fun(x)]
    KG_model, _, _ = buildpymodel(observations + [new_point])

    # Re-score every net point under the lookahead model, again without
    # mutating the caller's net
    kg_net = [[
        point[0],
        np.double(
            KG_model(torch.Tensor(point[0]).reshape(
                -1, len(valid_ranges))).mean.detach().numpy()),
        point[2]
    ] for point in net]

    mymap, _ = niche_compete(kg_net, mymap)
    New_fitness = np.nansum(mymap.fitness.flatten())
    value = New_fitness - basefit
    print(value)
    print(x)
    return (-value * modifier)
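
Because myacqKG returns the negated improvement, it can be handed straight to a minimizer. A minimal driver sketch, assuming a fitted omodel, an observation list, a net from generate_net and its basefit (x0 is an arbitrary hypothetical start point):

from scipy.optimize import minimize

x0 = np.random.uniform(0, 1, len(mp.domain.valid_ranges))
result = minimize(myacqKG, x0,
                  args=(omodel, observations, net, basefit),
                  method='Nelder-Mead')
print(result.x, -result.fun)  # best fantasy point and its map improvement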
Example #3
def update_gen_map(map, points):
    '''Adds new observations to the map and calculates the
    percentage of points that were accepted into the map.
    '''
    # Sketch of the documented intent (the original body discarded `index`);
    # assumes the [genome, fitness, features] observation format used elsewhere.
    accepted = 0
    for point in points:
        index = tuple(mp.map.nichefinder(mp.feature_fun(point[0])))
        incumbent = map.fitness[index]
        if np.isnan(incumbent).any() or point[1] > incumbent:
            map.genomes[index] = point[0]
            map.fitness[index] = point[1]
            accepted += 1
    acceptance = accepted / len(points) if len(points) else 0.0
    return (map, acceptance)
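
A hypothetical call, assuming `new_observations` follows the same [genome, fitness, features] format:

mp.map, accept_rate = update_gen_map(mp.map, new_observations)
print(f'{accept_rate:.0%} of the batch entered the map')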
Example #4
def classprobabilities(models, x):
    '''Returns a map-shaped array of probabilities of f(x) falling in each
    niche, computed from the density of the posterior at point x in every
    feature dimension.

    INPUT :
    models              : a list of GPy model objects, one per feature
                          dimension (None: use the feature function directly)
    x                   : a single point. (Currently 1d)

    OUTPUT :
    nicheprobs          : an array shaped like mp.map.blank_map holding the
                          probability of x belonging to each niche.
    '''
    if models is None:
        # No models: all probability mass goes to the true niche of x
        nicheprobs = np.copy(mp.map.blank_map)
        trueindex = nichefinder(mp.feature_fun(x))

        indexes = it.product(*[range(dim) for dim in mp.feature_resolution])
        for index in indexes:
            nicheprobs[index] = 0
        try:
            nicheprobs[tuple(trueindex)] = 1
            return (nicheprobs)
        except (IndexError, TypeError):  # x falls outside the map
            return (nicheprobs)
    else:
        # Provide edges of niches from main map
        edges = mp.map.edges
        # Reshape input to the genome length (xdims was previously undefined;
        # the domain's valid ranges are assumed to give it)
        xdims = len(mp.domain.valid_ranges)
        z = np.array([[x]]).reshape(-1, xdims)
        # Posterior mean/variance for the current x in each feature dimension
        mulist, varlist = zip(*[model.predict(z) for model in models])
        mulist = [np.double(mu) for mu in mulist]
        varlist = [np.double(var) for var in varlist]

        # Per-dimension bin probabilities: P(bin_i) = CDF(edge_i+1) - CDF(edge_i)
        probability_list = []
        for count, mu in enumerate(mulist):
            distro = scipy.stats.norm(loc=mu, scale=np.sqrt(varlist[count]))
            cdfvals = distro.cdf(edges[count])
            featdimprobs = [(cdfvals[i + 1] - cdfvals[i])
                            for i in range(len(cdfvals) - 1)]
            probability_list.append(featdimprobs)

        # A niche's probability is the product of its per-dimension bin masses
        nicheprobs = np.copy(mp.map.blank_map)
        indexes = it.product(*[range(dim) for dim in mp.feature_resolution])
        for index in indexes:
            probs = [probability_list[i][index[i]] for i in range(len(index))]
            nicheprobs[index] = np.prod(probs)

        return (nicheprobs)
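
The per-dimension step above is just normal-CDF differences across the niche edges. A self-contained one-dimensional check of that rule (the edges and moments here are made-up numbers):

import numpy as np
import scipy.stats

edges = np.array([0.0, 0.25, 0.5, 0.75, 1.0])  # 4 niche bins in one dimension
mu, var = 0.4, 0.05  # hypothetical posterior moments at x
cdfvals = scipy.stats.norm(loc=mu, scale=np.sqrt(var)).cdf(edges)
binprobs = np.diff(cdfvals)  # P(bin_i) = CDF(edge_i+1) - CDF(edge_i)
print(binprobs, binprobs.sum())  # the remainder lies outside [0, 1]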
Example #5
def generate_net(n, omodel):
    valid_ranges = mp.constraints
    net_points = sobol_sample(n, valid_ranges)
    net_points = np.array(net_points).T
    net = [[
        point,
        np.double(
            omodel(torch.Tensor(point).reshape(
                -1, len(valid_ranges))).mean.detach().numpy()),
        mp.feature_fun(point)
    ] for point in net_points]
    return (net)
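
generate_net expects sobol_sample to return one sequence per dimension over the valid ranges. The real helper is not shown in these examples; a minimal stand-in, assuming scipy's qmc module and that valid_ranges is a list of (low, high) pairs:

from scipy.stats import qmc

def sobol_sample(n, valid_ranges):
    # n Sobol points scaled to the given ranges, returned dimension-major
    # to match the transpose applied in generate_net above
    sampler = qmc.Sobol(d=len(valid_ranges), scramble=True)
    lower = [r[0] for r in valid_ranges]
    upper = [r[1] for r in valid_ranges]
    return qmc.scale(sampler.random(n), lower, upper).T  # shape (d, n)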
Example #6
#     sobolmap,fitness = getpredmap(initial_samples)
#     sobol_fit_list.append(fitness)
#     pickle.dump(sobol_fit_list, filehandler)


####
### Random Samples

def random_point(n, dims):
    x = []
    for _ in range(n):
        x.append(np.random.uniform(0, 1, dims))
    return (x)


random_fit_list = []

for seed in range(1):
    ss.initseed(seed)
    x = random_point(10, 10)
    rand_points = [[xi, mp.fitness_fun(xi), mp.feature_fun(xi)] for xi in x]
    for point in rand_points:
        point[1] = np.nansum(point[1])
    mp.map, edges = create_map(mp.feature_resolution, mp.domain)
    initialisemap(rand_points)
    randommap, fitness = getpredmap(rand_points)
    random_fit_list.append(fitness)

    with open('Results/Init-Test/Random/fitness.csv', 'wb') as filehandler:
        # was `sobol_fit_list`: a copy-paste slip from the Sobol block above
        pickle.dump(random_fit_list, filehandler)
Example #7
def additional_sampling(n_add_samples, sobol_set, sobol_point, map):
    '''additional_sampling -

    Samples are produced using a Sobol sequence that evenly selects elites from 
    the current acquisition map. Those elites are then evaluated and added to the
    observation list (which will improve the surrogate model and also improve the
    prediction map) 
    
    ##TODO If samples are invalid (invalid geometry, or did
    not converge in simulator), the next sample in the Sobol sequence is
    chosen. Lather, rinse, repeat until all initial samples are clean.
    
    Example: new_points = additional_sampling(100 , my_sobol, current_point , map)
    
    Inputs (arguments):
       n_add_samples    - [ Integer ]   - number of samples to produce
       sobol_set        - [ List ]      - A set of random points in feature space
       sobol_point      - [ Integer ]   - An index for the sobol_set 
       map              - [ Map Class ] - The current acquisition map.

    Inputs (from config file):
        mp.domain - domain Class object
          .valid_ranges     - [ List ]  - To check validity of samples

    
    Outputs:
       valid_points - [ n_add_samples * [ [ x ] , [ y ] , [ f ] ] ]
       sample_end   - [ Integer ]      - An update to sobol_point (an index)  

    Code Author: Paul Kent 
    Warwick University
    email: [email protected]
    Oct 2020; Last revision: 16-Oct-2020 
    '''
    valid_ranges = mp.domain.valid_ranges  # Valid Search domain ranges
    new_value = []
    new_sample = []
    n_missing = n_add_samples
    new_points = []

    # randomly sample from map #########################################
    sample_end = sobol_point + 10 * n_add_samples
    random_genomes = [
        sobol_set[i][sobol_point:sample_end] for i in range(len(sobol_set))
    ]
    random_genomes = np.array(random_genomes).T
    #random_genomes = np.reshape(random_genomes , (-1 , len( valid_ranges ) ) )

    # identify which niche each point belongs to (in feature space)
    niche_index = [
        nichefinder(mp.feature_fun(random_genomes[i]), map)
        for i in range(len(random_genomes))
    ]
    # Remove duplicate niche indices, keeping only the first occurrence
    niche_index = [
        i for n, i in enumerate(niche_index) if i not in niche_index[:n]
    ]

    while n_missing > 0:
        if len(niche_index) == 0:
            print('Not enough unique samples to make all new evaluations')
            print('This can happen on the first few runs')
            print('or could indicate a problem with your functions')
            break  # leave n_missing > 0 so the success message is skipped

        niche_id = niche_index.pop()
        if not np.isnan(map.fitness[tuple(niche_id)]).any():
            new_points.append(map.genomes[tuple(niche_id)])
            n_missing -= 1

    if n_missing == 0:
        print(
            f'{Fore.GREEN}Success: {Style.RESET_ALL} New points to evaluate chosen from acquisition map'
        )
    # Validate the chosen points and evaluate the fitness/feature functions
    valid_points = []

    valid_points, n_missing = keep_valid(valid_points,
                                         np.array(new_points).T, n_add_samples,
                                         validate)

    if len(valid_points) == n_add_samples:
        print(f'{Fore.GREEN}Success: {Style.RESET_ALL} New points evaluated')
    elif len(valid_points) == 0:
        print(f'{Fore.RED}FAILURE: {Style.RESET_ALL} No new Points selected ')
    else:
        print(
            f'{Fore.YELLOW}Warning: {Style.RESET_ALL} Not all new points evaluated correctly '
        )
    return (valid_points, sample_end, len(valid_points))
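
A hypothetical call matching the docstring's example, threading the Sobol index through successive rounds:

sobol_point = 0
# my_sobol is assumed to be a pre-generated Sobol set, one sequence per dim
valid_points, sobol_point, n_ok = additional_sampling(
    10, my_sobol, sobol_point, mp.map)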
Example #8
plot_fit_ls()

truedata = np.genfromtxt('Results/AdamParsec/data/fit_true_500.csv',
                         delimiter=',')
preddata = np.genfromtxt(
    'Results/AdamRastrigin/10d/data_10se/data/fit_pred_500.csv', delimiter=',')
# Superseded by the genomes_500 load below:
# true_points = np.genfromtxt('Results/AdamRastrigin/train_input.csv',
#                             delimiter=',')

true_points = np.genfromtxt('Results/AdamResults/genomes_500.csv',
                            delimiter=',')

#true_points = [point[0] for point in points]
fitness = list(map(mp.fitness_fun, true_points))
behaviour = mp.feature_fun(true_points)
points = [(true_points[i], fitness[i], behaviour[i])
          for i in range(len(fitness))]
points  # notebook-style echo to inspect the observation triples
## Set Hypers
variance = 0.01
lengthscale = 0.5
noise_var = 0
GP_params = (variance, lengthscale, noise_var)
# `hypers` must carry over from an earlier run or cell; buildpymodel both
# consumes and returns the hyperparameters when retraining
hyper = hypers[-1]
fit_gp, hypers = buildpymodel(points, hypers, retrain=True)
###
mp.ME_params = (0.0, 2**6, 0.1, 2**9)
predmap = create_prediction_map(fit_gp, points)

#heatmap(np.array(fitness).reshape(25,25), 'none')
Example #9
        def acquisition_fun(points, give_var=False):
            # Check whether `points` is a single observation or a batch
            xdims = len(mp.domain.valid_ranges)

            if np.shape(points) == (xdims, ):
                index = pred_map.nichefinder(mp.feature_fun(points))
                if index not in mp.modelindex:
                    mp.modellist.append(
                        mp.map.adapt_model(omodel, index[0], index[1],
                                           globalmean))
                    mp.modelindex.append(index)
                niche_model = mp.modellist[mp.modelindex.index(index)]
                if len(mp.modellist) > 10:
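                    # Cap the cached niche models at 10; malloc_trim asks
                    # glibc to hand freed memory back to the OS (Linux-only)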
                    del mp.modellist[0]
                    del mp.modelindex[0]
                    libc = ctypes.CDLL("libc.so.6")
                    libc.malloc_trim(0)

                point = torch.tensor(points,
                                     dtype=torch.double).reshape(-1, xdims)

                niche_model.eval()
                posterior = niche_model.posterior(point)
                fitness = posterior.mean
                posterior_variance = posterior.variance.clamp_min(1e-9).sqrt()

                if give_var:
                    return (np.double(fitness), np.double(posterior_variance))
                return (np.double(fitness))

            else:
                #Process multiple points
                n = len(points)
                indexes = [
                    pred_map.nichefinder(mp.feature_fun(point))
                    for point in points
                ]
                points = np.array(points)
                points = torch.tensor(points,
                                      dtype=torch.double).reshape(-1, xdims)
                fitlist = []
                varlist = []
                for i in range(n):

                    index = tuple(indexes[i])
                    if index not in mp.modelindex:
                        mp.modellist.append(
                            mp.map.adapt_model(omodel, index[0], index[1],
                                               globalmean))
                        mp.modelindex.append(index)
                    niche_model = mp.modellist[mp.modelindex.index(index)]
                    if len(mp.modellist) > 10:
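                        # Same 10-entry cache cap and glibc trim as in the
                        # single-point branch above (Linux-only)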
                        del mp.modellist[0]
                        del mp.modelindex[0]
                        libc = ctypes.CDLL("libc.so.6")
                        libc.malloc_trim(0)


                    niche_model.eval()
                    posterior = niche_model.posterior(points[i].reshape(
                        -1, xdims))
                    fitness = posterior.mean
                    pv = posterior.variance.clamp_min(1e-9).sqrt()

                    fitlist.append(fitness)
                    varlist.append(pv)
                if give_var:
                    return (fitlist, varlist)
                else:
                    return (np.array(fitlist))
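
The modellist/modelindex pair above implements a small FIFO cache of per-niche models by hand. A sketch of the same idea with an OrderedDict, purely illustrative (cached_niche_model and build are hypothetical names):

from collections import OrderedDict

model_cache = OrderedDict()  # niche index -> adapted model, oldest first

def cached_niche_model(index, build):
    # Return a cached model for this niche, building it (and evicting the
    # oldest entry) when the 10-model budget is exceeded.
    if index not in model_cache:
        if len(model_cache) >= 10:
            model_cache.popitem(last=False)
        model_cache[index] = build(index)
    return model_cache[index]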
Example #10
    filehandler = open(
        'Results/Random_sampling' + '/' + str(seed) + '10x10BDC.csv', 'wb')
    pickle.dump(binary_data, filehandler)


seed = 2160
valueranges = [20, 50, 100, 250, 500, 750, 1000]
for i in range(20):
    values = []
    np.random.seed(seed)
    seed += 1
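    # NOTE: lhsmdu.sample's signature is (numDimensions, numSamples); double
    # check that this call really yields 1000 samples of 10 dimensions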
    initial_samples = np.array(lhsmdu.sample(1000, 10))
    print(str(valueranges[0]) + ' samples')
    points = initial_samples[:valueranges[0]]
    fitness = mp.fitness_fun(points)
    features = mp.feature_fun(points)
    observations = [[points[i], fitness[i], features[i]]
                    for i in range(valueranges[0])]
    mp.map, edges = create_map(mp.feature_resolution, mp.domain)
    initialisemap(observations)
    print('Score = ' + str(np.nansum(mp.map.fitness.flatten())))
    values.append(np.nansum(mp.map.fitness.flatten()))
    for count, val in enumerate(valueranges[1:]):
        new_samples = initial_samples[valueranges[count]:val]
        fitness = mp.fitness_fun(new_samples)
        features = mp.feature_fun(new_samples)
        new_observations = [[new_samples[i], fitness[i], features[i]]
                            for i in range(len(new_samples))]
        updatemapSAIL(new_observations)
        observations.extend(new_observations)  # extend, not append: keep the observation list flat