def example_color_img():
    """Render a three-channel color image of a Green fractal.

    Each channel gets its own (3, 1) parameter column and a per-frame
    rotation about a different axis; the three resulting generators are
    combined into a single RGB image.
    """
    frame_count = 64
    sweep = 2 * math.pi / 512
    step = sweep / frame_count

    # One (3, 1) column of ones per color channel.
    red_params = np.ones((3, 1))
    green_params = np.ones((3, 1))
    blue_params = np.ones((3, 1))

    # Rotate each channel about its own axis by the per-frame step.
    red_xform = utils.rotate_xform(step, 0, 0)
    green_xform = utils.rotate_xform(0, step, 0)
    blue_xform = utils.rotate_xform(0, 0, step)

    red_gen = generate.Generator(red_params, red_xform, frame_count)
    green_gen = generate.Generator(green_params, green_xform, frame_count)
    blue_gen = generate.Generator(blue_params, blue_xform, frame_count)

    image.generate_color_image(red_gen, green_gen, blue_gen)
Exemple #2
0
def get_pop_similarity(pop):
    """Return the mean directed Hausdorff distance over ordered pairs of
    point sets decoded from the population's latent vectors.

    Pairs (i, j) with j >= i are used, so each set's zero distance to
    itself is included in the mean (matching the original sampling).
    """
    generator = generate.Generator(num_params=NUM_PARAMS)
    point_sets = [generator.generate_return_pts(vec) for vec in pop]
    distances = [
        directed_hausdorff(point_sets[i], point_sets[j])[0]
        for i in range(len(point_sets))
        for j in range(i, len(point_sets))
    ]
    return np.mean(distances)
def example_img():
    """Build a Green-fractal generator covering one full rotation.

    NOTE(review): the Generator instance is constructed but never stored,
    used, or returned, so despite the original docstring no image is
    actually produced here -- presumably a rendering call was dropped;
    confirm against the project's history.
    """
    frame_count = 128
    sweep = 2 * math.pi / 1  # one full revolution
    step = sweep / frame_count

    params = np.array([[0.0, 1.0, 1.0]]).T  # (3, 1) parameter column

    xform = utils.rotate_xform(step, step, step)

    generate.Generator(params, xform, frame_count)
def visualize(lam_array, num_params, trials, load_dir):
    """Plot ellipsoid radii and performance for each experiment's populations.

    Experiments are the two dense runs ('DIF_SF_', 'DIT_SF_') followed by one
    sparse 'DIT_ST_<lam>_' run per entry of lam_array.
    """
    generator = generate.Generator(num_params)

    # Build (experiment name, sparse flag) pairs in the original order.
    exp_names = []
    sparse_flags = []
    for data in ('DIF', 'DIT'):          # dense experiments
        sparse_flags.append(False)
        exp_names.append(data + '_' + 'SF' + '_')
    for lam in lam_array:                # sparse experiments, one per lambda
        sparse_flags.append(True)
        exp_names.append('DIT' + '_' + 'ST' + '_' + str(lam) + '_')

    exp_latents = [
        generator.get_latent(name, flag, trials, load_dir)
        for name, flag in zip(exp_names, sparse_flags)
    ]

    print(exp_names)
    for name, pops in zip(exp_names, exp_latents):
        print(name)
        for pop in pops:
            rxs = []
            rys = []
            performances = []
            for latent in pop:
                # Scale by 10 to undo AtlasNet's point-cloud normalization.
                pts = 10 * generator.generate_return_pts(latent)
                radii = ee.getMinVolEllipse(P=pts)
                rx, ry = radii[0], radii[1]
                rxs.append(rx)
                rys.append(ry)
                performances.append(ee.performance_from_radii(rx, ry))
            ed.plot_data(rxs, rys, performances)
def visualize_pts(lam_array, num_params, trials, load_dir):
    """Scatter-plot a few decoded point clouds from saved latent populations.

    NOTE(review): the plotting loop below ignores its loop variable and
    reshapes the *entire* `exp_latents` structure each iteration -- this
    looks like leftover debugging code; see inline notes and confirm
    intent before relying on it.
    """
    generator = generate.Generator(num_params)
    data_key = ['DIF', 'DIT']
    sparse_key = ['SF', 'ST']
    exp_names = []
    sparse_flags = []

    # Dense experiments ('SF') pair with each data key; sparse ('ST') runs
    # are generated once per lambda with data key fixed to 'DIT'.
    for s in sparse_key:
        if s == 'SF':
            pass
            for d in data_key:
                sparse_flags.append(False)
                exp_names.append(d + '_' + s + '_')
        else:
            d = 'DIT'
            for lam in lam_array:
                sparse_flags.append(True)
                exp_names.append(d + '_' + s + '_' + str(lam) + '_')
    exp_latents = []
    for i in range(len(exp_names)):
        exp_latents.append(
            generator.get_latent(exp_names[i], sparse_flags[i], trials,
                                 load_dir))

    print(exp_names)
    #for pops in exp_latents[1]:
    #Plot performances of all objects
    pops = exp_latents
    for pop in pops:
        # NOTE(review): `pop` from the loop is immediately discarded, and
        # the reshape below uses `pops` (everything), not `pop` -- every
        # iteration therefore processes the same data. Confirm whether
        # `exp_latents[1]` / `pops` should actually be `pop`.
        pop = exp_latents[1]
        pop = np.reshape(pops, (-1, num_params))
        pop = pop.tolist()
        rxs = []
        rys = []
        performances = []
        # Sample three random latents to plot.
        random.shuffle(pop)
        for latent in pop[0:3]:
            latent = np.asarray(latent)
            # Scale by 10 to undo AtlasNet's point-cloud normalization.
            pts = 10 * generator.generate_return_pts(latent)
            xpts = pts[0]
            ypts = pts[1]
            zpts = pts[2]
            ed.plot_ellipsoid_points(xpts, ypts, zpts)
Exemple #6
0
def get_trainset_similarity(pop):
    """Return the mean directed Hausdorff distance from every population
    point set to every training-set point set."""
    generator = generate.Generator(num_params=NUM_PARAMS)

    # Decode the GA population into point sets.
    pop_points = [generator.generate_return_pts(vec) for vec in pop]

    # Decode 300 training objects (seeded) into point sets.
    train_vecs, _ = generator.load_training_objects(300, SEED)
    train_points = [generator.generate_return_pts(vec) for vec in train_vecs]

    distances = [
        directed_hausdorff(ppoints, tpoints)[0]
        for ppoints in pop_points
        for tpoints in train_points
    ]
    return np.mean(distances)
def visualize_training(num_samples, num_params=1024):
    """Plot ellipsoid radii and performance for training objects across
    five seeds (one plot per seed)."""
    generator = generate.Generator(num_params)
    for seed in range(5):
        latents, names = generator.load_training_objects(num_samples, seed)
        rxs = []
        rys = []
        performances = []
        for latent in latents:
            # Scale by 10 to undo AtlasNet's point-cloud normalization.
            pts = 10 * generator.generate_return_pts(latent)
            radii = ee.getMinVolEllipse(P=pts)
            rx, ry = radii[0], radii[1]
            rxs.append(rx)
            rys.append(ry)
            performances.append(ee.performance_from_radii(rx, ry))
        ed.plot_data(rxs, rys, performances)
Exemple #8
0
 def setUp(self):
     # Fresh fixtures before each test: build a Generator over "folder7",
     # run its generation step, then create the cleaner under test.
     self.gener = generate.Generator("folder7")
     self.gener.generate()
     self.cl = clean.MyClean()
Exemple #9
0
import numpy as np
from PIL import Image, ImageDraw
import random, math
import generate as gen
import world
w = 5  # grid width, passed to gen.Generator and wrld.clean below
h = 5  # grid height


def main_loop(layers, width, height, generator):
    """Run `layers` generation passes (100 iterations each), then save.

    `width` and `height` are accepted for interface compatibility but are
    not used by this loop.
    """
    for _ in range(layers):
        generator.one_pass(100)
    generator.save_image()


if __name__ == "__main__":
    # Build a generator, run ten layered passes, then round-trip the saved
    # image through world.worldify and feed the cleaned result back in.
    gen = gen.Generator(w, h)  # NOTE: rebinds the imported module name `gen`
    main_loop(10, w, h, gen)
    # Assumes main_loop's save_image() wrote "output.png" -- TODO confirm.
    wrld = world.worldify(Image.open("output.png", "r"))
    image = wrld.clean(w, h)
    gen.set_image(image)
    return radii


def performance_from_radii(rx, ry):
    """Look up a performance score for a pair of ellipsoid radii.

    Each radius is snapped to the nearest of the cluster centers
    1, 3, 5, 7, 9 (ties resolve to the smaller center), and the resulting
    pair of indices selects a score from a fixed 5x5 table.
    """
    score_table = [
        [5, 4, 3, 4, 5],
        [4, 3, 2, 3, 4],
        [3, 2, 1, 2, 3],
        [4, 3, 2, 3, 4],
        [5, 4, 3, 4, 5],
    ]
    centers = [1, 3, 5, 7, 9]
    row = min(range(len(centers)), key=lambda i: abs(centers[i] - rx))
    col = min(range(len(centers)), key=lambda i: abs(centers[i] - ry))
    return score_table[row][col]


if __name__ == '__main__':
    # Smoke test: decode 300 training latents, fit a minimum-volume
    # ellipsoid to each point cloud, and print the table-lookup score.
    import generate
    generator = generate.Generator(num_params=1024)
    latent_vectors, _ = generator.load_training_objects(300)
    for latent in latent_vectors:
        # Scale by 10 to undo AtlasNet's point-cloud normalization.
        points = 10 * generator.generate_return_pts(latent)
        radii = getMinVolEllipse(P=points)
        performance = performance_from_radii(radii[0], radii[1])
        print(performance)

    #load_file = './AtlasNet/data/ellipsoid_points/ellipsoid_2439.pkl'
    #import pickle
    #with open(load_file,'rb') as f:
    #    points = pickle.load(f)
    #radii = getMinVolEllipse(P=points)
    #performance = performance_from_radii(radii[0],radii[1])
Exemple #11
0
import pickle
import torch
import generate as g

# Load the character vocabulary and its index mappings.
# NOTE(review): pickle.load on this file is only safe if the pickle is
# trusted (it was presumably written by this project's training run).
with open("character_data.pickle", "rb") as f:
    (chars, indx_to_chars, chars_to_indx, n_chars) = pickle.load(f)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
input_size = n_chars  # input width matches the vocabulary size
hidden_size = 50
dropout = 0.25
n_layers = 2
path = "model.mod"  # presumably a saved model checkpoint -- confirm
gen = g.Generator(input_size, hidden_size, dropout, device, n_layers, path)

# NOTE(review): the meaning of the final -1 argument is not visible here;
# presumably a length sentinel -- confirm against generate.py.
print(gen.generate(indx_to_chars, chars_to_indx, -1))
"""
Test routines for generate.py

author: Eduardo Villasenor
created-on: 028/08/17
"""

import generate

generator = generate.Generator()

# Initial parameters
# NOTE(review): ps/pm/pg/pn look like per-operation error rates fed to the
# model -- confirm their exact semantics in generate.py.
ps = 0.0
pm = 0.009
pg = 0.009
pn = 0.0
system_size = 4   # stabilizer size passed as stab_size
parity = "Z"      # parity basis
function = "LOCAL"  # protocol selector

# Calculate chi matrix
chi = generator.ask_model(ps=ps, pm=pm, pg=pg, pn=pn, stab_size=system_size,
                          parity=parity, protocol=function)
def run_evo(data_init=True,
            sparse=False,
            lam=1e-5,
            num_params=1024,
            load_file=None,
            maxiter=5,
            out_name=None,
            bound=1,
            pop_size=120,
            mutation_rate=0.05,
            recomb_rate=0.9,
            norm=0):
    pickle_file = './pickle/' + out_name + '.pkl'
    trial = int(out_name[-1])
    if os.path.exists(pickle_file) == False:
        Generator = generate.Generator(num_params)
        start = 0
        learning_curve = []
        if sparse:
            adj_learning_curve = []
        max_scores = []
        max_name = 'no score'
        population = []
        init_names = []
        l0_norms = []
        if data_init:
            population, init_names = Generator.load_training_objects(
                pop_size, trial
            )  #we seed the initialization per trial for fair comparisions
        else:
            for i in range(0, pop_size):
                indv = []
                for j in range(num_params):
                    indv.append(np.random.normal())
                population.append(indv)
    else:
        print('Loading from pickle file')
        with open(pickle_file, 'rb') as f:  # Python 3: open(..., 'rb')
            if sparse:
                population, learning_curve, adj_learning_curve, max_scores, max_name, l0_norms, init_pop, Generator = pickle.load(
                    f)
            else:
                population, learning_curve, max_scores, max_name, l0_norms, init_pop, Generator = pickle.load(
                    f)
        start = len(learning_curve)
        print("Start: {}".format(start))

    tictoc = TicToc()
    previous_generation = []
    num_cores = mp.cpu_count()
    if (not max_scores):
        max_score = 1e6
    else:
        max_score = max_scores[-1]

    for i in range(start, maxiter):
        tictoc.tic()
        worker_args = []
        res = []
        current_generation = []
        for j in range(0, pop_size):

            #--- MUTATION ---------------------+
            # select three random vector index positions [0, popsize), not including current vector (j)
            candidates = list(range(0, pop_size))
            candidates.remove(j)
            random_index = random.sample(candidates, 3)

            x_1 = population[random_index[0]]
            x_2 = population[random_index[1]]
            x_3 = population[random_index[2]]
            x_t = population[j]  # target individual

            # subtract x3 from x2, and create a new vector (x_diff)
            x_diff = [x_2_i - x_3_i for x_2_i, x_3_i in zip(x_2, x_3)]

            # multiply x_diff by the mutation factor (F) and add to x_1
            v_donor = [
                x_1_i + mutation_rate * x_diff_i
                for x_1_i, x_diff_i in zip(x_1, x_diff)
            ]
            v_donor = evalu.ensure_bounds(v_donor, (-bound, bound))

            #--- RECOMBINATION ----------------+
            v_trial = []
            for k in range(len(x_t)):
                crossover = random.random()
                if crossover <= recomb_rate:
                    v_trial.append(v_donor[k])
                else:
                    v_trial.append(x_t[k])
            name = "G" + str(i) + "_i" + str(j)
            points = 10 * Generator.generate_return_pts(
                v_trial
            )  #multiply by 10 to undo AtlasNet's normalization of PCs
            worker_args.append((name, points, v_trial, sparse, lam, norm))

        pool = mp.Pool(num_cores - 1)
        res = pool.starmap(evalu._EllipsoidEvalFunc,
                           worker_args)  #or _EvaluationFunction for boats
        current_generation = []
        for r in res:
            if r is not None:
                current_generation.append(r)
        pool.close()
        pool.join()

        # sort the dictionary by "output" and take the t
        current_generation.extend(previous_generation)
        current_generation = sorted(current_generation,
                                    key=lambda k: k["output"])
        curr_scores = [k["output"] for k in current_generation]
        curr_names = [k["name"] for k in current_generation]
        curr_l0 = [k["l0"] for k in current_generation]
        if (sparse):
            adj_scores = [k["adj_score"] for k in current_generation]
            adj_learning_curve.append(np.mean(adj_scores))
        l0_norms.append(np.mean(curr_l0))
        previous_generation = current_generation[:pop_size]
        population = [genome["input"] for genome in previous_generation]
        if i == 0:
            init_pop = population

        #adjust the scores for l_0 norm penalty for learning curve plot for direct comparisons
        # if sparse:
        # 	adj_scores = []
        # 	for j in range(len(curr_scores)):
        # 		adj_scores.append(curr_scores[j] - lam*curr_l0[j]/num_params)

        learning_curve.append(np.mean(curr_scores))
        if (curr_scores[0] < max_score):
            max_name = curr_names[0]
            max_score = curr_scores[0]
            max_scores.append(max_score)

        #dump pickle file
        with open(pickle_file, 'wb') as f:
            if sparse:
                pickle.dump([
                    population, learning_curve, adj_learning_curve, max_scores,
                    max_name, l0_norms, init_pop, Generator
                ], f)
            else:
                pickle.dump([
                    population, learning_curve, max_scores, max_name, l0_norms,
                    init_pop, Generator
                ], f)

        #print stuff
        print('GENERATION: {}'.format(i))
        print('Average Score This Generation: {}'.format(np.mean(curr_scores)))
        if sparse:
            print('Average Adjusted Score This Generation: {}'.format(
                np.mean(adj_scores)))
        print('Average l0-Norm This Generation: {}'.format(np.mean(curr_l0)))