コード例 #1
0
def _write_field_datasets(h5file, info, maglist, mags, lookup, suffix):
    """Compute the B-fields for one magnet set and write them to `h5file`.

    Writes `<beam>_per_beam<suffix>`, `id_Bfield<suffix>`,
    `id_phase_error<suffix>` and `id_trajectory<suffix>` datasets.
    """
    per_beam_field = generate_per_beam_b_field(info, maglist, mags, lookup)
    total_id_field = generate_id_field(info, maglist, mags, lookup)
    for name in per_beam_field.keys():
        h5file.create_dataset("%s_per_beam%s" % (name, suffix),
                              data=per_beam_field[name])
    h5file.create_dataset('id_Bfield%s' % (suffix), data=total_id_field)
    phase_error, trajectory = mt.calculate_phase_error(info, total_id_field)
    h5file.create_dataset('id_phase_error%s' % (suffix), data=phase_error)
    h5file.create_dataset('id_trajectory%s' % (suffix), data=trajectory)


def output_fields(filename, id_filename, lookup_filename, magnets_filename, maglist):
    """Write real and idealized ('perfect') B-field datasets to an HDF5 file.

    filename: output HDF5 file path.
    id_filename: JSON description of the insertion device (provides 'beams').
    lookup_filename: HDF5 file with one lookup array per beam.
    magnets_filename: magnet measurement file loaded via magnets.Magnets().
    maglist: magnet ordering used to generate the fields.
    """
    with open(id_filename, 'r') as id_file:
        info = json.load(id_file)

    # Pull each beam's lookup array fully into memory so the HDF5 file can
    # be closed before the (potentially long) field computations start.
    lookup = {}
    with h5py.File(lookup_filename, 'r') as lookup_file:
        for beam in info['beams']:
            lookup[beam['name']] = lookup_file[beam['name']][...]

    mags = magnets.Magnets()
    mags.load(magnets_filename)
    ref_mags = generate_reference_magnets(mags)

    with h5py.File(filename, 'w') as f:
        # Fields produced by the real (measured) magnets.
        _write_field_datasets(f, info, maglist, mags, lookup, '')
        # Fields produced by idealized reference magnets, for comparison.
        _write_field_datasets(f, info, maglist, ref_mags, lookup, '_perfect')
コード例 #2
0
def generate_reference_magnets(mags):
    """Build an idealized copy of `mags` with uniform per-type field strength.

    Each reference magnet points along the dominant axis of the first real
    magnet of its type, with magnitude equal to the type's mean field.
    """
    ref_mags = magnets.Magnets()
    for magtype, magset in mags.magnet_sets.items():
        # BUG FIX: dict.values() is a non-subscriptable view in Python 3, so
        # the original `.values()[0]` raised TypeError; take the first value
        # with next(iter(...)) instead (works on Python 2 and 3).
        mag_dir = next(iter(magset.values())).argmax()
        unit = np.zeros(3)
        unit[mag_dir] = mags.mean_field[magtype]
        ref_mags.add_perfect_magnet_set_duplicate(magtype, magset, unit,
                                                  mags.magnet_flip[magtype])
    return ref_mags
コード例 #3
0
def generate_reference_magnets(mags):
    """Build an idealized copy of `mags` with uniform per-type field strength.

    Each reference magnet points along the dominant axis of the first real
    magnet of its type, with magnitude equal to the type's mean field.
    """
    ref_mags = magnets.Magnets()
    for magtype, magset in mags.magnet_sets.items():
        # BUG FIX: dict.values() is a non-subscriptable view in Python 3, so
        # the original `.values()[0]` raised TypeError; take the first value
        # with next(iter(...)) instead (works on Python 2 and 3).
        mag_dir = next(iter(magset.values())).argmax()
        unit = np.zeros(3)
        unit[mag_dir] = mags.mean_field[magtype]
        ref_mags.add_perfect_magnet_set_duplicate(magtype,
                                                  magset,
                                                  unit,
                                                  mags.magnet_flip[magtype])
    return ref_mags
コード例 #4
0
def calculate_fitness(id_filename, lookup_filename, magnets_filename, maglist):
    """Score `maglist` against the field of an idealized reference magnet set.

    Returns the fitness value produced by calculate_cached_fitness().
    """
    # TODO this will be slow, but should be optimizable with lookups
    with open(id_filename, 'r') as id_file:
        info = json.load(id_file)

    mags = magnets.Magnets()
    mags.load(magnets_filename)

    ref_mags = generate_reference_magnets(mags)
    ref_maglist = magnets.MagLists(ref_mags)

    lookup = h5py.File(lookup_filename, 'r')
    try:
        ref_total_id_field = generate_id_field(info, ref_maglist, ref_mags,
                                               lookup)
        # BUG FIX: the original passed the `magnets` *module* here rather
        # than the loaded `mags` object (compare the call to
        # calculate_cached_trajectory_fitness elsewhere in this file).
        result = calculate_cached_fitness(info, lookup, mags, maglist,
                                          ref_total_id_field)
    finally:
        # Close the HDF5 handle even if field generation or scoring raises.
        lookup.close()

    return result
コード例 #5
0
def process(options, args):
    """Run the MPI (or single-threaded) shimming optimization loop.

    Loads the ID description, field lookups, measured B-field and magnets,
    builds reference trajectories, then iteratively mutates a population of
    ID_Shim_BCell genomes, exchanging populations between ranks each
    iteration and checkpointing the best genome from rank 0.

    options: parsed command-line options (seed, singlethreaded, filenames,
        e, c, scale, setup, iterations, max_age, number_of_changes, ...).
    args: positional arguments; args[0] is the checkpoint/output directory.
    """

    if options.seed:
        random.seed(int(options.seed_value))

    # Determine this process' rank and world size; single-threaded mode
    # behaves like a one-process MPI job.
    if options.singlethreaded:
        rank = 0
        size = 1
    else:
        rank = MPI.COMM_WORLD.rank  # The process ID (integer 0-3 for 4-process run)
        size = MPI.COMM_WORLD.size  # The number of processes in the job.

    # get the hostname
    if options.singlethreaded:
        ip = 'localhost'
    else:
        ip = socket.gethostbyname(socket.gethostname())

    logging.debug("Process %d ip address is : %s" % (rank, ip))

    # Load the insertion-device description (JSON).
    f2 = open(options.id_filename, 'r')
    info = json.load(f2)
    f2.close()

    # Load the per-beam lookup arrays fully into memory so the HDF5 file
    # can be closed immediately.
    logging.debug("Loading Lookup")
    f1 = h5py.File(options.lookup_filename, 'r')
    lookup = {}
    for beam in info['beams']:
        logging.debug("Loading beam %s" % (beam['name']))
        lookup[beam['name']] = f1[beam['name']][...]
    f1.close()

    barrier(options.singlethreaded)

    # Load the measured B-field that the shimming run tries to correct.
    logging.debug("Loading Initial Bfield")
    f1 = h5py.File(options.bfield_filename, 'r')
    real_bfield = f1['id_Bfield'][...]
    f1.close()
    logging.debug(real_bfield)

    barrier(options.singlethreaded)

    logging.debug("Loading magnets")
    mags = magnets.Magnets()
    mags.load(options.magnets_filename)

    # Build an idealized reference magnet set and the trajectories it would
    # produce; genomes are scored against these reference trajectories.
    logging.debug('mpi runenr calling fg.generate_reference_magnets()')
    ref_mags = fg.generate_reference_magnets(mags)
    logging.debug('mpi runenr calling MagLists()')
    ref_maglist = magnets.MagLists(ref_mags)
    logging.debug('after ref_maglist')
    ref_total_id_field = fg.generate_id_field(info, ref_maglist, ref_mags,
                                              lookup)
    pherr, ref_trajectories = mt.calculate_phase_error(info,
                                                       ref_total_id_field)

    barrier(options.singlethreaded)

    #epoch_path = os.path.join(args[0], 'epoch')
    #next_epoch_path = os.path.join(args[0], 'nextepoch')
    # start by creating the directory to put the initial population in

    population = []
    # estar is the target fitness driving the mutation-count schedule.
    estar = options.e

    # Load the initial genome
    initialgenome = ID_BCell()
    initialgenome.load(options.genome_filename)

    referencegenome = ID_BCell()
    referencegenome.load(options.genome_filename)

    # make the initial population
    for i in range(options.setup):
        # create a fresh maglist
        newgenome = ID_Shim_BCell()
        newgenome.create(info, lookup, mags, initialgenome.genome,
                         ref_trajectories, options.number_of_changes,
                         real_bfield)
        population.append(newgenome)

    # gather the population
    # Send this rank's whole population to every rank.
    trans = []
    for i in range(size):
        trans.append(population)

    allpop = alltoall(options.singlethreaded, trans)

    barrier(options.singlethreaded)

    # Flatten the per-rank lists into one combined population.
    newpop = []
    for pop in allpop:
        newpop += pop

    # Need to deal with replicas and old genomes
    # Deduplicate by string-formatted fitness, keeping the older genome.
    popdict = {}
    for genome in newpop:
        fitness_key = "%1.8E" % (genome.fitness)
        if fitness_key in popdict.keys():
            if popdict[fitness_key].age < genome.age:
                popdict[fitness_key] = genome
        else:
            popdict[fitness_key] = genome

    # Drop genomes that have reached the maximum age.
    newpop = []
    for genome in popdict.values():
        if genome.age < options.max_age:
            newpop.append(genome)

    newpop.sort(key=lambda x: x.fitness)

    # Each rank keeps its own contiguous slice of the sorted population.
    newpop = newpop[options.setup * rank:options.setup * (rank + 1)]

    for genome in newpop:
        logging.debug("genome fitness: %1.8E   Age : %2i   Mutations : %4i" %
                      (genome.fitness, genome.age, genome.mutations))

    #Checkpoint best solution
    if rank == 0:
        logging.debug("Best fitness so far is %f" % (newpop[0].fitness))
        newpop[0].save(args[0])

    # now run the processing
    for i in range(options.iterations):

        barrier(options.singlethreaded)
        logging.debug("Starting itteration %i" % (i))

        nextpop = []

        for genome in newpop:

            # now we have to create the offspring
            # TODO this is for the moment
            logging.debug("Generating children for %s" % (genome.uid))
            number_of_children = options.setup
            # Mutation count shrinks as fitness approaches estar.
            number_of_mutations = mutations(options.c, estar, genome.fitness,
                                            options.scale)
            children = genome.generate_children(number_of_children,
                                                number_of_mutations,
                                                info,
                                                lookup,
                                                mags,
                                                ref_trajectories,
                                                real_bfield=real_bfield)

            # now save the children into the new file
            for child in children:
                nextpop.append(child)

            # and save the original
            nextpop.append(genome)

        # gather the population
        # NOTE(review): this inner loop reuses `i`, shadowing the outer
        # iteration counter; harmless because `i` is not read again before
        # the outer loop reassigns it, but worth renaming.
        trans = []
        for i in range(size):
            trans.append(nextpop)

        allpop = alltoall(options.singlethreaded, trans)

        newpop = []
        for pop in allpop:
            newpop += pop

        # Deduplicate by fitness, keeping the older genome.
        popdict = {}
        for genome in newpop:
            fitness_key = "%1.8E" % (genome.fitness)
            if fitness_key in popdict.keys():
                if popdict[fitness_key].age < genome.age:
                    popdict[fitness_key] = genome
            else:
                popdict[fitness_key] = genome

        # Age out old genomes.
        newpop = []
        for genome in popdict.values():
            if genome.age < options.max_age:
                newpop.append(genome)

        newpop.sort(key=lambda x: x.fitness)

        # Tighten the mutation target to just below the current best fitness.
        estar = newpop[0].fitness * 0.99
        logging.debug("new estar is %f" % (estar))

        newpop = newpop[options.setup * rank:options.setup * (rank + 1)]

        #Checkpoint best solution
        if rank == 0:
            initialgenome.genome.mutate_from_list(newpop[0].genome)
            initialgenome.fitness = newpop[0].fitness
            initialgenome.uid = "A" + newpop[0].uid
            initialgenome.save(args[0])
            saveh5(args[0], initialgenome, referencegenome, info, mags,
                   real_bfield, lookup)
            # After the save reload the original data
            initialgenome.load(options.genome_filename)
            newpop[0].save(args[0])

        for genome in newpop:
            logging.debug(
                "genome fitness: %1.8E   Age : %2i   Mutations : %4i" %
                (genome.fitness, genome.age, genome.mutations))

        barrier(options.singlethreaded)

    barrier(options.singlethreaded)

    # gather the population
    # NOTE(review): `nextpop` is only assigned inside the iteration loop, so
    # this final gather raises NameError when options.iterations == 0 —
    # confirm a zero-iteration run is never expected.
    trans = []
    for i in range(size):
        trans.append(nextpop)

    allpop = alltoall(options.singlethreaded, trans)

    newpop = []
    for pop in allpop:
        newpop += pop

    newpop.sort(key=lambda x: x.fitness)

    newpop = newpop[options.setup * rank:options.setup * (rank + 1)]

    #Checkpoint best solution
    if rank == 0:
        initialgenome.genome.mutate_from_list(newpop[0].genome)
        initialgenome.age_bcell()
        initialgenome.save(args[0])
        newpop[0].save(args[0])
コード例 #6
0
    # Fragment of a larger entry point (enclosing def is outside this view).
    parser = optparse.OptionParser(usage=usage)
    (options, args) = parser.parse_args()

    # Load the insertion-device description (JSON) from args[0].
    #f2 = open('/home/gdy32713/DAWN_stable/optid/Opt-ID/IDSort/src/v2/2015test.json', 'r')
    f2 = open(args[0], 'r')
    info = json.load(f2)
    f2.close()

    # Load the per-beam lookup arrays from the HDF5 file given as args[1],
    # pulling them fully into memory so the file can be closed.
    #f1 = h5py.File('/home/gdy32713/DAWN_stable/optid/Opt-ID/IDSort/src/v2/2015test.h5', 'r')
    f1 = h5py.File(args[1], 'r')
    lookup = {}
    for beam in info['beams']:
        lookup[beam['name']] = f1[beam['name']][...]
    f1.close()

    # Load the measured magnet data from args[2].
    mags = magnets.Magnets()
    #mags.load('/home/gdy32713/DAWN_stable/optid/Opt-ID/IDSort/src/v2/magnets.mag')
    mags.load(args[2])

    # Reference (idealized) field and trajectories used as the fitness target.
    ref_mags = generate_reference_magnets(mags)
    ref_maglist = magnets.MagLists(ref_mags)
    ref_total_id_field = generate_id_field(info, ref_maglist, ref_mags, lookup)
    ref_pherr, ref_trajectories = mt.calculate_phase_error(
        info, ref_total_id_field)

    # Score a randomly shuffled magnet ordering against the reference.
    maglist = magnets.MagLists(mags)
    maglist.shuffle_all()
    original_bfield, maglist_fitness = calculate_cached_trajectory_fitness(
        info, lookup, mags, maglist, ref_trajectories)

    mag_array = generate_per_magnet_array(info, maglist, mags)
コード例 #7
0
def process(options, args):
    """Run the MPI (or single-threaded) magnet-sorting optimization loop.

    Loads the ID description, field lookups and magnets, builds reference
    trajectories, seeds a population of ID_BCell genomes (optionally
    restarting from checkpoints in args[0]), then iteratively mutates the
    population, exchanging it between ranks each iteration and
    checkpointing the best genome from rank 0.

    options: parsed command-line options (seed, singlethreaded, restart,
        filenames, e, c, scale, setup, iterations, max_age, ...).
    args: positional arguments; args[0] is the checkpoint/output directory.
    """

    if options.seed:
        random.seed(int(options.seed_value))

    # Determine this process' rank and world size; single-threaded mode
    # behaves like a one-process MPI job.
    if options.singlethreaded:
        rank = 0
        size = 1
    else:
        rank = MPI.COMM_WORLD.rank  # The process ID (integer 0-3 for 4-process run)
        size = MPI.COMM_WORLD.size  # The number of processes in the job.

    # get the hostname
    if options.singlethreaded:
        ip = 'localhost'
    else:
        ip = socket.gethostbyname(socket.gethostname())

    logging.debug("Process %d ip address is : %s" % (rank, ip))

    # Load the insertion-device description (JSON).
    f2 = open(options.id_filename, 'r')
    info = json.load(f2)
    f2.close()

    # Load the per-beam lookup arrays fully into memory so the HDF5 file
    # can be closed immediately.
    logging.debug("Loading Lookup")
    f1 = h5py.File(options.lookup_filename, 'r')
    lookup = {}
    for beam in info['beams']:
        logging.debug("Loading beam %s" % (beam['name']))
        lookup[beam['name']] = f1[beam['name']][...]
    f1.close()

    barrier(options.singlethreaded)

    logging.debug("Loading magnets")
    mags = magnets.Magnets()
    mags.load(options.magnets_filename)

    # Build an idealized reference magnet set and the trajectories it would
    # produce; genomes are scored against these reference trajectories.
    ref_mags = fg.generate_reference_magnets(mags)
    ref_maglist = magnets.MagLists(ref_mags)
    ref_total_id_field = fg.generate_id_field(info, ref_maglist, ref_mags,
                                              lookup)
    #logging.debug("before phase calculate error call")
    #logging.debug(ref_total_id_field.shape())
    pherr, ref_trajectories = mt.calculate_phase_error(info,
                                                       ref_total_id_field)

    barrier(options.singlethreaded)

    #epoch_path = os.path.join(args[0], 'epoch')
    #next_epoch_path = os.path.join(args[0], 'nextepoch')
    # start by creating the directory to put the initial population in

    population = []
    # estar is the target fitness driving the mutation-count schedule.
    estar = options.e

    if options.restart and (rank == 0):
        # Restart: rank 0 reloads previously saved genomes from args[0].
        filenames = os.listdir(args[0])
        # sort the genome filenames to ensure that when given the same set of
        # files in a directory, population[0] is the same across different
        # orderings of the listed directory contents: this is to fix the test
        # MpiRunnerTest.test_process_initial_population() in mpi_runner_test.py
        # when run on travis
        filenames.sort()
        for filename in filenames:
            fullpath = os.path.join(args[0], filename)
            try:
                logging.debug("Trying to load %s" % (fullpath))
                genome = ID_BCell()
                genome.load(fullpath)
                population.append(genome)
                logging.debug("Loaded %s" % (fullpath))
            except:
                # NOTE(review): bare except treats any unreadable file as
                # "not a genome"; deliberate best-effort, but it also hides
                # real errors.
                logging.debug("Failed to load %s" % (fullpath))
        if len(population) < options.setup:
            # Seed with children from first
            children = population[0].generate_children(
                options.setup - len(population), 20, info, lookup, mags,
                ref_trajectories)
            # now save the children into the new file
            for child in children:
                population.append(child)
    else:
        logging.debug("make the initial population")
        for i in range(options.setup):
            # create a fresh maglist
            maglist = magnets.MagLists(mags)
            maglist.shuffle_all()
            genome = ID_BCell()
            genome.create(info, lookup, mags, maglist, ref_trajectories)
            population.append(genome)

    logging.debug("Initial population created")

    # gather the population
    # Send this rank's whole population to every rank.
    trans = []
    for i in range(size):
        trans.append(population)

    allpop = alltoall(options.singlethreaded, trans)

    barrier(options.singlethreaded)

    # Flatten the per-rank lists into one combined population.
    newpop = []
    for pop in allpop:
        newpop += pop

    # Need to deal with replicas and old genomes
    # Deduplicate by string-formatted fitness, keeping the older genome.
    popdict = {}
    for genome in newpop:
        fitness_key = "%1.8E" % (genome.fitness)
        if fitness_key in popdict.keys():
            if popdict[fitness_key].age < genome.age:
                popdict[fitness_key] = genome
        else:
            popdict[fitness_key] = genome

    # Drop genomes that have reached the maximum age.
    newpop = []
    for genome in popdict.values():
        if genome.age < options.max_age:
            newpop.append(genome)

    newpop.sort(key=lambda x: x.fitness)

    # Each rank keeps its own contiguous slice of the sorted population.
    newpop = newpop[options.setup * rank:options.setup * (rank + 1)]

    for genome in newpop:
        logging.debug("genome fitness: %1.8E   Age : %2i   Mutations : %4i" %
                      (genome.fitness, genome.age, genome.mutations))

    #Checkpoint best solution
    if rank == 0:
        newpop[0].save(args[0])

    # now run the processing
    for i in range(options.iterations):

        barrier(options.singlethreaded)
        logging.debug("Starting itteration %i" % (i))

        nextpop = []

        for genome in newpop:

            # now we have to create the offspring
            # TODO this is for the moment
            logging.debug("Generating children for %s" % (genome.uid))
            number_of_children = options.setup
            # Mutation count shrinks as fitness approaches estar.
            number_of_mutations = mutations(options.c, estar, genome.fitness,
                                            options.scale)
            children = genome.generate_children(number_of_children,
                                                number_of_mutations, info,
                                                lookup, mags, ref_trajectories)

            # now save the children into the new file
            for child in children:
                nextpop.append(child)

            # and save the original
            nextpop.append(genome)

        # gather the population
        # NOTE(review): this inner loop reuses `i`, shadowing the outer
        # iteration counter; harmless because `i` is not read again before
        # the outer loop reassigns it, but worth renaming.
        trans = []
        for i in range(size):
            trans.append(nextpop)

        allpop = alltoall(options.singlethreaded, trans)

        newpop = []
        for pop in allpop:
            newpop += pop

        # Deduplicate by fitness, keeping the older genome.
        popdict = {}
        for genome in newpop:
            fitness_key = "%1.8E" % (genome.fitness)
            if fitness_key in popdict.keys():
                if popdict[fitness_key].age < genome.age:
                    popdict[fitness_key] = genome
            else:
                popdict[fitness_key] = genome

        # Age out old genomes.
        newpop = []
        for genome in popdict.values():
            if genome.age < options.max_age:
                newpop.append(genome)

        newpop.sort(key=lambda x: x.fitness)

        # Tighten the mutation target to just below the current best fitness.
        estar = newpop[0].fitness * 0.99
        logging.debug("new estar is %f" % (estar))

        newpop = newpop[options.setup * rank:options.setup * (rank + 1)]

        #Checkpoint best solution
        if rank == 0:
            newpop[0].save(args[0])

        for genome in newpop:
            logging.debug(
                "genome fitness: %1.8E   Age : %2i   Mutations : %4i" %
                (genome.fitness, genome.age, genome.mutations))

        barrier(options.singlethreaded)

    barrier(options.singlethreaded)

    # gather the population
    # NOTE(review): `nextpop` is only assigned inside the iteration loop, so
    # this final gather raises NameError when options.iterations == 0 —
    # confirm a zero-iteration run is never expected.
    trans = []
    for i in range(size):
        trans.append(nextpop)

    allpop = alltoall(options.singlethreaded, trans)

    newpop = []
    for pop in allpop:
        newpop += pop

    newpop.sort(key=lambda x: x.fitness)

    newpop = newpop[options.setup * rank:options.setup * (rank + 1)]

    #Checkpoint best solution
    if rank == 0:
        newpop[0].save(args[0])
コード例 #8
0
def process(options, args):
    """Convert genome files between pickled MagLists and other forms.

    --create_genome: parse human-readable build lists into pickled MagLists.
    --readable: render genome files as human-readable output.
    --analysis: unpickle genomes and write their field data to HDF5.

    args: the filenames to operate on.
    """
    if options.create_genome:
        # Build-list column 2 holds a numeric magnet-type code; columns 5
        # and 4 hold the magnet name and its flip state respectively.
        type_for_code = {4: 'HE', 3: 'VE', 2: 'HH', 1: 'VV'}
        for filename in args[0::]:
            print("Turning file %s from Human Readable to Genome" % (filename))

            buildlist = np.genfromtxt(filename, dtype=str)

            mags = magnets.Magnets()
            mags.load(options.magnets_filename)

            maglist = magnets.MagLists(mags)

            # Next insertion position within each magnet list.
            swap_count = {'HE': 0, 'VE': 0, 'HH': 0, 'VV': 0}

            for line in range(buildlist.shape[0]):
                magtype = type_for_code.get(int(buildlist[line, 2]))
                if magtype is None:
                    continue  # unknown type code; original also ignored these
                pos = swap_count[magtype]
                # Move the named magnet into the next slot for its type,
                # then record its flip state.
                maglist.swap(
                    magtype, maglist.magnet_lists[magtype].index(
                        [buildlist[line, 5], 1, 0]), pos)
                maglist.magnet_lists[magtype][pos][1] = int(buildlist[line, 4])
                swap_count[magtype] += 1

            #fg.output_fields(outfile, options.id_filename, options.id_template, options.magnets_filename, maglist)
            # BUG FIX: pickle requires a binary-mode file on Python 3; the
            # original opened with mode 'w' (and leaked an unused handle on
            # the input file).
            with open(os.path.split(filename)[1] + '.genome', 'wb') as fp:
                pickle.dump(maglist, fp)

    if options.readable:
        for filename in args[0::]:
            print("Making file %s human readable." % (filename))

            human_output(options.id_filename, filename)

    if options.analysis:
        for filename in args[0::]:
            print("Processing file %s" % (filename))
            # load the genome
            with open(filename, "rb") as genome_file:
                maglists = pickle.load(genome_file)

            outfile = (os.path.split(filename)[1] + '.h5')
            fg.output_fields(outfile, options.id_filename, options.id_template,
                             options.magnets_filename, maglists)