Code Example #1
File: lusolver.py  Project: ellipsis14/dolfin-adjoint
    def solve(self, var, b):

      # reuse_factorization, lu_solvers, adj_lu_solvers and idx are defined
      # outside this snippet, in the enclosing module.
      if reuse_factorization is False:
        return adjlinalg.Matrix.solve(self, var, b)

      if var.type in ['ADJ_TLM', 'ADJ_ADJOINT']:
        bcs = ([dolfin.homogenize(bc) for bc in self.bcs if isinstance(bc, dolfin.DirichletBC)]
               + [bc for bc in self.bcs if not isinstance(bc, dolfin.DirichletBC)])
      else:
        bcs = self.bcs

      if var.type in ['ADJ_FORWARD', 'ADJ_TLM']:
        if lu_solvers[idx] is None:
          A = assembly.assemble(self.data)
          for bc in bcs:
            bc.apply(A)
          lu_solvers[idx] = LUSolver(A)
          lu_solvers[idx].parameters["reuse_factorization"] = True
        solver = lu_solvers[idx]

      else:
        if adj_lu_solvers[idx] is None:
          A = assembly.assemble(self.data)
          for bc in bcs:
            bc.apply(A)
          adj_lu_solvers[idx] = LUSolver(A)
          adj_lu_solvers[idx].parameters["reuse_factorization"] = True
        solver = adj_lu_solvers[idx]

      x = adjlinalg.Vector(dolfin.Function(self.test_function().function_space()))

      if b.data is None:
        # This means we did not get any contribution on the RHS of the adjoint system. This could mean
        # that the simulation ran further ahead than the point at which the functional was evaluated,
        # or that the functional is set up incorrectly.
        dolfin.info_red("Warning: got zero RHS for the solve associated with variable %s" % var)
      else:
        if isinstance(b.data, dolfin.Function):
          b_vec = b.data.vector().copy()
        else:
          b_vec = dolfin.assemble(b.data)

        for bc in bcs:
          bc.apply(b_vec)
        solver.solve(x.data.vector(), b_vec, annotate=False)

      return x
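
The solve method above relies on reuse_factorization, idx, lu_solvers and adj_lu_solvers being set up elsewhere in the module. A minimal sketch of one plausible setup for those caches; the names follow the snippet, but the initialisation itself is an assumption, not dolfin-adjoint's actual code:

# Hypothetical setup for the solver caches used by solve() above.
reuse_factorization = True

# One cache slot per assembled operator; idx identifies the operator being solved.
num_operators = 4
lu_solvers = [None] * num_operators       # forward / TLM solves
adj_lu_solvers = [None] * num_operators   # adjoint solves
idx = 0
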
Code Example #2
File: ybd.py  Project: devcurmudgeon/cida
import os
import sys

import sandboxlib

import app
import cache
import sandbox
from assembly import assemble, deploy
from definitions import Definitions


print('')
app.setup(sys.argv)
with app.timer('TOTAL', '%s starts, version %s' % (app.settings['program'],
               app.settings['program-version'])):
    target = os.path.join(app.settings['defdir'], app.settings['target'])
    app.log('TARGET', 'Target is %s' % target, app.settings['arch'])
    with app.timer('DEFINITIONS', 'Parsing %s' % app.settings['def-version']):
        defs = Definitions()
    with app.timer('CACHE-KEYS', 'Calculating'):
        cache.get_cache(defs, app.settings['target'])
    defs.save_trees()

    sandbox.executor = sandboxlib.executor_for_platform()
    app.log(app.settings['target'], 'Sandbox using %s' % sandbox.executor)

    assemble(defs, app.settings['target'])
    deploy(defs, app.settings['target'])
Code Example #3
    def dealWithConstructionPhase(self, hom_fam_list, output_directory, log):
        self.ancestor_hom_fams = assembly.assemble(
            hom_fam_list,
            self.adj.realizable_adjacencies,
            self.RSI.realizable_RSIs,
            self.ancestor_name,
        )
        self.writeAncestorHomFams(output_directory + "/ancestor_hom_fams")
        # To order the hom_fams in chromosomes, create a Genome object with
        # the new hom_fams.
        self.ancestor_genomes = genomes.get_genomes(self.ancestor_hom_fams,
                                                    [self.ancestor_name])
        ancestor_genome = next(self.ancestor_genomes.itervalues())

        # Create an adjacencies list in an easier format for the checks below
        i = 0
        adj_str_list = []
        adj_doubled_list = []
        for key in self.adj.realizable_adjacencies.endpoints.keys():
            for adj_pair in self.adj.realizable_adjacencies.endpoints[key]:
                save_pair = []
                save_pair.append(int(adj_pair[0][:-2]))
                save_pair.append(int(adj_pair[1][:-2]))
                save_pair.sort()
                if save_pair not in adj_str_list:
                    adj_str_list.append(save_pair)

                save_pair_str = []
                save_pair_str.append(adj_pair[0])
                save_pair_str.append(adj_pair[1])
                adj_doubled_list.append(save_pair_str)
            i = i + 1

        # Get the length of the largest RSI
        RSI_no_doubling = []
        max_RSI = 0
        for index, markers in enumerate(self.RSI.realizable_RSIs.itervalues()):
            RSI_no_doubling = [
                self.remove_head_tail(s) for s in markers.marker_ids
            ]
            self.RSI_strings.append(" ".join(
                self.remove_duplicates(RSI_no_doubling)))
            if len(RSI_no_doubling) > max_RSI:
                max_RSI = len(self.remove_duplicates(RSI_no_doubling))

        # Edit the ancestral genome (check RCs and RSIs, and whether each CAR is circular)
        try:
            genome_output = open(output_directory + "/ancestor_genome", 'w')
            genome_output.write(">" + self.ancestor_name)

            # Deal with RCs
            for index, rc in enumerate(self.adj.getRepeatClusterList()):
                genome_output.write("\n#RC " + str(index + 1) + "\n" + rc)
            print(self.adj.getRepeatClusterListInt())
            genome_output.write("\n")

            # Deal with CARs
            RC_adjacencies = []
            CAR_total_list = []
            CAR_string_aux = ""
            # Each chromosome of the ancestor genome corresponds to a CAR.
            for chrom_id, chrom in ancestor_genome.chromosomes.iteritems():
                CAR_string = ""
                index = 0
                previous_position = []
                while index < len(chrom):
                    flag = False
                    for index_rc, rc in enumerate(
                            self.adj.getRepeatClusterListInt()):
                        if int(chrom[index].id) in rc:
                            flag = True
                            idx_rc = str(index_rc + 1)

                    old_len = len(CAR_string)
                    CAR_string = CAR_string + str(chrom[index].id) + " "
                    new_len = len(CAR_string)
                    rc_len = new_len - old_len
                    if flag:  # marker is in a repeat cluster; check whether it is an RSI
                        i = 0
                        chrom_index = index + 1
                        CAR_string_aux = CAR_string
                        while i < max_RSI and chrom_index < len(chrom):
                            CAR_string_aux = CAR_string_aux + str(
                                chrom[chrom_index].id) + " "
                            chrom_index = chrom_index + 1
                            i = i + 1
                        found_rsi = False
                        for rsi_unit in self.RSI_strings:
                            position = [
                                m.start()
                                for m in re.finditer(rsi_unit, CAR_string_aux)
                            ]
                            if position and position not in previous_position:
                                previous_position.append([
                                    m.start() for m in re.finditer(
                                        rsi_unit, CAR_string_aux)
                                ])
                                found_rsi = True
                                rsi_found = rsi_unit
                        if not found_rsi:
                            CAR_lst_int = [
                                int(x) for x in CAR_string[:-1].split(" ")
                            ]
                            CAR_adj_pair = [CAR_lst_int[-2], CAR_lst_int[-1]]
                            CAR_adj_pair.sort()
                            RC_adjacencies.append(CAR_adj_pair)
                            CAR_string = CAR_string[:-rc_len] + "RC" + idx_rc + " "
                            break
                        else:  # found RSI
                            cut = CAR_string[1::-1].find(" ")
                            CAR_string = CAR_string[:-rc_len -
                                                    cut] + rsi_found + " "
                            index = index + max_RSI
                    else:
                        last_marker_id = chrom[index].id
                    index = index + 1

                # Check whether the CAR is circular
                marker_pair = [chrom[0].id + "_h", last_marker_id + "_t"]
                marker_pair2 = [chrom[0].id + "_t", last_marker_id + "_h"]
                if marker_pair in adj_doubled_list or marker_pair2 in adj_doubled_list:
                    CAR_string = "_C " + CAR_string + "C_"
                else:
                    CAR_string = "_Q " + CAR_string + "Q_"

                CAR_total_list.append(CAR_string)

        except IOError:
            log.write(
                "{}  ERROR (master.py) - could not write ancestor genome to "
                "file: {}\n".format(strtime(),
                                    output_directory + "/ancestor_genome"))
            sys.exit()

        log.write(
            "{}  Assembled the ancestral genome, found a total of {} CARs and {} RCs.\n"
            .format(strtime(), len(ancestor_genome.chromosomes),
                    len(self.adj.getRepeatClusterList())))
        log.write("{}  Done.\n".format(strtime()))

        # Write the ancestral genome (CARs)
        genome_output.write("\n")
        CAR_total_list.sort(key=len, reverse=True)
        for idx_car, CAR in enumerate(CAR_total_list):
            print_CAR = ("#CAR " + str(idx_car + 1) + "\n" + CAR + "\n")
            genome_output.write(print_CAR)

        # Check that the adjacencies in the ancestral genome are among the realizable adjacencies
        # (report those that are not), and that every realizable adjacency appears in the
        # ancestral genome (report those that do not).
        self.checkAncestralAdjacencies(ancestor_genome, adj_str_list,
                                       RC_adjacencies)
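
The adjacency handling above assumes doubled marker extremities named with "_h"/"_t" suffixes, which adj_pair[x][:-2] strips to recover the integer marker id. A minimal standalone illustration of that convention; the values are hypothetical, not data from the project:

# Doubled-marker convention assumed by dealWithConstructionPhase: each marker X
# contributes the two extremities "X_h" (head) and "X_t" (tail).
adj_pair = ("12_h", "7_t")   # one realizable adjacency, hypothetical values

save_pair = sorted(int(end[:-2]) for end in adj_pair)
print(save_pair)             # [7, 12] -- the integer form stored in adj_str_list
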
Code Example #4
File: __main__.py  Project: padrigali/ybd
app.cleanup(app.config['tmp'])

with app.timer('TOTAL'):
    lockfile = open(os.path.join(app.config['tmp'], 'lock'), 'r')
    fcntl.flock(lockfile, fcntl.LOCK_SH | fcntl.LOCK_NB)

    target = os.path.join(app.config['defdir'], app.config['target'])
    app.log('TARGET', 'Target is %s' % target, app.config['arch'])
    with app.timer('DEFINITIONS', 'parsing %s' % app.config['def-version']):
        defs = Definitions()
    with app.timer('CACHE-KEYS', 'cache-key calculations'):
        cache.cache_key(defs, app.config['target'])
    defs.save_trees()

    sandbox.executor = sandboxlib.executor_for_platform()
    app.log(app.config['target'], 'Sandbox using %s' % sandbox.executor)
    if sandboxlib.chroot == sandbox.executor:
        app.log(app.config['target'], 'WARNING: rogue builds in a chroot ' +
                'sandbox may overwrite your system')

    if app.config.get('instances'):
        app.spawn()

    done = False
    while not done:
        try:
            done = assemble(defs, app.config['target'])
        except:
            pass
    deploy(defs, app.config['target'])
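
The bare "except: pass" above retries assemble() indefinitely on any failure, including KeyboardInterrupt. A bounded variant with explicit exception handling is sketched below; the retry limit and the logging message are assumptions, not part of ybd:

# Hypothetical bounded retry around assemble(); not ybd's actual code.
MAX_ATTEMPTS = 5

done = False
attempts = 0
while not done and attempts < MAX_ATTEMPTS:
    attempts += 1
    try:
        done = assemble(defs, app.config['target'])
    except KeyboardInterrupt:
        raise                 # let the user abort the build
    except Exception as e:
        app.log(app.config['target'], 'assemble failed (%s), retrying' % e)
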
Code Example #5
File: ybd.py  Project: jamespthomas/ybd
import os
import sys
import platform

import app
import cache
import sandbox
from assembly import assemble, deploy
from definitions import Definitions


print('')
if len(sys.argv) not in [2, 3]:
    sys.stderr.write("Usage: %s DEFINITION_FILE [ARCH]\n\n" % sys.argv[0])
    sys.exit(1)

target = sys.argv[1]
if len(sys.argv) == 3:
    arch = sys.argv[2]
else:
    arch = platform.machine()

with app.setup(target, arch):
    with app.timer('TOTAL', 'ybd starts, version %s' %
                   app.settings['ybd-version']):
        app.log('TARGET',
                'Target is %s' % os.path.join(app.settings['defdir'], target),
                arch)
        with app.timer('DEFINITIONS', 'Parsing %s' % app.settings['def-ver']):
            defs = Definitions()
        with app.timer('CACHE-KEYS', 'Calculating'):
            cache.get_cache(app.settings['target'])
        defs.save_trees()
        assemble(app.settings['target'])
        deploy(app.settings['target'])
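
Given the argument handling above, an invocation would look like the following; DEFINITION_FILE is a placeholder, and the architecture argument falls back to platform.machine() when omitted:

# Example invocations (DEFINITION_FILE is a placeholder):
#   python ybd.py DEFINITION_FILE x86_64
#   python ybd.py DEFINITION_FILE            # arch defaults to platform.machine()
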
Code Example #6
#mesh = '../mesh/mesh_test.txt'
#connect = '../mesh/connect_test.txt'
#sides = '../mesh/sides_test.txt'

# NumPy is needed for the calls below; read_in, assemble and Apply_EBC come
# from the project's own modules (their imports are outside this snippet).
import numpy as np

mesh = 'basic.txt'
connect = 'basic_con.txt'
sides = 'basic_sides.txt'

mesh, con_mat, top, left, bottom, right = read_in(mesh, connect, sides)
#print(bottom)
# Run the analysis

dim = 5

K, F = assemble(con_mat, mesh)
#i_vals,j_vals,v_vals,F = assemble_ijv(con_mat,mesh)
#BC_vals = np.zeros(dim) #set top EBC to zero
#BC_vals = np.zeros(8)
#for i in range(len(BC_vals)):
# BC_vals[i] +=5
#BC_vals = [1]
#boundary = bottom+top
#BC_vals = [10,10,10,0,0,0]
boundary = top + left + bottom + right
BC_vals = np.zeros(len(boundary))
print("The determinant of K is: ", np.linalg.det(K))
#print(F)
K_EBC, F_EBC, N_wo = Apply_EBC(K, F, mesh, boundary, BC_vals)
#print(F_EBC)
#Basic - very basic - solving
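
The snippet ends at the comment about solving. A minimal sketch of the step it points to, assuming K_EBC and F_EBC are the reduced dense system returned by Apply_EBC (an assumption, not the original code):

# Hypothetical direct solve of the reduced system.
u_reduced = np.linalg.solve(K_EBC, F_EBC)
print("Solution at the unconstrained degrees of freedom:", u_reduced)
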
Code Example #7
File: __main__.py  Project: mwilliams-ct/ybd
    defs.save_trees()

    sandbox.executor = sandboxlib.executor_for_platform()
    app.log(app.config['target'], 'Sandbox using %s' % sandbox.executor)
    if sandboxlib.chroot == sandbox.executor:
        app.log(
            app.config['target'], 'WARNING: using chroot is less safe ' +
            'than using linux-user-chroot')

    if app.config.get('instances'):
        app.spawn()

    target = defs.get(app.config['target'])
    while True:
        try:
            assemble(defs, target)
            break
        except KeyboardInterrupt:
            app.log(target, 'Interrupted by user')
            os._exit(1)
        except RetryException:
            pass
        except:
            import traceback
            traceback.print_exc()
            app.log(target, 'Exiting: uncaught exception')
            os._exit(1)

    if target.get('kind') == 'cluster' and app.config.get('fork') is None:
        with app.timer(target, 'cluster deployment'):
            deploy(defs, target)
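
This example and the next rely on a RetryException type imported elsewhere in ybd (not shown in the snippet). A minimal stand-in if you want to exercise the loop in isolation; this is an assumption, not ybd's own definition:

# Hypothetical stand-in for the RetryException imported elsewhere in ybd.
class RetryException(Exception):
    pass
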
Code Example #8
File: __main__.py  Project: locallycompact/ybd
        cache.cache_key(defs, app.config['target'])
    defs.save_trees()

    sandbox.executor = sandboxlib.executor_for_platform()
    app.log(app.config['target'], 'Sandbox using %s' % sandbox.executor)
    if sandboxlib.chroot == sandbox.executor:
        app.log(app.config['target'], 'WARNING: using chroot is less safe ' +
                'than using linux-user-chroot')

    if app.config.get('instances'):
        app.spawn()

    target = defs.get(app.config['target'])
    while True:
        try:
            assemble(defs, target)
            break
        except KeyboardInterrupt:
            app.log(target, 'Interrupted by user')
            os._exit(1)
        except RetryException:
            pass
        except:
            import traceback
            traceback.print_exc()
            app.log(target, 'Exiting: uncaught exception')
            os._exit(1)

    if target.get('kind') == 'cluster' and app.config.get('fork') is None:
        with app.timer(target, 'cluster deployment'):
            deploy(defs, target)
Code Example #9
File: main.py  Project: boshijingang/CCompiler
tree, context = parser.parse(tokens)

if settings.DISPLAY_TREE:
    tree.display()

prog = generation.generate_program(tree, context)

funcs = [str(p) for p in prog.functions]

optimized = optimize.optimize(prog)

if settings.DISPLAY_OPTIMIZATION:
    for p_func, o_func in zip(funcs, optimized.functions):
        print("\n")
        utils.compare(p_func, str(o_func))

if settings.DISPLAY_INTERMEDIATE and not settings.DISPLAY_OPTIMIZATION:
    for func in optimized.functions:
        print("")
        print(str(func))

result = assembly.assemble(optimized)

if settings.DISPLAY_ASM:
    print(result)

with open(output_file_name, 'w') as f:
    f.write(result)
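
The result string holds the generated assembly, which the script writes to output_file_name. A possible follow-up step is to assemble and link that file with a system toolchain; the use of gcc and the output name here are assumptions, not part of this project:

# Hypothetical: assemble and link the emitted assembly file with gcc.
import subprocess
subprocess.check_call(['gcc', output_file_name, '-o', 'a.out'])
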
Code Example #10
# Make working directories if they do not yet exist
# or just return them if they do
(working_directory, download_directory, contigs_directory, blast_directory,
 annotations_directory, alignments_directory, trees_directory) = create_dirs()

### 1  Downloading & de novo assembly of data ###

# Get SRA data
get_ascensions(download_directory)

# Subset downloaded read files
subset(download_directory)

# Assemble subsetted read files
assemble(download_directory)

# Collect & filter the assembly results into a single folder
collect_assemblies(download_directory, contigs_directory)

### 2  Annotating and finding mtplasmids ###

# Make a database to BLAST ORFs against
makeBLASTdatabase(annotations_directory, blast_directory)

# BLAST the assembled contigs to find mtplasmids
blast_orfs(blast_directory, contigs_directory)

### 3  Aligning & Generating trees ###

# Align fasta files obtained during annotation
Code Example #11
File: ybd.py  Project: grahamfinney/ybd
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# =*= License: GPL-2 =*=
'''A module to build a definition.'''

import os
import sys
from definitions import Definitions
import cache
import app
from assembly import assemble
import sandbox

target = os.path.splitext(os.path.basename(sys.argv[1]))[0]
arch = sys.argv[2]
with app.setup(target, arch):
    with app.timer('TOTAL', 'YBD starts'):
        defs = Definitions()
        definition = defs.get(target)
        with app.timer('CACHE-KEYS', 'Calculating'):
            cache.get_cache(target)
        defs.save_trees()
        assemble(definition)