Example No. 1
def test_multiprocess(self):
    # now let's try with several processes
    pool = multiprocessing.Pool(10)
    try:
        inputs = [1] * 3000
        pool.map(run_worker, inputs)
        self.assertEqual(_DATA["test"].value, 3000)
    finally:
        pool.close()
        pool.join()
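# A minimal sketch of the module-level pieces this test assumes
# (`_DATA` and `run_worker` are hypothetical reconstructions; they rely
# on a fork start method so that pool workers inherit `_DATA`):
import multiprocessing

_DATA = {"test": multiprocessing.Value("i", 0)}  # shared integer counter

def run_worker(increment):
    # Each pool worker atomically adds its input to the shared counter.
    with _DATA["test"].get_lock():
        _DATA["test"].value += increment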
Example No. 2
def forecast(nTime, wsats0, perms, Q_prod=None, desc="En. forecast"):
    """Forecast for an ensemble."""
    # Compose ensemble
    if Q_prod is None:
        E = zip(wsats0, perms)
    else:
        E = zip(wsats0, perms, Q_prod)

    def forecast1(x):
        model_n = deepcopy(model)

        if Q_prod is None:
            wsat0, perm = x
            # Set ensemble
            set_perm(model_n, perm)
        else:
            wsat0, perm, q_prod = x
            # Set production rates
            # model_n.producers uses an absolute scale, so start from
            # well_grid instead; copy so the global array is not mutated.
            prod = well_grid.copy()
            prod[:, 2] = q_prod
            model_n.init_Q(
                inj=model_n.injectors,
                prod=prod,
            )
            # Set ensemble
            set_perm(model_n, perm)

        # Simulate
        s, p = simulate(model_n.step, nTime, wsat0, dt, obs, pbar=False)
        return s, p

    # Allocate
    production = np.zeros((N, nTime, nProd))
    saturation = np.zeros((N, nTime+1, model.M))

    # Dispatch
    if multiprocess:
        import multiprocessing_on_dill as mpd
        with mpd.Pool() as pool:
            E = list(progbar(pool.imap(forecast1, E), total=N, desc=desc))
        # Write
        for n, member in enumerate(E):
            saturation[n], production[n] = member

    else:
        for n, xn in enumerate(progbar(list(E), "Members")):
            s, p = forecast1(xn)
            # Write
            saturation[n], production[n] = s, p

    return saturation, production
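# Hypothetical driver for the function above, assuming the module globals
# it uses (model, dt, obs, N, nProd, multiprocess, progbar, ...) are set up:
wsats0 = np.zeros((N, model.M))                # initial water saturations
perms = np.exp(np.random.randn(N, model.M))    # log-normal permeability ensemble
saturation, production = forecast(nTime=10, wsats0=wsats0, perms=perms)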
Example No. 3
import functools

import multiprocessing_on_dill as mpd  # assumed import, matching the later examples


def map(func, xx, **kwargs):
    """A parallelized version of map.

    Similar to:

    >>> result = [func(x, **kwargs) for x in xx]

    Also deals with:

     - passing kwargs
     - join(), close()

    Note: in contrast to reading operations, writing "in-place"
    does not work with multiprocessing. This changes with
    "shared" arrays, but this has not been tried out here.
    Multithreading shares memory,
    but was significantly slower in the tested (pertinent) cases.

    NB: multiprocessing does not mix with matplotlib,
        so ensure ``func`` does not reference ``self.stats.LP_instance``,
        where ``self`` is a ``@da_method`` object.
        In fact, ``func`` should not reference ``self`` at all,
        because its serialization is rather slow.

    See example use in `dapper.mods.QG`
    """

    NPROC = None  # None => multiprocessing.cpu_count()
    pool = mpd.Pool(NPROC)

    try:
        f = functools.partial(func, **kwargs)  # Fix kwargs

        # map vs imap: https://stackoverflow.com/a/26521507
        result = pool.map(f, xx)

    except Exception:
        pool.terminate()
        raise
    else:
        pool.close()  # workers exit once their jobs are done
    finally:
        pool.join()

    return result
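# Usage sketch of the parallel `map` above, with a hypothetical worker:
def affine(x, a=2, b=1):
    return a * x + b

result = map(affine, range(8), a=3)  # parallel [3*x + 1 for x in range(8)]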
Example No. 4
def Pool(NPROC=None):
    """Initialize a multiprocessing `Pool`.

    - Uses `pathos/dill` for serialisation.
    - Provides unified interface for multiprocessing on/off (as a function of NPROC).

    There is some overhead associated with the pool creation,
    so you likely want to re-use a pool rather than repeatedly creating one.
    Consider using `functools.partial` to fix kwargs.

    .. note::
        In contrast to *reading*, in-place writing does not work with multiprocessing.
        This changes with "shared" arrays, but that has not been tested here.
        By contrast, multi*threading* shares the process memory,
        but was significantly slower in the tested (pertinent) cases.

    .. caution::
        `multiprocessing` does not mix with `matplotlib`, so ensure `func` does not
        reference `xp.stats.LP_instance`. In fact, `func` should not reference `xp`
        at all, because it takes time to serialize.

    See example use in `dapper.mods.QG` and `dapper.da_methods.LETKF`.
    """
    if NPROC == False:  # noqa: E712 (equality on purpose: also treats NPROC=0 as "off")
        # Yield plain old map
        import builtins

        class NoPool:
            def __enter__(self):
                # Expose `builtins` so that `pool.map` is the (serial) built-in map.
                return builtins

            def __exit__(self, *args):
                pass

        return NoPool()

    else:
        # from psutil import cpu_percent, cpu_count
        if NPROC in [True, None]:
            NPROC = mpd.cpu_count() - 1  # be nice

        return mpd.Pool(NPROC)
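# Usage sketch: the same call site works with multiprocessing on or off.
with Pool(NPROC=False) as pool:            # serial: `pool` is the builtins module
    serial = list(pool.map(abs, [-1, -2]))

with Pool(4) as pool:                      # parallel: 4 dill-backed workers
    parallel = pool.map(abs, [-1, -2])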
Example No. 5
import functools
import multiprocessing


def multiproc_map(func,xx,**kwargs):
  """
  A parallelized version of:
  result = [func(x, **kwargs) for x in xx]

  Note: unlike reading, writing "in-place" does not work with multiprocessing
  (unless "shared" arrays are used, but this has not been tried out here).

  See example use in mods/QG/core.py.

  Technicalities dealt with:
   - passing kwargs
   - join(), close()

  However, the main achievement of this helper function is to make
  "Ctrl+C", i.e. KeyboardInterruption,
  stop the execution of the program, and do so "gracefully",
  something which is quite tricky to achieve with multiprocessing.
  """

  # The Ctrl-C issue is mainly cosmetic, but annoying. Desired behaviour:
  #  - Pressing Ctrl-C should terminate execution.
  #  - It should only be necessary to press Ctrl-C once.
  #  - The traceback should not extend beyond the multiprocessing management
  #    (i.e. not into the worker code), and should therefore be cropped.
  #
  # NB: Here be dragons!
  # This solution is mostly based on stackoverflow.com/a/35134329.
  # I don't (fully) understand why the issues arise,
  # nor why my patchwork solution (somewhat) works.
  #
  # I urge great caution in modifying this code, because reproducing
  # the issues is difficult (behaviour depends on where execution is
  # when Ctrl-C is pressed), which makes testing difficult.
  #
  # Alternatives to try:
  # - Use concurrent.futures, as the bug seems to have been patched there:
  #   https://bugs.python.org/issue9205. However, does this work with dill?
  # - Multithreading: has the advantage of sharing memory,
  #   but was significantly slower than using processes
  #   in testing on DAPPER-relevant cases.


  # Ignore Ctrl-C.
  # Alternative: Pool(initializer=[ignore sig]).
  # But the following way seems to work better.
  import signal
  orig = signal.signal(signal.SIGINT, signal.SIG_IGN)

  # Setup multiprocessing pool (pool workers should ignore Ctrl-C)
  NPROC = None # None => multiprocessing.cpu_count()
  pool = multiprocessing.Pool(NPROC)

  # Restore Ctrl-C action
  signal.signal(signal.SIGINT, orig)

  try:
    f = functools.partial(func,**kwargs) # Fix kwargs

    # map vs imap: stackoverflow.com/a/26521507
    result = pool.map(f,xx) 

    # Relating to Ctrl-C issue, map_async was preferred: stackoverflow.com/a/1408476
    # However, this does not appear to be necessary anymore...
    # result = pool.map_async(f, xx)
    # timeout = 60 # Required for get() to not ignore signals.
    # result = result.get(timeout)

  except KeyboardInterrupt as e:
    try:
      pool.terminate()
      # Attempts to propagate "Ctrl-C" with reasonable traceback print:
      # ------------------------------------------------------------------
      # ALTERNATIVE 1: ------- bad: only includes the multiprocessing trace.
      # traceback.print_tb(e.__traceback__,limit=1)
      # sys.exit(0)
      # ALTERNATIVE 2: ------- bad: includes the multiprocessing trace.
      # raise e
      # ALTERNATIVE 3: ------- bad: includes the multiprocessing trace.
      # raise KeyboardInterrupt
      # ALTERNATIVE 4:
      was_interrupted = True
    except KeyboardInterrupt as e2:
      # Sometimes the KeyboardInterrupt caught above just causes things to hang,
      # and another Ctrl-C is required, which is then caught by this 2nd try/except.
      pool.terminate()
      was_interrupted = True
  else:
    # Resume normal execution
    was_interrupted = False
    pool.close() # => Processes will terminate once their jobs are done.

  try:
    # Helps with debugging,
    # according to stackoverflow.com/a/38271957
    pool.join() 
  except KeyboardInterrupt as e:
    # Also need to handle Ctrl-C in join()...
    # This might necessitate pressing Ctrl-C again, but it's
    # better than getting spammed by traceback full of garbage.
    pool.terminate()
    was_interrupted = True

  # Start the KeyboardInterrupt trace here.
  if was_interrupted:
    raise KeyboardInterrupt

  return result
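# Usage sketch with a hypothetical worker; pressing Ctrl-C during the map
# aborts cleanly with a short KeyboardInterrupt trace:
import time

def slow_square(x, delay=0.01):
  time.sleep(delay)
  return x * x

result = multiproc_map(slow_square, range(100), delay=0.05)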
Example No. 6
    N = len(layers)
    idx = 0
    __end = '\r'
    time0 = time.time()
    for l in layers:
        time1 = time.time()
        B = P.get_boundaries_in_layer(l, area_thresh=params.area_thresh,
                                      scale_bounding_box=params.scale_bounding_box)
        overlap = P.get_overlapping_boundaries(B)

        if params.nproc == 1:
            adj = P.batch_compute_adjacency(overlap,
                                            pixel_radius=params.pixel_radius)
        else:
            overlap_split = [overlap[i::params.nproc] for i in range(params.nproc)]
            # Use a context manager so that the pool created on each loop
            # iteration is properly closed and joined.
            with mp.Pool(processes=params.nproc) as pool:
                results = [pool.apply_async(submit_batch,
                                            args=(P, o, params.pixel_radius))
                           for o in overlap_split]
                adj = [o for p in results for o in p.get()]

        xlayer = root.find("layer[@name='%s']" %l)
        for (b1,b2,_adj) in adj:
            xarea = etree.SubElement(xlayer,'area')
            cell1 = etree.SubElement(xarea,'cell1')
            cell1.text = b1.name
            cell2 = etree.SubElement(xarea,'cell2')
            cell2.text = b2.name
            idx1 = etree.SubElement(xarea,'index1')
            idx1.text = str(b1.index)
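# A minimal sketch of the worker dispatched above; the body is hypothetical,
# inferred from the serial branch:
def submit_batch(P, overlap_batch, pixel_radius):
    # Runs in a separate process; P is pickled into the worker.
    return P.batch_compute_adjacency(overlap_batch, pixel_radius=pixel_radius)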
Example No. 7
def FullSweepMulti(Object, Order, alpha, inorout, mur, sig, Array, CPUs,
                   BigProblem):
    Object = Object[:-4] + ".vol"
    #Set up the Solver Parameters
    Solver, epsi, Maxsteps, Tolerance = SolverParameters()

    #Loading the object file
    ngmesh = ngmeshing.Mesh(dim=3)
    ngmesh.Load("VolFiles/" + Object)

    #Creating the mesh and defining the element types
    mesh = Mesh("VolFiles/" + Object)
    mesh.Curve(5)  #Curve the mesh elements to better capture the geometry
    numelements = mesh.ne  #Count the number of elements
    print(" mesh contains " + str(numelements) + " elements")

    #Set up the coefficients
    #Scalars
    Mu0 = 4 * np.pi * 10**(-7)
    NumberofFrequencies = len(Array)
    #Coefficient functions
    mu_coef = [mur[mat] for mat in mesh.GetMaterials()]
    mu = CoefficientFunction(mu_coef)
    inout_coef = [inorout[mat] for mat in mesh.GetMaterials()]
    inout = CoefficientFunction(inout_coef)
    sigma_coef = [sig[mat] for mat in mesh.GetMaterials()]
    sigma = CoefficientFunction(sigma_coef)

    #Set up how the tensor and eigenvalues will be stored
    N0 = np.zeros([3, 3])
    TensorArray = np.zeros([NumberofFrequencies, 9], dtype=complex)
    RealEigenvalues = np.zeros([NumberofFrequencies, 3])
    ImaginaryEigenvalues = np.zeros([NumberofFrequencies, 3])
    EigenValues = np.zeros([NumberofFrequencies, 3], dtype=complex)

    #########################################################################
    #Theta0
    #This section solves the Theta0 problem to calculate both the inputs for
    #the Theta1 problem and calculate the N0 tensor

    #Setup the finite element space
    fes = HCurl(mesh, order=Order, dirichlet="outer", flags={"nograds": True})
    #Count the number of degrees of freedom
    ndof = fes.ndof

    #Define the vectors for the right hand side
    evec = [
        CoefficientFunction((1, 0, 0)),
        CoefficientFunction((0, 1, 0)),
        CoefficientFunction((0, 0, 1))
    ]

    #Setup the grid functions and array which will be used to save
    Theta0i = GridFunction(fes)
    Theta0j = GridFunction(fes)
    Theta0Sol = np.zeros([ndof, 3])

    #Setup the inputs for the functions to run
    Theta0CPUs = min(3, multiprocessing.cpu_count(), CPUs)
    Runlist = []
    for i in range(3):
        if Theta0CPUs < 3:
            NewInput = (fes, Order, alpha, mu, inout, evec[i], Tolerance,
                        Maxsteps, epsi, i + 1, Solver)
        else:
            NewInput = (fes, Order, alpha, mu, inout, evec[i], Tolerance,
                        Maxsteps, epsi, "No Print", Solver)
        Runlist.append(NewInput)
    #Run on the multiple cores
    with multiprocessing.Pool(Theta0CPUs) as pool:
        Output = pool.starmap(Theta0, Runlist)
    print(' solved theta0 problems    ')

    #Unpack the outputs
    for i, Direction in enumerate(Output):
        Theta0Sol[:, i] = Direction

    #Calculate the N0 tensor
    VolConstant = Integrate(1 - mu**(-1), mesh)
    for i in range(3):
        Theta0i.vec.FV().NumPy()[:] = Theta0Sol[:, i]
        for j in range(3):
            Theta0j.vec.FV().NumPy()[:] = Theta0Sol[:, j]
            if i == j:
                N0[i, j] = (alpha**3) * (VolConstant + (1 / 4) * (Integrate(
                    mu**(-1) *
                    (InnerProduct(curl(Theta0i), curl(Theta0j))), mesh)))
            else:
                N0[i, j] = (alpha**3 / 4) * (Integrate(
                    mu**(-1) *
                    (InnerProduct(curl(Theta0i), curl(Theta0j))), mesh))

    #########################################################################
    #Theta1
    #This section solves the Theta1 problem and saves the solution vectors

    #Setup the finite element space
    dom_nrs_metal = [0 if mat == "air" else 1 for mat in mesh.GetMaterials()]
    fes2 = HCurl(mesh,
                 order=Order,
                 dirichlet="outer",
                 complex=True,
                 gradientdomains=dom_nrs_metal)
    #Count the number of degrees of freedom
    ndof2 = fes2.ndof

    #Define the vectors for the right hand side
    xivec = [
        CoefficientFunction((0, -z, y)),
        CoefficientFunction((z, 0, -x)),
        CoefficientFunction((-y, x, 0))
    ]

    #Work out where to send each frequency
    Theta1_CPUs = min(NumberofFrequencies, multiprocessing.cpu_count(), CPUs)
    Core_Distribution = []
    Count_Distribution = []
    for i in range(Theta1_CPUs):
        Core_Distribution.append([])
        Count_Distribution.append([])

    #Distribute between the cores
    CoreNumber = 0
    count = 1
    for i, Omega in enumerate(Array):
        Core_Distribution[CoreNumber].append(Omega)
        Count_Distribution[CoreNumber].append(i)
        # Zigzag over the cores; the bound must be Theta1_CPUs (which may be
        # smaller than CPUs), otherwise CoreNumber can run off the end.
        if CoreNumber == Theta1_CPUs - 1 and count == 1:
            count = -1
        elif CoreNumber == 0 and count == -1:
            count = 1
        else:
            CoreNumber += count

    #Create the inputs
    Runlist = []
    manager = multiprocessing.Manager()
    counter = manager.Value('i', 0)
    for i in range(Theta1_CPUs):
        Runlist.append(
            (Core_Distribution[i], mesh, fes, fes2, Theta0Sol, xivec, alpha,
             sigma, mu, inout, Tolerance, Maxsteps, epsi, Solver, N0,
             NumberofFrequencies, False, True, counter, False))

    #Run on the multiple cores
    with multiprocessing.Pool(Theta1_CPUs) as pool:
        Outputs = pool.starmap(Theta1_Sweep, Runlist)

    #Unpack the results
    for i, Output in enumerate(Outputs):
        for j, Num in enumerate(Count_Distribution[i]):
            TensorArray[Num, :] = Output[0][j]
            EigenValues[Num, :] = Output[1][j]

    print("Frequency Sweep complete")

    return TensorArray, EigenValues, N0, numelements
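# Hypothetical call, assuming the .vol file and per-material dictionaries exist:
TensorArray, EigenValues, N0, ne = FullSweepMulti(
    "sphere.vol", Order=2, alpha=0.01,
    inorout={"air": 0, "sphere": 1}, mur={"air": 1, "sphere": 50},
    sig={"air": 0, "sphere": 1e6},
    Array=np.logspace(1, 8, 40), CPUs=4, BigProblem=False)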
Example No. 8
def Main(
    Function=None,
    ListOfArgSets=None,
    Algorithm=None,
    Method=None,
    BatchSize=None,
    BatchCount=None,
    HideProgressBar=None,
    DefaultValue=None,
    CheckArguments=True,
    PrintExtra=True,
):

    StartTime = time.time()

    if BatchSize is None and BatchCount is None:
        BatchSize = 1
    #elif BatchSize is not None and BatchCount is None:
    #    pass
    #elif BatchSize is None and BatchCount is not None:
    #    pass

    #TODO: rename 'Algorithm' as 'Method'

    if HideProgressBar is None:
        HideProgressBar = False

    if Algorithm is None:
        Algorithm = 'loop'

    if (CheckArguments):
        ArgumentErrorMessage = ""

        if BatchSize is not None and BatchCount is not None:
            ArgumentErrorMessage += ' BatchSize is not None and BatchCount is not None...\n'

        if (len(ArgumentErrorMessage) > 0):
            if (PrintExtra):
                print("ArgumentErrorMessage:\n", ArgumentErrorMessage)
            raise Exception(ArgumentErrorMessage)

    #Determine if the ArgSets are named, or not.
    #   if not, we will assume the function is of a single variable
    ArgSetsNamed = isinstance(ListOfArgSets[0], dict)
    if PrintExtra: print('ArgSetsNamed', ArgSetsNamed)

    #Build an object to collect the results
    ResultList = []

    #Make a function which invokes `Function` and handles errors gracefully
    def WrappedFunction(ArgSet):
        WrappedResult = None
        try:
            if ArgSetsNamed:
                WrappedResult = Function(**ArgSet)  #Library_FunctionInvoker.Main( Function, ArgSet )
            else:
                WrappedResult = Function(ArgSet)
        except Exception as ExceptionObject:
            print('FAIL: ArgSet ', ArgSet)
            print(ExceptionObject)
            WrappedResult = DefaultValue
        return WrappedResult

    #Make a function which is designed to iterate over the wrapped function
    def BatchWrappedFunction(ArgSets):
        BatchResults = []
        for ArgSet in ArgSets:
            BatchResults.append(WrappedFunction(ArgSet))
        return BatchResults

    if (Algorithm == 'loop'):
        for ArgSet in tqdm.tqdm(ListOfArgSets, disable=HideProgressBar):
            ResultList.append(WrappedFunction(ArgSet))
    elif (Algorithm == 'pp'):
        #Parallel Python backend: not implemented; falls through and
        #returns an empty ResultList
        pass

    elif (Algorithm in ['multiprocessing', 'multiprocessing_on_dill']):

        CPU_count = multiprocessing_on_dill.cpu_count()
        if PrintExtra:
            print('CPU_count', CPU_count)

        #with multiprocessing_on_dill.Pool() as Pool:
        #PoolObject = multiprocessing_on_dill.Pool(CPU_count - 1)
        #ResultList = PoolObject.map( WrappedFunction, ListOfArgSets  )

        if BatchCount is None and BatchSize == 1:
            with multiprocessing_on_dill.Pool(
                    CPU_count, initializer=numpy.random.seed) as PoolObject:
                ResultList = list(
                    tqdm.tqdm(PoolObject.imap(WrappedFunction, ListOfArgSets),
                              total=len(ListOfArgSets),
                              disable=HideProgressBar))
        else:
            ListOfListsOfArgSets = Library_IterableSplitIntoChunks.Main(
                Iterable=ListOfArgSets,
                ChunkCount=BatchCount,
                ChunkLength=BatchSize,
            )
            with multiprocessing_on_dill.Pool(
                    CPU_count, initializer=numpy.random.seed) as PoolObject:
                ListOfResultLists = list(
                    tqdm.tqdm(PoolObject.imap(BatchWrappedFunction,
                                              ListOfListsOfArgSets),
                              total=len(ListOfListsOfArgSets),
                              disable=HideProgressBar))

                ResultList = list(
                    itertools.chain.from_iterable(ListOfResultLists))

    elif (Algorithm == 'mpi4py'):
        ErrMsg = 'Not Implemented Yet...'
        raise Exception(ErrMsg)

    elif (Algorithm == 'dougserver'):
        #dougserver.submitJob
        ErrMsg = 'Not Implemented Yet...'
        raise Exception(ErrMsg)
    else:
        ErrMsg = 'Unrecognized `Algorithm`... Library_ParallelLoop FAILED to execute'
        raise Exception(ErrMsg)

    EndTime = time.time()
    TimeTaken = EndTime - StartTime
    if not HideProgressBar:
        print('TimeTaken', TimeTaken)

    return ResultList
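# Usage sketch with a hypothetical worker:
def Add(a, b):
    return a + b

Results = Main(
    Function=Add,
    ListOfArgSets=[{'a': i, 'b': i + 1} for i in range(100)],
    Algorithm='multiprocessing_on_dill',
)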
Example No. 9
def PODP(mesh, fes0, fes, fes2, Theta0SolVec, xivec, alpha, sigma, mu, inout,
         epsi, Theta1E1Sol, Theta1E2Sol, Theta1E3Sol, FrequencyArray,
         ConstructedFrequencyArray, PODtol, N0Errors, alphaLB, PODErrorBars):

    #Whether to calculate the imaginary tensors in the full order space
    ImagTensorFullOrderCalc = True
    #Number of cores on which to produce the tensors
    CPUs = 4

    #Print an update on progress
    print(' performing SVD', end='\r')
    #Set up some useful constants
    NumberofFrequencies = len(FrequencyArray)
    NumberofConstructedFrequencies = len(ConstructedFrequencyArray)
    ndof = len(Theta1E1Sol)
    ndof2 = fes2.ndof
    ndof0 = fes0.ndof
    Mu0 = 4 * np.pi * 10**(-7)

    #Perform SVD on the solution vector matrices
    u1, s1, vh1 = np.linalg.svd(Theta1E1Sol, full_matrices=False)
    u2, s2, vh2 = np.linalg.svd(Theta1E2Sol, full_matrices=False)
    u3, s3, vh3 = np.linalg.svd(Theta1E3Sol, full_matrices=False)
    #Print an update on progress
    print(' SVD complete      ')

    #scale the value of the modes
    s1norm = s1 / s1[0]
    s2norm = s2 / s2[0]
    s3norm = s3 / s3[0]

    #Decide where to truncate: the first index at which all three
    #normalised singular values drop below the POD tolerance
    cutoff = NumberofFrequencies
    for i in range(NumberofFrequencies):
        if s1norm[i] < PODtol and s2norm[i] < PODtol and s3norm[i] < PODtol:
            cutoff = i
            break

    print(' truncating at mode', cutoff)

    #Truncate the SVD matrices
    u1Truncated = u1[:, :cutoff]
    s1Truncated = s1[:cutoff]
    vh1Truncated = vh1[:cutoff, :]

    u2Truncated = u2[:, :cutoff]
    s2Truncated = s2[:cutoff]
    vh2Truncated = vh2[:cutoff, :]

    u3Truncated = u3[:, :cutoff]
    s3Truncated = s3[:cutoff]
    vh3Truncated = vh3[:cutoff, :]

    #Turn s into a matrix
    s1mat = np.diag(s1)
    s1Truncatedmat = np.diag(s1Truncated)
    s2Truncatedmat = np.diag(s2Truncated)
    s3Truncatedmat = np.diag(s3Truncated)

    #Create where the final solution vectors will be saved
    W1 = np.zeros([ndof, NumberofConstructedFrequencies], dtype=complex)
    W2 = np.zeros([ndof, NumberofConstructedFrequencies], dtype=complex)
    W3 = np.zeros([ndof, NumberofConstructedFrequencies], dtype=complex)

    ########################################################################
    #Create the ROM

    #Set up the stiffness matrices and right hand side
    A1Constant = np.zeros([cutoff, cutoff], dtype=complex)
    A1Variable = np.zeros([cutoff, cutoff], dtype=complex)
    R1Variable = np.zeros([cutoff, 1], dtype=complex)

    A2Constant = np.zeros([cutoff, cutoff], dtype=complex)
    A2Variable = np.zeros([cutoff, cutoff], dtype=complex)
    R2Variable = np.zeros([cutoff, 1], dtype=complex)

    A3Constant = np.zeros([cutoff, cutoff], dtype=complex)
    A3Variable = np.zeros([cutoff, cutoff], dtype=complex)
    R3Variable = np.zeros([cutoff, 1], dtype=complex)

    #Print an update on progress
    print(' creating reduced order model', end='\r')
    with TaskManager():
        Mu0 = 4 * np.pi * 10**(-7)
        nu = Mu0 * (alpha**2)

        Theta0 = GridFunction(fes)

        u = fes0.TrialFunction()
        v = fes0.TestFunction()

        if PODErrorBars == True:
            m = BilinearForm(fes0)
            m += SymbolicBFI(InnerProduct(u, v))
            f = LinearForm(fes0)
            m.Assemble()
            f.Assemble()
            rowsm, colsm, valsm = m.mat.COO()
            M = sp.csc_matrix((valsm, (rowsm, colsm)))

        u = fes2.TrialFunction()
        v = fes2.TestFunction()

        a0 = BilinearForm(fes2)
        a0 += SymbolicBFI((mu**(-1)) * InnerProduct(curl(u), curl(v)))
        a0 += SymbolicBFI((1j) * (1 - inout) * epsi * InnerProduct(u, v))
        a1 = BilinearForm(fes2)
        a1 += SymbolicBFI((1j) * inout * nu * sigma * InnerProduct(u, v))

        Theta0.vec.FV().NumPy()[:] = Theta0SolVec[:, 0]
        r1 = LinearForm(fes2)
        r1 += SymbolicLFI(inout * (-1j) * nu * sigma * InnerProduct(Theta0, v))
        r1 += SymbolicLFI(inout * (-1j) * nu * sigma *
                          InnerProduct(xivec[0], v))
        r1.Assemble()

        Theta0.vec.FV().NumPy()[:] = Theta0SolVec[:, 1]
        r2 = LinearForm(fes2)
        r2 += SymbolicLFI(inout * (-1j) * nu * sigma * InnerProduct(Theta0, v))
        r2 += SymbolicLFI(inout * (-1j) * nu * sigma *
                          InnerProduct(xivec[1], v))
        r2.Assemble()

        Theta0.vec.FV().NumPy()[:] = Theta0SolVec[:, 2]
        r3 = LinearForm(fes2)
        r3 += SymbolicLFI(inout * (-1j) * nu * sigma * InnerProduct(Theta0, v))
        r3 += SymbolicLFI(inout * (-1j) * nu * sigma *
                          InnerProduct(xivec[2], v))
        r3.Assemble()

        a0.Assemble()
        a1.Assemble()

        rows0, cols0, vals0 = a0.mat.COO()
        rows1, cols1, vals1 = a1.mat.COO()

    A0 = sp.csr_matrix((vals0, (rows0, cols0)))
    A1 = sp.csr_matrix((vals1, (rows1, cols1)))

    R1 = sp.csr_matrix(r1.vec.FV().NumPy())
    R2 = sp.csr_matrix(r2.vec.FV().NumPy())
    R3 = sp.csr_matrix(r3.vec.FV().NumPy())

    H1 = sp.csr_matrix(u1Truncated)
    H2 = sp.csr_matrix(u2Truncated)
    H3 = sp.csr_matrix(u3Truncated)

    A0H1 = A0 @ H1
    A1H1 = A1 @ H1
    A0H2 = A0 @ H2
    A1H2 = A1 @ H2
    A0H3 = A0 @ H3
    A1H3 = A1 @ H3

    HA0H1 = (np.conjugate(np.transpose(H1)) @ A0H1).todense()
    HA1H1 = (np.conjugate(np.transpose(H1)) @ A1H1).todense()
    HR1 = (np.conjugate(np.transpose(H1)) @ np.transpose(R1)).todense()

    HA0H2 = (np.conjugate(np.transpose(H2)) @ A0H2).todense()
    HA1H2 = (np.conjugate(np.transpose(H2)) @ A1H2).todense()
    HR2 = (np.conjugate(np.transpose(H2)) @ np.transpose(R2)).todense()

    HA0H3 = (np.conjugate(np.transpose(H3)) @ A0H3).todense()
    HA1H3 = (np.conjugate(np.transpose(H3)) @ A1H3).todense()
    HR3 = (np.conjugate(np.transpose(H3)) @ np.transpose(R3)).todense()

    print(' created reduced order model    ')
    if PODErrorBars == True:
        print(' calculating error bars for reduced order model')

        Rerror1 = np.zeros([ndof2, cutoff * 2 + 1], dtype=complex)
        Rerror2 = np.zeros([ndof2, cutoff * 2 + 1], dtype=complex)
        Rerror3 = np.zeros([ndof2, cutoff * 2 + 1], dtype=complex)

        RerrorReduced1 = np.zeros([ndof0, cutoff * 2 + 1], dtype=complex)
        RerrorReduced2 = np.zeros([ndof0, cutoff * 2 + 1], dtype=complex)
        RerrorReduced3 = np.zeros([ndof0, cutoff * 2 + 1], dtype=complex)

        Rerror1[:, 0] = R1.todense()
        Rerror2[:, 0] = R2.todense()
        Rerror3[:, 0] = R3.todense()

        Rerror1[:, 1:cutoff + 1] = A0H1.todense()
        Rerror2[:, 1:cutoff + 1] = A0H2.todense()
        Rerror3[:, 1:cutoff + 1] = A0H3.todense()

        Rerror1[:, cutoff + 1:] = A1H1.todense()
        Rerror2[:, cutoff + 1:] = A1H2.todense()
        Rerror3[:, cutoff + 1:] = A1H3.todense()

        MR1 = np.zeros([ndof0, cutoff * 2 + 1], dtype=complex)
        MR2 = np.zeros([ndof0, cutoff * 2 + 1], dtype=complex)
        MR3 = np.zeros([ndof0, cutoff * 2 + 1], dtype=complex)

        with TaskManager():
            ProH = GridFunction(fes2)
            ProL = GridFunction(fes0)

            for i in range(2 * cutoff + 1):
                ProH.vec.FV().NumPy()[:] = Rerror1[:, i]
                ProL.Set(ProH)
                RerrorReduced1[:, i] = ProL.vec.FV().NumPy()[:]

                ProH.vec.FV().NumPy()[:] = Rerror2[:, i]
                ProL.Set(ProH)
                RerrorReduced2[:, i] = ProL.vec.FV().NumPy()[:]

                ProH.vec.FV().NumPy()[:] = Rerror3[:, i]
                ProL.Set(ProH)
                RerrorReduced3[:, i] = ProL.vec.FV().NumPy()[:]

        lu = spl.spilu(M, drop_tol=10**-4)

        #Solve the preconditioned systems, stacking the solutions as rows
        for i in range(2 * cutoff + 1):
            if i == 0:
                MR1 = sp.csr_matrix(lu.solve(RerrorReduced1[:, i]))
                MR2 = sp.csr_matrix(lu.solve(RerrorReduced2[:, i]))
                MR3 = sp.csr_matrix(lu.solve(RerrorReduced3[:, i]))
            else:
                MR1 = sp.vstack(
                    (MR1, sp.csr_matrix(lu.solve(RerrorReduced1[:, i]))))
                MR2 = sp.vstack(
                    (MR2, sp.csr_matrix(lu.solve(RerrorReduced2[:, i]))))
                MR3 = sp.vstack(
                    (MR3, sp.csr_matrix(lu.solve(RerrorReduced3[:, i]))))

        G1 = np.transpose(np.conjugate(RerrorReduced1)) @ np.transpose(MR1)
        G2 = np.transpose(np.conjugate(RerrorReduced2)) @ np.transpose(MR2)
        G3 = np.transpose(np.conjugate(RerrorReduced3)) @ np.transpose(MR3)
        G12 = np.transpose(np.conjugate(RerrorReduced1)) @ np.transpose(MR2)
        G13 = np.transpose(np.conjugate(RerrorReduced1)) @ np.transpose(MR3)
        G23 = np.transpose(np.conjugate(RerrorReduced2)) @ np.transpose(MR3)

        rom1 = np.zeros([1 + 2 * cutoff, 1], dtype=complex)
        rom2 = np.zeros([1 + 2 * cutoff, 1], dtype=complex)
        rom3 = np.zeros([1 + 2 * cutoff, 1], dtype=complex)

        TensorErrors = np.zeros([NumberofConstructedFrequencies, 3])
        ErrorTensors = np.zeros([NumberofConstructedFrequencies, 6])
        ErrorTensor = np.zeros([3, 3])

    ########################################################################
    #Project the calculations for the tensors onto the reduced basis

    with TaskManager():
        #Check whether these are needed
        u = fes2.TrialFunction()
        v = fes2.TestFunction()

        #Real Tensor
        k = BilinearForm(fes2)
        k += SymbolicBFI((mu**(-1)) * InnerProduct(curl(u), curl(v)))
        k.Assemble()
        rowsk, colsk, valsk = k.mat.COO()
        K = sp.csr_matrix((valsk, (rowsk, colsk)))

        #Imaginary Tensor
        #t4
        T4 = BilinearForm(fes2)
        T4 += SymbolicBFI(inout * sigma * InnerProduct(u, v))
        T4.Assemble()
        rowst, colst, valst = T4.mat.COO()
        T4 = sp.csr_matrix((valst, (rowst, colst)))

        Theta0i = GridFunction(fes)
        Theta0j = GridFunction(fes)

        #t1,2,3
        #11

        Theta0i.vec.FV().NumPy()[:] = Theta0SolVec[:, 0]
        Theta0j.vec.FV().NumPy()[:] = Theta0SolVec[:, 0]
        ta11 = Integrate(sigma * inout *
                         InnerProduct(Theta0j + xivec[0], Theta0i + xivec[0]),
                         mesh)  #t_1^11

        Tb1 = LinearForm(fes2)  #T_2^1j
        Tb1 += SymbolicLFI(sigma * inout * InnerProduct(v, Theta0i + xivec[0]))
        Tb1.Assemble()

        Tc1 = LinearForm(fes2)  #T_3^1i
        Tc1 += SymbolicLFI(sigma * inout *
                           InnerProduct(Conj(v), Theta0j + xivec[0]))
        Tc1.Assemble()

        #12

        Theta0j.vec.FV().NumPy()[:] = Theta0SolVec[:, 1]
        ta12 = Integrate(sigma * inout *
                         InnerProduct(Theta0j + xivec[1], Theta0i + xivec[0]),
                         mesh)  #t_1^12

        Tc2 = LinearForm(fes2)  #T_3^2i
        Tc2 += SymbolicLFI(sigma * inout *
                           InnerProduct(Conj(v), Theta0j + xivec[1]))
        Tc2.Assemble()

        #13

        Theta0j.vec.FV().NumPy()[:] = Theta0SolVec[:, 2]
        ta13 = Integrate(sigma * inout *
                         InnerProduct(Theta0j + xivec[2], Theta0i + xivec[0]),
                         mesh)  #t_1^13

        Tc3 = LinearForm(fes2)  #T_3^3i
        Tc3 += SymbolicLFI(sigma * inout *
                           InnerProduct(Conj(v), Theta0j + xivec[2]))
        Tc3.Assemble()

        #22

        Theta0i.vec.FV().NumPy()[:] = Theta0SolVec[:, 1]
        Theta0j.vec.FV().NumPy()[:] = Theta0SolVec[:, 1]
        ta22 = Integrate(sigma * inout *
                         InnerProduct(Theta0j + xivec[1], Theta0i + xivec[1]),
                         mesh)  #t_1^22

        Tb2 = LinearForm(fes2)  #T_2^2j
        Tb2 += SymbolicLFI(sigma * inout * InnerProduct(v, Theta0i + xivec[1]))
        Tb2.Assemble()

        #23
        Theta0j.vec.FV().NumPy()[:] = Theta0SolVec[:, 2]
        ta23 = Integrate(sigma * inout *
                         InnerProduct(Theta0j + xivec[2], Theta0i + xivec[1]),
                         mesh)  #t_1^23

        #33

        Theta0i.vec.FV().NumPy()[:] = Theta0SolVec[:, 2]
        Theta0j.vec.FV().NumPy()[:] = Theta0SolVec[:, 2]
        ta33 = Integrate(sigma * inout *
                         InnerProduct(Theta0j + xivec[2], Theta0i + xivec[2]),
                         mesh)  #t_1^33

        Tb3 = LinearForm(fes2)  #T_2^3j
        Tb3 += SymbolicLFI(sigma * inout * InnerProduct(v, Theta0i + xivec[2]))
        Tb3.Assemble()

    Tb1 = sp.csr_matrix(Tb1.vec.FV().NumPy())
    Tb2 = sp.csr_matrix(Tb2.vec.FV().NumPy())
    Tb3 = sp.csr_matrix(Tb3.vec.FV().NumPy())
    Tc1 = sp.csr_matrix(Tc1.vec.FV().NumPy())
    Tc2 = sp.csr_matrix(Tc2.vec.FV().NumPy())
    Tc3 = sp.csr_matrix(Tc3.vec.FV().NumPy())

    K11 = (np.conjugate(np.transpose(H1)) @ K @ H1).todense()
    K12 = (np.conjugate(np.transpose(H1)) @ K @ H2).todense()
    K13 = (np.conjugate(np.transpose(H1)) @ K @ H3).todense()
    K22 = (np.conjugate(np.transpose(H2)) @ K @ H2).todense()
    K23 = (np.conjugate(np.transpose(H2)) @ K @ H3).todense()
    K33 = (np.conjugate(np.transpose(H3)) @ K @ H3).todense()

    T21H1 = (Tb1 @ H1).todense()
    T21H2 = (Tb1 @ H2).todense()
    T21H3 = (Tb1 @ H3).todense()
    T22H2 = (Tb2 @ H2).todense()
    T22H3 = (Tb2 @ H3).todense()
    T23H3 = (Tb3 @ H3).todense()

    H1T31 = (np.conjugate(np.transpose(H1)) @ np.transpose(Tc1)).todense()
    H2T31 = (np.conjugate(np.transpose(H2)) @ np.transpose(Tc1)).todense()
    H3T31 = (np.conjugate(np.transpose(H3)) @ np.transpose(Tc1)).todense()
    H2T32 = (np.conjugate(np.transpose(H2)) @ np.transpose(Tc2)).todense()
    H3T32 = (np.conjugate(np.transpose(H3)) @ np.transpose(Tc2)).todense()
    H3T33 = (np.conjugate(np.transpose(H3)) @ np.transpose(Tc3)).todense()

    T411 = (np.conjugate(np.transpose(H1)) @ T4 @ H1).todense()
    T412 = (np.conjugate(np.transpose(H1)) @ T4 @ H2).todense()
    T413 = (np.conjugate(np.transpose(H1)) @ T4 @ H3).todense()
    T422 = (np.conjugate(np.transpose(H2)) @ T4 @ H2).todense()
    T423 = (np.conjugate(np.transpose(H2)) @ T4 @ H3).todense()
    T433 = (np.conjugate(np.transpose(H3)) @ T4 @ H3).todense()

    RealTensors = np.zeros([len(ConstructedFrequencyArray), 9])
    ImagTensors = np.zeros([len(ConstructedFrequencyArray), 9])

    ########################################################################
    #Produce the sweep on the lower dimensional space
    if ImagTensorFullOrderCalc == True:
        W1 = np.zeros([ndof2, len(ConstructedFrequencyArray)], dtype=complex)
        W2 = np.zeros([ndof2, len(ConstructedFrequencyArray)], dtype=complex)
        W3 = np.zeros([ndof2, len(ConstructedFrequencyArray)], dtype=complex)

    for i, omega in enumerate(ConstructedFrequencyArray):

        #This part is for obtaining the solutions in the lower dimensional space
        print(' solving reduced order system %d/%d    ' %
              (i, NumberofConstructedFrequencies),
              end='\r')

        g1 = np.linalg.solve(HA0H1 + HA1H1 * omega, HR1 * omega)
        g2 = np.linalg.solve(HA0H2 + HA1H2 * omega, HR2 * omega)
        g3 = np.linalg.solve(HA0H3 + HA1H3 * omega, HR3 * omega)

        #This part projects the problem to the higher dimensional space
        if ImagTensorFullOrderCalc == True:
            W1[:, i] = np.dot(u1Truncated, g1).flatten()
            W2[:, i] = np.dot(u2Truncated, g2).flatten()
            W3[:, i] = np.dot(u3Truncated, g3).flatten()

        #This part obtains the tensor coefficients in the lower dimensional space

        #Real tensors
        RealTensors[i, 0] = -(np.transpose(np.conjugate(g1)) @ K11 @ g1)[0, 0].real
        RealTensors[i, 1] = -(np.transpose(np.conjugate(g1)) @ K12 @ g2)[0, 0].real
        RealTensors[i, 2] = -(np.transpose(np.conjugate(g1)) @ K13 @ g3)[0, 0].real
        RealTensors[i, 4] = -(np.transpose(np.conjugate(g2)) @ K22 @ g2)[0, 0].real
        RealTensors[i, 5] = -(np.transpose(np.conjugate(g2)) @ K23 @ g3)[0, 0].real
        RealTensors[i, 8] = -(np.transpose(np.conjugate(g3)) @ K33 @ g3)[0, 0].real
        #Impose symmetry on the off-diagonal entries
        RealTensors[i, 3] = RealTensors[i, 1]
        RealTensors[i, 6] = RealTensors[i, 2]
        RealTensors[i, 7] = RealTensors[i, 5]

        #Imaginary tensors
        ImagTensors[i, 0] = ta11
        ImagTensors[i, 1] = ta12
        ImagTensors[i, 2] = ta13
        ImagTensors[i, 4] = ta22
        ImagTensors[i, 5] = ta23
        ImagTensors[i, 8] = ta33

        ImagTensors[i, 0] += (T21H1 @ g1)[0, 0].real
        ImagTensors[i, 1] += (T21H2 @ g2)[0, 0].real
        ImagTensors[i, 2] += (T21H3 @ g3)[0, 0].real
        ImagTensors[i, 4] += (T22H2 @ g2)[0, 0].real
        ImagTensors[i, 5] += (T22H3 @ g3)[0, 0].real
        ImagTensors[i, 8] += (T23H3 @ g3)[0, 0].real

        ImagTensors[i, 0] += (np.conjugate(np.transpose(g1)) @ H1T31)[0, 0].real
        ImagTensors[i, 1] += (np.conjugate(np.transpose(g2)) @ H2T31)[0, 0].real
        ImagTensors[i, 2] += (np.conjugate(np.transpose(g3)) @ H3T31)[0, 0].real
        ImagTensors[i, 4] += (np.conjugate(np.transpose(g2)) @ H2T32)[0, 0].real
        ImagTensors[i, 5] += (np.conjugate(np.transpose(g3)) @ H3T32)[0, 0].real
        ImagTensors[i, 8] += (np.conjugate(np.transpose(g3)) @ H3T33)[0, 0].real

        ImagTensors[i, 0] += (np.conjugate(np.transpose(g1)) @ T411 @ g1)[0, 0].real
        ImagTensors[i, 1] += (np.conjugate(np.transpose(g1)) @ T412 @ g2)[0, 0].real
        ImagTensors[i, 2] += (np.conjugate(np.transpose(g1)) @ T413 @ g3)[0, 0].real
        ImagTensors[i, 4] += (np.conjugate(np.transpose(g2)) @ T422 @ g2)[0, 0].real
        ImagTensors[i, 5] += (np.conjugate(np.transpose(g2)) @ T423 @ g3)[0, 0].real
        ImagTensors[i, 8] += (np.conjugate(np.transpose(g3)) @ T433 @ g3)[0, 0].real

        #Impose symmetry on the off-diagonal entries
        ImagTensors[i, 3] = ImagTensors[i, 1]
        ImagTensors[i, 6] = ImagTensors[i, 2]
        ImagTensors[i, 7] = ImagTensors[i, 5]

        ImagTensors[i, :] = ImagTensors[i, :] * nu * omega

        if PODErrorBars == True:
            rom1[0, 0] = omega
            rom2[0, 0] = omega
            rom3[0, 0] = omega

            rom1[1:1 + cutoff, 0] = -g1.flatten()
            rom2[1:1 + cutoff, 0] = -g2.flatten()
            rom3[1:1 + cutoff, 0] = -g3.flatten()

            rom1[1 + cutoff:, 0] = -(g1 * omega).flatten()
            rom2[1 + cutoff:, 0] = -(g2 * omega).flatten()
            rom3[1 + cutoff:, 0] = -(g3 * omega).flatten()

            error1 = np.conjugate(np.transpose(rom1)) @ G1 @ rom1
            error2 = np.conjugate(np.transpose(rom2)) @ G2 @ rom2
            error3 = np.conjugate(np.transpose(rom3)) @ G3 @ rom3
            error12 = np.conjugate(np.transpose(rom1)) @ G12 @ rom2
            error13 = np.conjugate(np.transpose(rom1)) @ G13 @ rom3
            error23 = np.conjugate(np.transpose(rom2)) @ G23 @ rom3

            error1 = abs(error1)**(1 / 2)
            error2 = abs(error2)**(1 / 2)
            error3 = abs(error3)**(1 / 2)
            error12 = error12.real
            error13 = error13.real
            error23 = error23.real

            Errors = [error1, error2, error3, error12, error13, error23]

            for j in range(6):
                if j < 3:
                    ErrorTensors[i, j] = (
                        (alpha**3) / 4) * (Errors[j]**2) / alphaLB
                else:
                    ErrorTensors[i, j] = -2 * Errors[j]
                    if j == 3:
                        ErrorTensors[i, j] += (Errors[0]**2) + (Errors[1]**2)
                        ErrorTensors[i, j] = ((alpha**3) / (8 * alphaLB)) * (
                            (Errors[0]**2) +
                            (Errors[1]**2) + ErrorTensors[i, j])
                    if j == 4:
                        ErrorTensors[i, j] += (Errors[0]**2) + (Errors[2]**2)
                        ErrorTensors[i, j] = ((alpha**3) / (8 * alphaLB)) * (
                            (Errors[0]**2) +
                            (Errors[1]**2) + ErrorTensors[i, j])
                    if j == 5:
                        ErrorTensors[i, j] += (Errors[1]**2) + (Errors[2]**2)
                        ErrorTensors[i, j] = ((alpha**3) / (8 * alphaLB)) * (
                            (Errors[0]**2) +
                            (Errors[1]**2) + ErrorTensors[i, j])

    RealTensors = ((alpha**3) / 4) * RealTensors
    ImagTensors = ((alpha**3) / 4) * ImagTensors

    print(' reduced order systems solved        ')

    #Calculate the imaginary tensors in the full order space if required
    if ImagTensorFullOrderCalc == True:
        #Create the inputs for the calculation of the tensors
        Runlist = []
        manager = multiprocessing.Manager()
        counter = manager.Value('i', 0)
        for i, Omega in enumerate(ConstructedFrequencyArray):
            nu = Omega * Mu0 * (alpha**2)
            NewInput = (mesh, fes, fes2, W1[:, i], W2[:, i], W3[:, i],
                        Theta0SolVec, xivec, alpha, mu, sigma, inout, nu,
                        counter, NumberofConstructedFrequencies)
            Runlist.append(NewInput)

        #Run in parallel
        with multiprocessing.Pool(CPUs) as pool:
            Output = pool.starmap(MPTCalculator, Runlist)
        print(' calculated tensors             ')
        print(' frequency sweep complete')

        #Unpack the outputs
        for i, OutputNumber in enumerate(Output):
            ImagTensors[i, :] = (OutputNumber[1]).flatten()

    if PODErrorBars == True:
        return RealTensors, ImagTensors, ErrorTensors
    else:
        return RealTensors, ImagTensors
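# Hypothetical call, assuming the finite element spaces and the Theta1
# snapshot matrices (one column per frequency) were computed beforehand:
RealTensors, ImagTensors, ErrorTensors = PODP(
    mesh, fes0, fes, fes2, Theta0Sol, xivec, alpha, sigma, mu, inout,
    epsi, Theta1E1Sol, Theta1E2Sol, Theta1E3Sol, FrequencyArray,
    ConstructedFrequencyArray, PODtol=1e-4, N0Errors=None,
    alphaLB=alphaLB, PODErrorBars=True)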
Example No. 10
def SingleFrequency(Object, Order, alpha, inorout, mur, sig, Omega, CPUs, VTK,
                    Refine):
    Object = Object[:-4] + ".vol"
    #Set up the Solver Parameters
    Solver, epsi, Maxsteps, Tolerance = SolverParameters()

    #Loading the object file
    ngmesh = ngmeshing.Mesh(dim=3)
    ngmesh.Load("VolFiles/" + Object)

    #Creating the mesh and defining the element types
    mesh = Mesh("VolFiles/" + Object)
    mesh.Curve(5)  #Curve the mesh elements to better capture the geometry
    numelements = mesh.ne  #Count the number of elements
    print(" mesh contains " + str(numelements) + " elements")

    #Set up the coefficients
    #Scalars
    Mu0 = 4 * np.pi * 10**(-7)
    #Coefficient functions
    mu_coef = [mur[mat] for mat in mesh.GetMaterials()]
    mu = CoefficientFunction(mu_coef)
    inout_coef = [inorout[mat] for mat in mesh.GetMaterials()]
    inout = CoefficientFunction(inout_coef)
    sigma_coef = [sig[mat] for mat in mesh.GetMaterials()]
    sigma = CoefficientFunction(sigma_coef)

    #Set up how the tensors will be stored
    N0 = np.zeros([3, 3])
    R = np.zeros([3, 3])
    I = np.zeros([3, 3])

    #########################################################################
    #Theta0
    #This section solves the Theta0 problem to calculate both the inputs for
    #the Theta1 problem and calculate the N0 tensor

    #Setup the finite element space
    fes = HCurl(mesh, order=Order, dirichlet="outer", flags={"nograds": True})
    #Count the number of degrees of freedom
    ndof = fes.ndof

    #Define the vectors for the right hand side
    evec = [
        CoefficientFunction((1, 0, 0)),
        CoefficientFunction((0, 1, 0)),
        CoefficientFunction((0, 0, 1))
    ]

    #Setup the grid functions and array which will be used to save
    Theta0i = GridFunction(fes)
    Theta0j = GridFunction(fes)
    Theta0Sol = np.zeros([ndof, 3])

    #Setup the inputs for the functions to run
    Runlist = []
    for i in range(3):
        if CPUs < 3:
            NewInput = (fes, Order, alpha, mu, inout, evec[i], Tolerance,
                        Maxsteps, epsi, i + 1, Solver)
        else:
            NewInput = (fes, Order, alpha, mu, inout, evec[i], Tolerance,
                        Maxsteps, epsi, "No Count", Solver)
        Runlist.append(NewInput)
    #Run on the multiple cores
    with multiprocessing.Pool(CPUs) as pool:
        Output = pool.starmap(Theta0, Runlist)
    print(' solved theta0 problem      ')

    #Unpack the outputs
    for i, Direction in enumerate(Output):
        Theta0Sol[:, i] = Direction

    #Calculate the N0 tensor
    VolConstant = Integrate(1 - mu**(-1), mesh)
    for i in range(3):
        Theta0i.vec.FV().NumPy()[:] = Theta0Sol[:, i]
        for j in range(3):
            Theta0j.vec.FV().NumPy()[:] = Theta0Sol[:, j]
            if i == j:
                N0[i, j] = (alpha**3) * (VolConstant + (1 / 4) * (Integrate(
                    mu**(-1) *
                    (InnerProduct(curl(Theta0i), curl(Theta0j))), mesh)))
            else:
                N0[i, j] = (alpha**3 / 4) * (Integrate(
                    mu**(-1) *
                    (InnerProduct(curl(Theta0i), curl(Theta0j))), mesh))

    #########################################################################
    #Theta1
    #This section solves the Theta1 problem and saves the solution vectors

    #Setup the finite element space
    dom_nrs_metal = [0 if mat == "air" else 1 for mat in mesh.GetMaterials()]
    fes2 = HCurl(mesh,
                 order=Order,
                 dirichlet="outer",
                 complex=True,
                 gradientdomains=dom_nrs_metal)
    #Count the number of degrees of freedom
    ndof2 = fes2.ndof

    #Define the vectors for the right hand side
    xivec = [
        CoefficientFunction((0, -z, y)),
        CoefficientFunction((z, 0, -x)),
        CoefficientFunction((-y, x, 0))
    ]

    #Setup the array which will be used to store the solution vectors
    Theta1Sol = np.zeros([ndof2, 3], dtype=complex)

    #Set up the inputs for the problem
    Runlist = []
    nu = Omega * Mu0 * (alpha**2)
    for i in range(3):
        if CPUs < 3:
            NewInput = (fes, fes2, Theta0Sol[:, i], xivec[i], Order, alpha, nu,
                        sigma, mu, inout, Tolerance, Maxsteps, epsi, Omega,
                        i + 1, 3, Solver)
        else:
            NewInput = (fes, fes2, Theta0Sol[:, i], xivec[i], Order, alpha, nu,
                        sigma, mu, inout, Tolerance, Maxsteps, epsi, Omega,
                        "No Print", 3, Solver)
        Runlist.append(NewInput)

    #Run on the multiple cores
    with multiprocessing.Pool(CPUs) as pool:
        Output = pool.starmap(Theta1, Runlist)
    print(' solved theta1 problem       ')

    #Unpack the outputs
    for i, OutputNumber in enumerate(Output):
        Theta1Sol[:, i] = OutputNumber

    #Create the VTK output if required
    if VTK == True:
        print(' creating vtk output', end='\r')
        ThetaE1 = GridFunction(fes2)
        ThetaE2 = GridFunction(fes2)
        ThetaE3 = GridFunction(fes2)
        ThetaE1.vec.FV().NumPy()[:] = Output[0]
        ThetaE2.vec.FV().NumPy()[:] = Output[1]
        ThetaE3.vec.FV().NumPy()[:] = Output[2]
        E1Mag = CoefficientFunction(
            sqrt(
                InnerProduct(ThetaE1.real, ThetaE1.real) +
                InnerProduct(ThetaE1.imag, ThetaE1.imag)))
        E2Mag = CoefficientFunction(
            sqrt(
                InnerProduct(ThetaE2.real, ThetaE2.real) +
                InnerProduct(ThetaE2.imag, ThetaE2.imag)))
        E3Mag = CoefficientFunction(
            sqrt(
                InnerProduct(ThetaE3.real, ThetaE3.real) +
                InnerProduct(ThetaE3.imag, ThetaE3.imag)))
        Sols = []
        Sols.append(dom_nrs_metal)
        Sols.append((ThetaE1 * 1j * Omega * sigma).real)
        Sols.append((ThetaE1 * 1j * Omega * sigma).imag)
        Sols.append((ThetaE2 * 1j * Omega * sigma).real)
        Sols.append((ThetaE2 * 1j * Omega * sigma).imag)
        Sols.append((ThetaE3 * 1j * Omega * sigma).real)
        Sols.append((ThetaE3 * 1j * Omega * sigma).imag)
        Sols.append(E1Mag * Omega * sigma)
        Sols.append(E2Mag * Omega * sigma)
        Sols.append(E3Mag * Omega * sigma)
        savename = "Results/vtk_output/" + Object[:-4] + "/om_" + FtoS(
            Omega) + "/"
        if Refine == True:
            vtk = VTKOutput(ma=mesh,
                            coefs=Sols,
                            names=[
                                "Object", "E1real", "E1imag", "E2real",
                                "E2imag", "E3real", "E3imag", "E1Mag", "E2Mag",
                                "E3Mag"
                            ],
                            filename=savename + Object[:-4],
                            subdivision=3)
        else:
            vtk = VTKOutput(ma=mesh,
                            coefs=Sols,
                            names=[
                                "Object", "E1real", "E1imag", "E2real",
                                "E2imag", "E3real", "E3imag", "E1Mag", "E2Mag",
                                "E3Mag"
                            ],
                            filename=savename + Object[:-4],
                            subdivision=0)
        vtk.Do()
        print(' vtk output created     ')

    #########################################################################
    #Calculate the tensor and eigenvalues
    print(' calculating the tensor  ', end='\r')
    nu = Omega * Mu0 * (alpha**2)
    R, I = MPTCalculator(mesh, fes, fes2, Theta1Sol[:, 0], Theta1Sol[:, 1],
                         Theta1Sol[:, 2], Theta0Sol, xivec, alpha, mu, sigma,
                         inout, nu, "No Print", 1)
    print(' calculated the tensor             ')

    #Unpack the outputs
    MPT = N0 + R + 1j * I
    RealEigenvalues = np.sort(np.linalg.eigvals(N0 + R))
    ImaginaryEigenvalues = np.sort(np.linalg.eigvals(I))
    EigenValues = RealEigenvalues + 1j * ImaginaryEigenvalues

    return MPT, EigenValues, N0, numelements
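# Hypothetical call, mirroring FullSweepMulti above:
MPT, EigenValues, N0, ne = SingleFrequency(
    "sphere.vol", Order=2, alpha=0.01,
    inorout={"air": 0, "sphere": 1}, mur={"air": 1, "sphere": 50},
    sig={"air": 0, "sphere": 1e6},
    Omega=1e4, CPUs=4, VTK=False, Refine=False)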
Example No. 11
def main() -> None:
    """Run comparisons with provided arguments."""
    parser = argparse.ArgumentParser(
        description='Compare ng pages to legacy pages')
    parser.add_argument('--idfile', default=False)
    parser.add_argument('--reset', default=False, const=True,
                        action='store_const', dest='reset')
    parser.add_argument('--short', default=False, const=True,
                        action='store_const', dest='short')
    parser.add_argument('--config', default=False)
    args = parser.parse_args()

    print('Starting config')
    if args.config not in configs.keys():
        raise ValueError(
            f"No config named '{args.config}' choose one of [{' '.join(configs.keys())}]")
    else:
        print(f'Using config {args.config}')
        active_config = configs[args.config]
    print('done with config')

    visited: Set[str] = set()
    if args.reset:
        print('Restarting analysis and deleting logs!')
        if os.path.exists(LOG_FILE_NAME):
            os.remove(LOG_FILE_NAME)
        if os.path.exists(VISITED_FILE_NAME):
            os.remove(VISITED_FILE_NAME)
    else:
        if os.path.exists(VISITED_FILE_NAME):
            print('Continuing analysis')
            with open(VISITED_FILE_NAME, 'r') as visited_fh:
                visited = {line.rstrip() for line in visited_fh.readlines()}

    ids = _id_generator_from_file(args.idfile, excluded=visited)

    if args.short:
        n = 0
        total = 10
        logging.info(f'Doing short list of {total}')

        def done() -> bool:
            nonlocal n
            if n >= total:
                return True
            n = n + 1
            return False
    else:
        def done() -> bool:
            return False

    f_then_c = partial(fetch_pages, active_config)

    with open(VISITED_FILE_NAME, 'a', buffering=1) as visited_fh:
        logging.debug(
            f'Opened {VISITED_FILE_NAME} to find already visited ids')
        with open(LOG_FILE_NAME, 'w', buffering=1) as report_fh:
            logging.debug(f'Opened {LOG_FILE_NAME} to write report to')
            with mp.Pool(4) as pool:
                completed_jobs = pool.imap_unordered(f_then_c, ids)

                def done_job(job):
                    (res_dict, bad_results) = job
                    logging.debug(f"completed {res_dict['id']}")
                    visited_fh.write(f"{res_dict['id']}\n")
                    write_comparison_org(
                        report_fh, (res_dict, list(bad_results)))
                    if done():
                        logging.info("done and exiting")
                        exit(0)

                for job in completed_jobs:
                    done_job(job)
Example No. 12
import multiprocessing

import numpy

cube = 'hello'
coord_names = 'boo'
agg_method = 'mean'
nlat = 70
new_lat_bounds = numpy.arange(nlat)


def lat_aggregate(cube, coord_names, lat_bounds, agg_method):
    """Dummy."""

    return lat_bounds


# Start my pool
print('CPUs:', multiprocessing.cpu_count())
pool = multiprocessing.Pool(multiprocessing.cpu_count())

# Build task list
tasks = []
for lat_index in range(0, nlat):
    tasks.append((cube, coord_names, new_lat_bounds[lat_index], agg_method))

# Run tasks
results = [pool.apply_async(lat_aggregate, t) for t in tasks]

# Process results
for lat_index, result in enumerate(results):
    lat_agg = result.get()
    print(lat_agg)
    #new_data[..., lat_index] = lat_agg.data

# Tidy up the pool
pool.close()
pool.join()
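# The same dispatch can be written more compactly with starmap, which
# blocks until all results are ready and preserves task order:
with multiprocessing.Pool() as pool:
    results = pool.starmap(lat_aggregate, tasks)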
Example No. 13
def aggregate_maximum_or_average(model,
                                 aggr,
                                 partial_data,
                                 split_series_dict,
                                 name2split,
                                 aggr_id='aggr_id'):
    """Compute maximum or average aggregation `aggr` for `model` on given data.

        Args:
            model: mb_modelbase.Model
            aggr: AggregationTuple
            partial_data: pd.DataFrame
            split_series_dict: dict<str,pd.Series>
            name2split: dict<str, dict>
                Map of a name of a split to the split with meta information. See `make_name2split()`.
            aggr_id: str, optional, defaults to 'aggr_id'.
                name of column of the aggregation result in the resulting data frame.

        Returns: pd.DataFrame
            The result of the aggregation as a pd.DataFrame with input and output included and correctly named columns
    """
    split_data_list = (df for name, df in split_series_dict.items())
    cond_out_data = crossjoin(*split_data_list, partial_data)
    cond_out_names = cond_out_data.columns
    cond_out_ops = condition_ops_and_names(cond_out_names, name2split,
                                           len(split_series_dict),
                                           len(partial_data.columns))

    # TODO: make the outer loop parallel
    # TODO: speed up results = np.empty(len(input_frame))
    results = []

    if len(cond_out_data) == 0:
        # there are no fields to split by, hence only a single value will be aggregated
        assert len(aggr[NAME_IDX]) == len(model.fields)
        res = model.aggregate(aggr[METHOD_IDX], opts=aggr[ARGS_IDX + 1])
        i = model.asindex(aggr[YIELDS_IDX])  # reduce to requested field
        results.append(res[i])

    else:
        row_id_gen = utils.linear_id_generator(prefix="_row")
        rowmodel_name = model.name + next(row_id_gen)

        def pred_max_func(row,
                          cond_out_names=cond_out_names,
                          operator_list=cond_out_ops,
                          rowmodel_name=rowmodel_name,
                          model=model):
            pairs = zip(cond_out_names, operator_list, row)
            rowmodel = model\
                .copy(name=rowmodel_name)\
                .condition(pairs)\
                .marginalize(keep=aggr[NAME_IDX])
            res = rowmodel.aggregate(aggr[METHOD_IDX], opts=aggr[ARGS_IDX + 1])
            i = rowmodel.asindex(aggr[YIELDS_IDX])
            return res[i]

        _input_tuples = cond_out_data.itertuples(index=False, name=None)

        if model.parallel_processing:
            with mp_dill.Pool() as p:
                results = p.map(pred_max_func, _input_tuples)
        else:
            results = [pred_max_func(row) for row in _input_tuples]

    return cond_out_data.assign(**{aggr_id: results})
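
`pred_max_func` pins `cond_out_names`, the operator list, the row-model name, and the model via default arguments so that the dill-based pool serialises them together with the function. An equivalent binding is sketched below with a hypothetical `pred_max_row`, whose body would be identical to `pred_max_func` but without the defaults, using `functools.partial`; `dill` (which `multiprocessing_on_dill` builds on) can generally serialise such partials:

import functools

def pred_max_row(row, cond_out_names, operator_list, rowmodel_name, model):
    ...  # body identical to pred_max_func above, minus the defaults

pred = functools.partial(pred_max_row,
                         cond_out_names=cond_out_names,
                         operator_list=cond_out_ops,
                         rowmodel_name=rowmodel_name,
                         model=model)

with mp_dill.Pool() as p:
    results = p.map(pred, _input_tuples)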
Example No. 14
    def run(self, n_jobs_links=1, n_jobs_folds=1, n_jobs_grid=1, verbose=2, output_path=None,
            cv_pre=None, topickle=False, recompute=False, skip_runerror=True, skip_ioerror=False,
            zip_code_dirs=None, detailed_save=False):
        """ Start the analyses chain
        Parameters
        ----------
        n_jobs_links:  int
            number of parallell jobs for links
        n_jobs_folds: int
            number of parallell jobs for folds
        verbose: int
            verbosity level
        output_path : str, None
            directoy in which results are saved as pickle files OR
            path to existent database OR
            None, in which case results are stored in memory
        recompute : bool
            recompute links that are already in the provided database (if one is provided via
            output_path)
        skip_runerror : bool
            skip any runtime errors
        skip_ioerror:
            skip I/O errors related to saving the results in a database
        zip_code_dirs: List<str>, None
            python files encontained in a list of directory paths are saved as a zip file
        detailed_save : bool
            whether harddisk-intensive analyses results are saved
            [TODO: which ones?]
        """

        timestamp = time.strftime("%Y%m%d-%H%M%S")

        if n_jobs_links > 1 and n_jobs_folds > 1:
            raise Exception('You cannot set both n_jobs_links and n_jobs_folds > 1.')


        is_searchlight = self.link_list[0].scheme.searchlight
        has_time = self.link_list[0].scheme.has_time

        lockfile_timestamp = timestamp
        if not is_searchlight and not has_time and cv_pre is None and not topickle and output_path is not None:
            if os.path.splitext(output_path)[1] == '.db':
                output_dir = os.path.dirname(output_path)
                if os.path.exists(output_path):
                    db_string = 'sqlite:///%s' % output_path
                    db = connect(db_string)
                    basename = os.path.splitext(os.path.basename(output_path))[0]
                    if len(basename) == 20 and basename.startswith('data_'):
                        lockfile_timestamp = basename[5:]
                    else:
                        print('Cannot infer lockfile name of previous chain(s). Using '
                              'timestamp as name instead.')
                    if not recompute:
                        in_db = []
                        exclude_keys = ['in_id', 'clf_id']
                        for i, link in enumerate(self.link_list):
                            if all([k in db[db.tables[0]].columns for k in link.description.keys()]):
                                where = ' '.join(["AND %s IS '%s'" % (k, v)
                                                  for k, v in link.description.items()
                                                  if k not in exclude_keys])[4:]
                                where = where.replace("'True'", "1").replace("'False'", "0").\
                                    replace("'None'", "NULL")
                                if list(db.query('SELECT id FROM chain WHERE %s' % where)):
                                    # and not list(db.query('SELECT correct FROM chain WHERE %s' % where))[0]['correct'] is None:
                                    in_db.append(i)
                        for i in sorted(in_db, reverse=True):
                            print('Deleting %s' % self.link_list[i].description_str)
                            del self.link_list[i]  # if entry in database, remove from linkdef_list
                        print('Deleted %g links' % len(in_db))
                else:
                    # db_string = 'sqlite:///%s_%s.db' % (os.path.splitext(output_dir)[0], timestamp)
                    db_string = 'sqlite:///%s' % output_path
            else:
                output_dir = output_path
                db_string = 'sqlite:///%s' % os.path.join(output_path, 'data_%s.db' % timestamp)
        else:
            db_string = ''
            if output_path is not None and not topickle:
                output_dir = os.path.dirname(output_path)
            else:
                output_dir = None


        if output_dir is not None and zip_code_dirs is not None:
            zip_path = os.path.join(output_dir, 'archive_%s.zip' % timestamp)
            zip_directory_structure(zip_code_dirs, zip_path, allowed_pattern='*.py')
        if output_dir is not None and not has_time and not is_searchlight and cv_pre is None:
            lockfile = os.path.join(output_dir, 'lock_%s' % lockfile_timestamp)
            if verbose > -1:
                print('Lockfile: %s' % lockfile)
        else:
            lockfile = None
        if db_string and verbose > -1:
            print('Database: %s' % db_string)

        if n_jobs_links == 1:
            chain = []
            for link_id, link in enumerate(self.link_list):
                params = (n_jobs_folds, n_jobs_grid, verbose, link_id, len(self.link_list),
                          link, skip_runerror, skip_ioerror, db_string, lockfile, output_path,
                          cv_pre, topickle, timestamp, detailed_save)
                link = _link(params)
                chain.append(link)
        else:
            pool = multiprocessing.Pool(None if n_jobs_links == -1 else n_jobs_links)
            params = [(n_jobs_folds, n_jobs_grid, verbose, link_id, len(self.link_list),
                       link, skip_runerror, skip_ioerror, db_string, lockfile, output_path,
                       cv_pre, topickle, timestamp, detailed_save)
                      for link_id, link in enumerate(self.link_list)]
            chain = pool.map(_link, params)
            pool.close()
            pool.join()

        for i, link in enumerate(chain):
            if not link.info['success']:
                print('Failed link: %s' % self.link_list[i].db_key)
                for message in link.info['messages']:
                    print(message)

        if verbose > -2:
            print('Finished chain!')

        return chain[0] if len(chain) == 1 else chain
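
For orientation, a hypothetical call is sketched below; `analysis` stands in for an instance of the (not shown) class that owns `run()`. The documented constraint applies: at most one of `n_jobs_links` and `n_jobs_folds` may exceed 1.

# Hypothetical usage sketch: parallelise over folds rather than links.
# With a directory as output_path (and default scheme flags), results
# are written to a fresh data_<timestamp>.db inside that directory.
chain = analysis.run(n_jobs_links=1,
                     n_jobs_folds=4,
                     verbose=2,
                     output_path='/tmp/results',
                     skip_runerror=True,
                     skip_ioerror=False)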
Example No. 15
def PODSweepMulti(Object, Order, alpha, inorout, mur, sig, Array, PODArray,
                  PODTol, PlotPod, CPUs, sweepname, SavePOD, PODErrorBars,
                  BigProblem):
    Object = Object[:-4] + ".vol"
    #Set up the Solver Parameters
    Solver, epsi, Maxsteps, Tolerance = SolverParameters()

    #Loading the object file
    ngmesh = ngmeshing.Mesh(dim=3)
    ngmesh.Load("VolFiles/" + Object)

    #Creating the mesh and defining the element types
    mesh = Mesh("VolFiles/" + Object)
    mesh.Curve(5)  #This can be used to refine the mesh
    numelements = mesh.ne  #Count the number elements
    print(" mesh contains " + str(numelements) + " elements")

    #Set up the coefficients
    #Scalars
    Mu0 = 4 * np.pi * 10**(-7)
    NumberofSnapshots = len(PODArray)
    NumberofFrequencies = len(Array)
    #Coefficient functions
    mu_coef = [mur[mat] for mat in mesh.GetMaterials()]
    mu = CoefficientFunction(mu_coef)
    inout_coef = [inorout[mat] for mat in mesh.GetMaterials()]
    inout = CoefficientFunction(inout_coef)
    sigma_coef = [sig[mat] for mat in mesh.GetMaterials()]
    sigma = CoefficientFunction(sigma_coef)

    #Set up how the tensor and eigenvalues will be stored
    N0 = np.zeros([3, 3])
    TensorArray = np.zeros([NumberofFrequencies, 9], dtype=complex)
    RealEigenvalues = np.zeros([NumberofFrequencies, 3])
    ImaginaryEigenvalues = np.zeros([NumberofFrequencies, 3])
    EigenValues = np.zeros([NumberofFrequencies, 3], dtype=complex)

    #########################################################################
    #Theta0
    #This section solves the Theta0 problem to calculate both the inputs for
    #the Theta1 problem and calculate the N0 tensor

    #Setup the finite element space
    fes = HCurl(mesh, order=Order, dirichlet="outer", flags={"nograds": True})
    #Count the number of degrees of freedom
    ndof = fes.ndof

    #Define the vectors for the right hand side
    evec = [
        CoefficientFunction((1, 0, 0)),
        CoefficientFunction((0, 1, 0)),
        CoefficientFunction((0, 0, 1))
    ]

    #Setup the grid functions and array which will be used to save
    Theta0i = GridFunction(fes)
    Theta0j = GridFunction(fes)
    Theta0Sol = np.zeros([ndof, 3])

    #Setup the inputs for the functions to run
    Theta0CPUs = min(3, multiprocessing.cpu_count(), CPUs)
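    # (Cap at three workers: there are only three Theta0 problems, one per
    # coordinate direction in evec, so extra processes would sit idle. The
    # penultimate Runlist entry appears to toggle per-solve progress
    # printing: the direction number when running on fewer than 3 cores,
    # "No Print" otherwise.)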
    Runlist = []
    for i in range(3):
        if Theta0CPUs < 3:
            NewInput = (fes, Order, alpha, mu, inout, evec[i], Tolerance,
                        Maxsteps, epsi, i + 1, Solver)
        else:
            NewInput = (fes, Order, alpha, mu, inout, evec[i], Tolerance,
                        Maxsteps, epsi, "No Print", Solver)
        Runlist.append(NewInput)
    #Run on the multiple cores
    with multiprocessing.Pool(Theta0CPUs) as pool:
        Output = pool.starmap(Theta0, Runlist)
    print(' solved theta0 problems    ')

    #Unpack the outputs
    for i, Direction in enumerate(Output):
        Theta0Sol[:, i] = Direction

    #Calculate the N0 tensor
    VolConstant = Integrate(1 - mu**(-1), mesh)
    for i in range(3):
        Theta0i.vec.FV().NumPy()[:] = Theta0Sol[:, i]
        for j in range(3):
            Theta0j.vec.FV().NumPy()[:] = Theta0Sol[:, j]
            if i == j:
                N0[i, j] = (alpha**3) * (VolConstant + (1 / 4) * (Integrate(
                    mu**(-1) *
                    (InnerProduct(curl(Theta0i), curl(Theta0j))), mesh)))
            else:
                N0[i, j] = (alpha**3 / 4) * (Integrate(
                    mu**(-1) *
                    (InnerProduct(curl(Theta0i), curl(Theta0j))), mesh))

    #########################################################################
    #Theta1
    #This section solves the Theta1 problem and saves the solution vectors

    #Setup the finite element space
    dom_nrs_metal = [0 if mat == "air" else 1 for mat in mesh.GetMaterials()]
    fes2 = HCurl(mesh,
                 order=Order,
                 dirichlet="outer",
                 complex=True,
                 gradientdomains=dom_nrs_metal)
    #Count the number of degrees of freedom
    ndof2 = fes2.ndof

    #Define the vectors for the right hand side
    xivec = [
        CoefficientFunction((0, -z, y)),
        CoefficientFunction((z, 0, -x)),
        CoefficientFunction((-y, x, 0))
    ]

    #Work out where to send each frequency
    Theta1_CPUs = min(NumberofSnapshots, multiprocessing.cpu_count(), CPUs)
    Core_Distribution = []
    Count_Distribution = []
    for i in range(Theta1_CPUs):
        Core_Distribution.append([])
        Count_Distribution.append([])

    #Distribute between the cores
    CoreNumber = 0
    count = 1
    for i, Omega in enumerate(PODArray):
        Core_Distribution[CoreNumber].append(Omega)
        Count_Distribution[CoreNumber].append(i)
        if CoreNumber == Theta1_CPUs - 1 and count == 1:
            count = -1
        elif CoreNumber == 0 and count == -1:
            count = 1
        else:
            CoreNumber += count
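    # The loop above deals the snapshot frequencies out in a zigzag
    # (forwards then backwards across the cores), so each core receives
    # snapshots spread over the whole POD frequency range rather than one
    # contiguous block.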

    #Create the inputs
    Runlist = []
    manager = multiprocessing.Manager()
    counter = manager.Value('i', 0)
    for i in range(Theta1_CPUs):
        if PlotPod == True:
            Runlist.append(
                (Core_Distribution[i], mesh, fes, fes2, Theta0Sol, xivec,
                 alpha, sigma, mu, inout, Tolerance, Maxsteps, epsi, Solver,
                 N0, NumberofSnapshots, True, True, counter, BigProblem))
        else:
            Runlist.append(
                (Core_Distribution[i], mesh, fes, fes2, Theta0Sol, xivec,
                 alpha, sigma, mu, inout, Tolerance, Maxsteps, epsi, Solver,
                 N0, NumberofSnapshots, True, False, counter, BigProblem))

    #Run on the multiple cores
    with multiprocessing.Pool(Theta1_CPUs) as pool:
        Outputs = pool.starmap(Theta1_Sweep, Runlist)

    #Unpack the results
    if BigProblem == True:
        Theta1Sols = np.zeros([ndof2, NumberofSnapshots, 3],
                              dtype=np.complex64)
    else:
        Theta1Sols = np.zeros([ndof2, NumberofSnapshots, 3], dtype=complex)
    if PlotPod == True:
        PODTensors = np.zeros([NumberofSnapshots, 9], dtype=complex)
        PODEigenValues = np.zeros([NumberofSnapshots, 3], dtype=complex)
    for i, Output in enumerate(Outputs):
        for j, Num in enumerate(Count_Distribution[i]):
            if PlotPod == True:
                PODTensors[Num, :] = Output[0][j]
                PODEigenValues[Num, :] = Output[1][j]
                Theta1Sols[:, Num, :] = Output[2][:, j, :]
            else:
                Theta1Sols[:, Num, :] = Output[:, j, :]

    #########################################################################
    #POD

    print(' performing SVD              ', end='\r')
    #Perform SVD on the solution vector matrices
    u1Truncated, s1, vh1 = np.linalg.svd(Theta1Sols[:, :, 0],
                                         full_matrices=False)
    u2Truncated, s2, vh2 = np.linalg.svd(Theta1Sols[:, :, 1],
                                         full_matrices=False)
    u3Truncated, s3, vh3 = np.linalg.svd(Theta1Sols[:, :, 2],
                                         full_matrices=False)
    #Get rid of the solution vectors
    Theta1Sols = None
    #Print an update on progress
    print(' SVD complete      ')

    #Normalise the singular values by the largest mode
    s1norm = s1 / s1[0]
    s2norm = s2 / s2[0]
    s3norm = s3 / s3[0]

    #Decide where to truncate: keep modes up to the first index at which all
    #three normalised singular values drop below the POD tolerance
    cutoff = NumberofSnapshots
    for i in range(NumberofSnapshots):
        if s1norm[i] < PODTol and s2norm[i] < PODTol and s3norm[i] < PODTol:
            cutoff = i
            break

    #Truncate the SVD matrices
    u1Truncated = u1Truncated[:, :cutoff]
    u2Truncated = u2Truncated[:, :cutoff]
    u3Truncated = u3Truncated[:, :cutoff]

    ########################################################################
    #Create the ROM

    print(' creating reduced order model', end='\r')
    nu_no_omega = Mu0 * (alpha**2)

    Theta_0 = GridFunction(fes)
    u, v = fes2.TnT()

    if BigProblem == True:
        a0 = BilinearForm(fes2, symmetric=True)
    else:
        a0 = BilinearForm(fes2)
    a0 += SymbolicBFI((mu**(-1)) * InnerProduct(curl(u), curl(v)))
    a0 += SymbolicBFI((1j) * (1 - inout) * epsi * InnerProduct(u, v))
    if BigProblem == True:
        a1 = BilinearForm(fes2, symmetric=True)
    else:
        a1 = BilinearForm(fes2)
    a1 += SymbolicBFI((1j) * inout * nu_no_omega * sigma * InnerProduct(u, v))
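    # a0 gathers the frequency-independent terms and a1 the terms linear in
    # omega, so the full system at frequency omega is a0 + omega * a1; the
    # reduced solves below use exactly this split (HA0H* + omega * HA1H*).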

    a0.Assemble()
    a1.Assemble()

    Theta_0.vec.FV().NumPy()[:] = Theta0Sol[:, 0]
    r1 = LinearForm(fes2)
    r1 += SymbolicLFI(inout * (-1j) * nu_no_omega * sigma *
                      InnerProduct(Theta_0, v))
    r1 += SymbolicLFI(inout * (-1j) * nu_no_omega * sigma *
                      InnerProduct(xivec[0], v))
    r1.Assemble()
    read_vec = r1.vec.CreateVector()
    write_vec = r1.vec.CreateVector()

    Theta_0.vec.FV().NumPy()[:] = Theta0Sol[:, 1]
    r2 = LinearForm(fes2)
    r2 += SymbolicLFI(inout * (-1j) * nu_no_omega * sigma *
                      InnerProduct(Theta_0, v))
    r2 += SymbolicLFI(inout * (-1j) * nu_no_omega * sigma *
                      InnerProduct(xivec[1], v))
    r2.Assemble()

    Theta_0.vec.FV().NumPy()[:] = Theta0Sol[:, 2]
    r3 = LinearForm(fes2)
    r3 += SymbolicLFI(inout * (-1j) * nu_no_omega * sigma *
                      InnerProduct(Theta_0, v))
    r3 += SymbolicLFI(inout * (-1j) * nu_no_omega * sigma *
                      InnerProduct(xivec[2], v))
    r3.Assemble()

    if PODErrorBars == True:
        fes0 = HCurl(mesh,
                     order=0,
                     dirichlet="outer",
                     complex=True,
                     gradientdomains=dom_nrs_metal)
        ndof0 = fes0.ndof
        RerrorReduced1 = np.zeros([ndof0, cutoff * 2 + 1], dtype=complex)
        RerrorReduced2 = np.zeros([ndof0, cutoff * 2 + 1], dtype=complex)
        RerrorReduced3 = np.zeros([ndof0, cutoff * 2 + 1], dtype=complex)
        ProH = GridFunction(fes2)
        ProL = GridFunction(fes0)
    ########################################################################
    #Project the assembled system onto the reduced bases
    R1 = r1.vec.FV().NumPy()
    R2 = r2.vec.FV().NumPy()
    R3 = r3.vec.FV().NumPy()
    A0H = np.zeros([ndof2, cutoff], dtype=complex)
    A1H = np.zeros([ndof2, cutoff], dtype=complex)

    #E1
    for i in range(cutoff):
        read_vec.FV().NumPy()[:] = u1Truncated[:, i]
        write_vec.data = a0.mat * read_vec
        A0H[:, i] = write_vec.FV().NumPy()
        write_vec.data = a1.mat * read_vec
        A1H[:, i] = write_vec.FV().NumPy()
    HA0H1 = (np.conjugate(np.transpose(u1Truncated)) @ A0H)
    HA1H1 = (np.conjugate(np.transpose(u1Truncated)) @ A1H)
    HR1 = (np.conjugate(np.transpose(u1Truncated)) @ np.transpose(R1))

    if PODErrorBars == True:
        ProH.vec.FV().NumPy()[:] = R1
        ProL.Set(ProH)
        RerrorReduced1[:, 0] = ProL.vec.FV().NumPy()[:]
        for i in range(cutoff):
            ProH.vec.FV().NumPy()[:] = A0H[:, i]
            ProL.Set(ProH)
            RerrorReduced1[:, i + 1] = ProL.vec.FV().NumPy()[:]
            ProH.vec.FV().NumPy()[:] = A1H[:, i]
            ProL.Set(ProH)
            RerrorReduced1[:, i + cutoff + 1] = ProL.vec.FV().NumPy()[:]
    #E2
    for i in range(cutoff):
        read_vec.FV().NumPy()[:] = u2Truncated[:, i]
        write_vec.data = a0.mat * read_vec
        A0H[:, i] = write_vec.FV().NumPy()
        write_vec.data = a1.mat * read_vec
        A1H[:, i] = write_vec.FV().NumPy()
    HA0H2 = (np.conjugate(np.transpose(u2Truncated)) @ A0H)
    HA1H2 = (np.conjugate(np.transpose(u2Truncated)) @ A1H)
    HR2 = (np.conjugate(np.transpose(u2Truncated)) @ np.transpose(R2))

    if PODErrorBars == True:
        ProH.vec.FV().NumPy()[:] = R2
        ProL.Set(ProH)
        RerrorReduced2[:, 0] = ProL.vec.FV().NumPy()[:]
        for i in range(cutoff):
            ProH.vec.FV().NumPy()[:] = A0H[:, i]
            ProL.Set(ProH)
            RerrorReduced2[:, i + 1] = ProL.vec.FV().NumPy()[:]
            ProH.vec.FV().NumPy()[:] = A1H[:, i]
            ProL.Set(ProH)
            RerrorReduced2[:, i + cutoff + 1] = ProL.vec.FV().NumPy()[:]
    #E3
    for i in range(cutoff):
        read_vec.FV().NumPy()[:] = u3Truncated[:, i]
        write_vec.data = a0.mat * read_vec
        A0H[:, i] = write_vec.FV().NumPy()
        write_vec.data = a1.mat * read_vec
        A1H[:, i] = write_vec.FV().NumPy()
    HA0H3 = (np.conjugate(np.transpose(u3Truncated)) @ A0H)
    HA1H3 = (np.conjugate(np.transpose(u3Truncated)) @ A1H)
    HR3 = (np.conjugate(np.transpose(u3Truncated)) @ np.transpose(R3))

    if PODErrorBars == True:
        ProH.vec.FV().NumPy()[:] = R3
        ProL.Set(ProH)
        RerrorReduced3[:, 0] = ProL.vec.FV().NumPy()[:]
        for i in range(cutoff):
            ProH.vec.FV().NumPy()[:] = A0H[:, i]
            ProL.Set(ProH)
            RerrorReduced3[:, i + 1] = ProL.vec.FV().NumPy()[:]
            ProH.vec.FV().NumPy()[:] = A1H[:, i]
            ProL.Set(ProH)
            RerrorReduced3[:, i + cutoff + 1] = ProL.vec.FV().NumPy()[:]

    #Clear the variables
    A0H, A1H = None, None
    a0, a1 = None, None

    ########################################################################
    #Sort out the error bounds
    if PODErrorBars == True:
        if BigProblem == True:
            MR1 = np.zeros([ndof0, cutoff * 2 + 1], dtype=np.complex64)
            MR2 = np.zeros([ndof0, cutoff * 2 + 1], dtype=np.complex64)
            MR3 = np.zeros([ndof0, cutoff * 2 + 1], dtype=np.complex64)
        else:
            MR1 = np.zeros([ndof0, cutoff * 2 + 1], dtype=complex)
            MR2 = np.zeros([ndof0, cutoff * 2 + 1], dtype=complex)
            MR3 = np.zeros([ndof0, cutoff * 2 + 1], dtype=complex)

        u, v = fes0.TnT()

        m = BilinearForm(fes0)
        m += SymbolicBFI(InnerProduct(u, v))
        f = LinearForm(fes0)
        m.Assemble()
        c = Preconditioner(m, "local")
        c.Update()
        inverse = CGSolver(m.mat, c.mat, precision=1e-20, maxsteps=500)

        ErrorGFU = GridFunction(fes0)
        for i in range(2 * cutoff + 1):
            #E1
            ProL.vec.data.FV().NumPy()[:] = RerrorReduced1[:, i]
            ProL.vec.data -= m.mat * ErrorGFU.vec
            ErrorGFU.vec.data += inverse * ProL.vec
            MR1[:, i] = ErrorGFU.vec.FV().NumPy()

            #E2
            ProL.vec.data.FV().NumPy()[:] = RerrorReduced2[:, i]
            ProL.vec.data -= m.mat * ErrorGFU.vec
            ErrorGFU.vec.data += inverse * ProL.vec
            MR2[:, i] = ErrorGFU.vec.FV().NumPy()

            #E3
            ProL.vec.data.FV().NumPy()[:] = RerrorReduced3[:, i]
            ProL.vec.data -= m.mat * ErrorGFU.vec
            ErrorGFU.vec.data += inverse * ProL.vec
            MR3[:, i] = ErrorGFU.vec.FV().NumPy()

        G_Store = np.zeros([2 * cutoff + 1, 2 * cutoff + 1, 6], dtype=complex)
        G_Store[:, :, 0] = np.transpose(np.conjugate(RerrorReduced1)) @ MR1
        G_Store[:, :, 1] = np.transpose(np.conjugate(RerrorReduced2)) @ MR2
        G_Store[:, :, 2] = np.transpose(np.conjugate(RerrorReduced3)) @ MR3
        G_Store[:, :, 3] = np.transpose(np.conjugate(RerrorReduced1)) @ MR2
        G_Store[:, :, 4] = np.transpose(np.conjugate(RerrorReduced1)) @ MR3
        G_Store[:, :, 5] = np.transpose(np.conjugate(RerrorReduced2)) @ MR3
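
        # Each slice of G_Store is a Hermitian Gram product R_i^H M^{-1} R_j
        # of the projected residual blocks (slices 0-2 the diagonal terms,
        # 3-5 the cross terms); the whole array is passed to
        # Theta1_Lower_Sweep below for the POD error bounds.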

        #Clear the variables
        RerrorReduced1, RerrorReduced2, RerrorReduced3 = None, None, None
        MR1, MR2, MR3 = None, None, None
        fes0, m, c, inverse = None, None, None, None

        fes3 = HCurl(mesh,
                     order=Order,
                     dirichlet="outer",
                     gradientdomains=dom_nrs_metal)
        ndof3 = fes3.ndof
        Omega = Array[0]
        u, v = fes3.TnT()
        amax = BilinearForm(fes3)
        amax += (mu**(-1)) * curl(u) * curl(v) * dx
        amax += (1 - inout) * epsi * u * v * dx
        amax += inout * sigma * (alpha**2) * Mu0 * Omega * u * v * dx

        m = BilinearForm(fes3)
        m += u * v * dx

        apre = BilinearForm(fes3)
        apre += curl(u) * curl(v) * dx + u * v * dx
        pre = Preconditioner(amax, "bddc")

        with TaskManager():
            amax.Assemble()
            m.Assemble()
            apre.Assemble()

            # build gradient matrix as sparse matrix (and corresponding scalar FESpace)
            gradmat, fesh1 = fes3.CreateGradient()
            gradmattrans = gradmat.CreateTranspose()  # transpose sparse matrix
            math1 = gradmattrans @ m.mat @ gradmat  # multiply matrices
            math1[0, 0] += 1  # fix the 1-dim kernel
            invh1 = math1.Inverse(inverse="sparsecholesky")

            # build the Poisson projector with operator Algebra:
            proj = IdentityMatrix() - gradmat @ invh1 @ gradmattrans @ m.mat
            projpre = proj @ pre.mat
            evals, evecs = solvers.PINVIT(amax.mat,
                                          m.mat,
                                          pre=projpre,
                                          num=1,
                                          maxit=50)

        alphaLB = evals[0]

        #Clear the variables (these only exist on the error-bar branch)
        fes3, amax, apre, pre, invh1, m = None, None, None, None, None, None
    else:
        alphaLB, G_Store = False, False

    ######################################################################
    #Produce the sweep on the lower dimensional space
    g = np.zeros([cutoff, NumberofFrequencies, 3], dtype=complex)
    for k, omega in enumerate(Array):
        g[:, k, 0] = np.linalg.solve(HA0H1 + HA1H1 * omega, HR1 * omega)
        g[:, k, 1] = np.linalg.solve(HA0H2 + HA1H2 * omega, HR2 * omega)
        g[:, k, 2] = np.linalg.solve(HA0H3 + HA1H3 * omega, HR3 * omega)
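    # Each solve above is a dense cutoff-by-cutoff reduced system
    # (HA0H + omega * HA1H) g = omega * HR, one per direction per frequency,
    # in place of the original ndof2-sized problems.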
    #Work out where to send each frequency
    Tensor_CPUs = min(NumberofFrequencies, multiprocessing.cpu_count(), CPUs)
    Core_Distribution = []
    Count_Distribution = []
    for i in range(Tensor_CPUs):
        Core_Distribution.append([])
        Count_Distribution.append([])
    #Distribute frequencies between the cores (simple round-robin)
    for i, Omega in enumerate(Array):
        Core_Distribution[i % Tensor_CPUs].append(Omega)
        Count_Distribution[i % Tensor_CPUs].append(i)
    #Distribute the lower dimensional solutions
    Lower_Sols = []
    for i in range(Tensor_CPUs):
        TempArray = np.zeros([cutoff, len(Count_Distribution[i]), 3],
                             dtype=complex)
        for j, Sim in enumerate(Count_Distribution[i]):
            TempArray[:, j, :] = g[:, Sim, :]
        Lower_Sols.append(TempArray)

    #Create the inputs
    Runlist = []
    manager = multiprocessing.Manager()
    counter = manager.Value('i', 0)
    for i in range(Tensor_CPUs):
        Runlist.append(
            (Core_Distribution[i], mesh, fes, fes2, Lower_Sols[i], u1Truncated,
             u2Truncated, u3Truncated, Theta0Sol, xivec, alpha, sigma, mu,
             inout, N0, NumberofFrequencies, counter, PODErrorBars, alphaLB,
             G_Store))

    #Run on the multiple cores
    with multiprocessing.Pool(Tensor_CPUs) as pool:
        Outputs = pool.starmap(Theta1_Lower_Sweep, Runlist)

    #Unpack the outputs
    if PODErrorBars == True:
        ErrorTensors = np.zeros([NumberofFrequencies, 6])
    for i, Output in enumerate(Outputs):
        for j, Num in enumerate(Count_Distribution[i]):
            TensorArray[Num, :] = Output[0][j]
            EigenValues[Num, :] = Output[1][j]
            if PODErrorBars == True:
                ErrorTensors[Num, :] = Output[2][j]

    print(' reduced order systems solved          ')
    print(' frequency sweep complete')
    if PlotPod == True:
        if PODErrorBars == True:
            return TensorArray, EigenValues, N0, PODTensors, PODEigenValues, numelements, ErrorTensors
        else:
            return TensorArray, EigenValues, N0, PODTensors, PODEigenValues, numelements
    else:
        if PODErrorBars == True:
            return TensorArray, EigenValues, N0, numelements, ErrorTensors
        else:
            return TensorArray, EigenValues, N0, numelements
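
Finally, a purely illustrative invocation of PODSweepMulti: every value below is made up for the sketch, and the helpers it relies on (SolverParameters, Theta0, Theta1_Sweep, Theta1_Lower_Sweep, the NGSolve imports, and a VolFiles/ directory containing the mesh) must come from the surrounding codebase. With PlotPod=False and PODErrorBars=False the function returns the four-tuple shown:

import numpy as np

TensorArray, EigenValues, N0, numelements = PODSweepMulti(
    'sphere.vol',                      # loaded from VolFiles/sphere.vol
    Order=2, alpha=0.01,
    inorout={'air': 0, 'sphere': 1},   # material -> inside/outside flag
    mur={'air': 1, 'sphere': 1.5},     # relative permeabilities
    sig={'air': 0, 'sphere': 1e6},     # conductivities
    Array=np.logspace(1, 8, 40),       # frequencies for the full sweep
    PODArray=np.logspace(1, 8, 13),    # snapshot frequencies
    PODTol=1e-4, PlotPod=False, CPUs=4,
    sweepname='sweep1', SavePOD=False,
    PODErrorBars=False, BigProblem=False)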