Example #1
from multiprocessing import Pool, cpu_count

from numpy import array_split
from pandas import concat


def fullParApply(frames, func):
    # Balance out the shuffled data in parallel because it takes a long time.
    # This works for an arbitrary function and an arbitrary list of frames.
    split_frames = list(
        map(lambda frame: array_split(frame, cpu_count()), frames))
    full_frames = []
    with Pool(cpu_count()) as p:
        for frame in split_frames:
            full_frames.append(concat(p.map(func, frame)))
    return full_frames
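A minimal usage sketch for the function above, assuming `array_split` is numpy's, `concat` and the frames are pandas', and `normalize` stands in for any picklable top-level function:

import pandas as pd

def normalize(frame):
    # Hypothetical per-chunk transformation
    return (frame - frame.mean()) / frame.std()

if __name__ == '__main__':
    df = pd.DataFrame({'a': range(1000), 'b': range(1000)})
    balanced, = fullParApply([df], normalize)
    print(balanced.shape)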
Example #2
import multiprocessing
import time
from multiprocessing.queues import JoinableQueue


def create_pool(test_name,
                chrome_options,
                processes=multiprocessing.cpu_count()):
    """
    This function creates the test pool and returns its queues.  Tests need to
    be added to the input queue promptly after calling this, or else the pool
    will exit due to an empty input queue
    :param test_name: The test name will be used as the name field in the output JSON report
    :param chrome_options: These are the options sent to the browser.  Using these options you can, among other things,
    control whether or not the browsers run headless
    :param processes: This is the count of Selenium webdriver processes the pool should have in it
    :return: The input and output queues
    """
    global start, output_queue, name
    start = time.time()
    name = test_name

    output_queue = TestRunOutput()
    ctx = multiprocessing.get_context()
    input_queue = JoinableQueue(ctx=ctx)

    workers = []
    for _ in range(processes):
        # Keep the worker object itself; Process.start() returns None, so
        # appending its return value would fill the list with None.
        worker = SeleniumWorker(input_queue, output_queue, chrome_options)
        worker.start()
        workers.append(worker)

    return input_queue, output_queue
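A hypothetical usage sketch (SeleniumWorker, TestRunOutput, and the shape of the test items are project-specific assumptions):

input_queue, output_queue = create_pool('smoke_test', chrome_options, processes=4)
for test in tests_to_run:
    input_queue.put(test)  # enqueue promptly so the workers do not exit early
input_queue.join()  # block until every queued test has been processed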
Example #3
import multiprocessing
import os


def get_bgzip_lines_parallel(
    infilename,
    nprocs=multiprocessing.cpu_count(),
    linebatch=2048,  # number of lines in a batch
    queuesize=2,  # number of batches queued per subprocess
    preproc=None,
    preprocargs=[],
    preprockwargs={},
):
    # this is the number of bytes in each chunk of the file
    # each chunk will be handled by a separate child process
    procsize = os.stat(infilename).st_size / nprocs

    # this is the queue through which the subprocesses return lines
    q = multiprocessing.Queue(nprocs * queuesize)

    # create the subprocesses
    subprocs = []
    for i in range(nprocs):
        start = int(i * procsize)
        end = int((i + 1) * procsize - 1)
        subproc = multiprocessing.Process(
            target=_get_lines_to_pipe,
            args=(
                infilename,
                start,
                end,
                preproc,
                preprocargs,
                preprockwargs,
                q,
                linebatch,
            ),
        )
        subprocs.append(subproc)

    # start all the subprocesses
    for subproc in subprocs:
        subproc.start()

    # to keep track of the number of active subprocs
    active = nprocs
    while active > 0:
        lines = q.get()
        # we received a sentinel value saying that this chunk is complete
        if lines is None:
            active -= 1
        else:
            for line in lines:
                yield line

    # make sure all the subprocesses terminate tidily
    # at this point we have received all the sentinels, so we should be fine
    for subproc in subprocs:
        subproc.join(1)
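A usage sketch, assuming `example.vcf.bgz` exists and that the helper `_get_lines_to_pipe` (not shown above) pushes batches of lines onto the queue followed by a None sentinel. Because batches are consumed as they arrive, line order across chunks is not preserved:

line_count = 0
for line in get_bgzip_lines_parallel('example.vcf.bgz', nprocs=4):
    line_count += 1  # placeholder for real per-line work
print(line_count, 'lines read')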
Example #4
def Pool(NPROC=None):
    """Initialize a multiprocessing `Pool`.

    - Uses `pathos/dill` for serialisation.
    - Provides unified interface for multiprocessing on/off (as a function of NPROC).

    There is some overhead associated with the pool creation,
    so you likely want to re-use a pool rather than repeatedly creating one.
    Consider using `functools.partial` to fix kwargs.

    .. note::
        In contrast to *reading*, in-place writing does not work with multiprocessing.
        This changes with "shared" arrays, but that has not been tested here.
        By contrast, multi*threading* shares the process memory,
        but was significantly slower in the tested (pertinent) cases.

    .. caution::
        `multiprocessing` does not mix with `matplotlib`, so ensure `func` does not
        reference `xp.stats.LP_instance`. In fact, `func` should not reference `xp`
        at all, because it takes time to serialize.

    See example use in `dapper.mods.QG` and `dapper.da_methods.LETKF`.
    """
    if NPROC is False:
        # Yield plain old map
        import builtins

        class NoPool:
            def __enter__(self):
                return builtins

            def __exit__(self, *args):
                pass

        return NoPool()

    else:
        if NPROC is None or NPROC is True:
            NPROC = mpd.cpu_count() - 1  # be nice

        return mpd.Pool(NPROC)
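A sketch of the unified on/off interface, assuming `mpd` is the pathos/dill-backed multiprocessing module imported elsewhere in this module:

def square(x):
    return x ** 2

with Pool(4) as p:  # real pool with 4 worker processes
    results = p.map(square, range(10))

with Pool(False) as p:  # NoPool: p is builtins, so p.map is the lazy built-in
    results = list(p.map(square, range(10)))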
Example #5
def map(func, xx, **kwargs):  # noqa
    """A parallelized version of map.

    Similar to `result = [func(x, **kwargs) for x in xx]`, but also deals with:

    - passing kwargs
    - join(), close()
    - KeyboardInterrupt (not any more)

    Note: in contrast to reading operations, writing "in-place"
    does not work with multiprocessing. This changes with
    "shared" arrays, but this has not been tried out here.
    By contrast, multithreading shares the memory,
    but was significantly slower in the tested (pertinent) cases.

    NB: multiprocessing does not mix with matplotlib,
        so ensure `func` does not reference `self.stats.LP_instance`,
        where `self` is a `@da_method` object.
        In fact, `func` should not reference `self` at all,
        because its serialization is rather slow.

    See example use in `dapper.mods.QG`
    """
    NMAX = mpd.cpu_count() - 1  # Be nice
    NPROC = kwargs.pop("NPROC", NMAX)
    pool = mpd.Pool(NPROC)

    try:
        f = functools.partial(func, **kwargs)  # Fix kwargs

        # map vs imap: https://stackoverflow.com/a/26521507
        result = pool.map(f, xx)
        pool.close()

    except Exception:
        pool.terminate()
        raise

    finally:
        pool.join()

    return result
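A usage sketch of this parallelized `map`, assuming `mpd` and `functools` are imported at module level and the worker function is serializable by dill:

def scale(x, factor=1.0):
    return x * factor

out = map(scale, range(8), factor=3.0, NPROC=2)  # [0.0, 3.0, 6.0, ...]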
Example #6
def FullSweepMulti(Object, Order, alpha, inorout, mur, sig, Array, CPUs,
                   BigProblem):
    Object = Object[:-4] + ".vol"
    #Set up the Solver Parameters
    Solver, epsi, Maxsteps, Tolerance = SolverParameters()

    #Loading the object file
    ngmesh = ngmeshing.Mesh(dim=3)
    ngmesh.Load("VolFiles/" + Object)

    #Creating the mesh and defining the element types
    mesh = Mesh("VolFiles/" + Object)
    mesh.Curve(5)  #This can be used to refine the mesh
    numelements = mesh.ne  #Count the number of elements
    print(" mesh contains " + str(numelements) + " elements")

    #Set up the coefficients
    #Scalars
    Mu0 = 4 * np.pi * 10**(-7)
    NumberofFrequencies = len(Array)
    #Coefficient functions
    mu_coef = [mur[mat] for mat in mesh.GetMaterials()]
    mu = CoefficientFunction(mu_coef)
    inout_coef = [inorout[mat] for mat in mesh.GetMaterials()]
    inout = CoefficientFunction(inout_coef)
    sigma_coef = [sig[mat] for mat in mesh.GetMaterials()]
    sigma = CoefficientFunction(sigma_coef)

    #Set up how the tensor and eigenvalues will be stored
    N0 = np.zeros([3, 3])
    TensorArray = np.zeros([NumberofFrequencies, 9], dtype=complex)
    RealEigenvalues = np.zeros([NumberofFrequencies, 3])
    ImaginaryEigenvalues = np.zeros([NumberofFrequencies, 3])
    EigenValues = np.zeros([NumberofFrequencies, 3], dtype=complex)

    #########################################################################
    #Theta0
    #This section solves the Theta0 problem to calculate both the inputs for
    #the Theta1 problem and calculate the N0 tensor

    #Setup the finite element space
    fes = HCurl(mesh, order=Order, dirichlet="outer", flags={"nograds": True})
    #Count the number of degrees of freedom
    ndof = fes.ndof

    #Define the vectors for the right hand side
    evec = [
        CoefficientFunction((1, 0, 0)),
        CoefficientFunction((0, 1, 0)),
        CoefficientFunction((0, 0, 1))
    ]

    #Setup the grid functions and array which will be used to save
    Theta0i = GridFunction(fes)
    Theta0j = GridFunction(fes)
    Theta0Sol = np.zeros([ndof, 3])

    #Setup the inputs for the functions to run
    Theta0CPUs = min(3, multiprocessing.cpu_count(), CPUs)
    Runlist = []
    for i in range(3):
        if Theta0CPUs < 3:
            NewInput = (fes, Order, alpha, mu, inout, evec[i], Tolerance,
                        Maxsteps, epsi, i + 1, Solver)
        else:
            NewInput = (fes, Order, alpha, mu, inout, evec[i], Tolerance,
                        Maxsteps, epsi, "No Print", Solver)
        Runlist.append(NewInput)
    #Run on the multiple cores
    with multiprocessing.Pool(Theta0CPUs) as pool:
        Output = pool.starmap(Theta0, Runlist)
    print(' solved theta0 problems    ')

    #Unpack the outputs
    for i, Direction in enumerate(Output):
        Theta0Sol[:, i] = Direction

    #Calculate the N0 tensor
    VolConstant = Integrate(1 - mu**(-1), mesh)
    for i in range(3):
        Theta0i.vec.FV().NumPy()[:] = Theta0Sol[:, i]
        for j in range(3):
            Theta0j.vec.FV().NumPy()[:] = Theta0Sol[:, j]
            if i == j:
                N0[i, j] = (alpha**3) * (VolConstant + (1 / 4) * (Integrate(
                    mu**(-1) *
                    (InnerProduct(curl(Theta0i), curl(Theta0j))), mesh)))
            else:
                N0[i, j] = (alpha**3 / 4) * (Integrate(
                    mu**(-1) *
                    (InnerProduct(curl(Theta0i), curl(Theta0j))), mesh))

    #########################################################################
    #Theta1
    #This section solves the Theta1 problem and saves the solution vectors

    #Setup the finite element space
    dom_nrs_metal = [0 if mat == "air" else 1 for mat in mesh.GetMaterials()]
    fes2 = HCurl(mesh,
                 order=Order,
                 dirichlet="outer",
                 complex=True,
                 gradientdomains=dom_nrs_metal)
    #Count the number of degrees of freedom
    ndof2 = fes2.ndof

    #Define the vectors for the right hand side
    xivec = [
        CoefficientFunction((0, -z, y)),
        CoefficientFunction((z, 0, -x)),
        CoefficientFunction((-y, x, 0))
    ]

    #Work out where to send each frequency
    Theta1_CPUs = min(NumberofFrequencies, multiprocessing.cpu_count(), CPUs)
    Core_Distribution = []
    Count_Distribution = []
    for i in range(Theta1_CPUs):
        Core_Distribution.append([])
        Count_Distribution.append([])

    #Distribute between the cores (zig-zag so that neighbouring, similarly
    #expensive frequencies are spread across cores)
    CoreNumber = 0
    count = 1
    for i, Omega in enumerate(Array):
        Core_Distribution[CoreNumber].append(Omega)
        Count_Distribution[CoreNumber].append(i)
        if CoreNumber == Theta1_CPUs - 1 and count == 1:  # last core in use
            count = -1
        elif CoreNumber == 0 and count == -1:
            count = 1
        else:
            CoreNumber += count

    #Create the inputs
    Runlist = []
    manager = multiprocessing.Manager()
    counter = manager.Value('i', 0)
    for i in range(Theta1_CPUs):
        Runlist.append(
            (Core_Distribution[i], mesh, fes, fes2, Theta0Sol, xivec, alpha,
             sigma, mu, inout, Tolerance, Maxsteps, epsi, Solver, N0,
             NumberofFrequencies, False, True, counter, False))

    #Run on the multiple cores
    with multiprocessing.Pool(Theta1_CPUs) as pool:
        Outputs = pool.starmap(Theta1_Sweep, Runlist)

    #Unpack the results
    for i, Output in enumerate(Outputs):
        for j, Num in enumerate(Count_Distribution[i]):
            TensorArray[Num, :] = Output[0][j]
            EigenValues[Num, :] = Output[1][j]

    print("Frequency Sweep complete")

    return TensorArray, EigenValues, N0, numelements
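A standalone sketch of the zig-zag ("boustrophedon") distribution used above; it spreads neighbouring items across cores rather than giving each core a contiguous block. The toy sizes are illustrative only:

ncores, items = 3, list(range(8))
dist = [[] for _ in range(ncores)]
core, step = 0, 1
for item in items:
    dist[core].append(item)
    if core == ncores - 1 and step == 1:  # bounce off the last core
        step = -1
    elif core == 0 and step == -1:  # bounce off the first core
        step = 1
    else:
        core += step
print(dist)  # [[0, 5, 6], [1, 4, 7], [2, 3]]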
Example #7
def Main(
    Function=None,
    ListOfArgSets=None,
    Algorithm=None,
    Method=None,
    BatchSize=None,
    BatchCount=None,
    HideProgressBar=None,
    DefaultValue=None,
    CheckArguments=True,
    PrintExtra=True,
):

    StartTime = time.time()

    if BatchSize is None and BatchCount is None:
        BatchSize = 1

    #TODO: rename 'Algorithm' as 'Method'

    if HideProgressBar is None:
        HideProgressBar = False

    if Algorithm is None:
        Algorithm = 'loop'

    if (CheckArguments):
        ArgumentErrorMessage = ""

        if BatchSize is not None and BatchCount is not None:
            ArgumentErrorMessage += ' BatchSize is not None and BatchCount is not None...\n'

        if (len(ArgumentErrorMessage) > 0):
            if (PrintExtra):
                print("ArgumentErrorMessage:\n", ArgumentErrorMessage)
            raise Exception(ArgumentErrorMessage)

    #Determine if the ArgSets are named, or not.
    #   if not, we will assume the function is of a single variable
    ArgSetsNamed = isinstance(ListOfArgSets[0], dict)
    if PrintExtra: print('ArgSetsNamed', ArgSetsNamed)

    #Build an object to collect the results
    ResultList = []

    #Make a function which uses the invoker and the function, and handles errors gracefully
    def WrappedFunction(ArgSet):
        WrappedResult = None
        try:
            if ArgSetsNamed:
                WrappedResult = Function(**ArgSet)  #Library_FunctionInvoker.Main( Function, ArgSet )
            else:
                WrappedResult = Function(ArgSet)
        except Exception as ExceptionObject:
            print('FAIL: ArgSet ', ArgSet)
            print(ExceptionObject)
            WrappedResult = DefaultValue
        return WrappedResult

    #Make a function which is designed to iterate over the wrapped function
    def BatchWrappedFunction(ArgSets):
        BatchResults = []
        for ArgSet in ArgSets:
            BatchResults.append(WrappedFunction(ArgSet))
        return BatchResults

    if (Algorithm == 'loop'):
        for ArgSet in tqdm.tqdm(ListOfArgSets, disable=HideProgressBar):
            ResultList.append(WrappedFunction(ArgSet))
    elif (Algorithm == 'pp'):
        ErrMsg = 'Not Implemented Yet...'
        raise Exception(ErrMsg)

    elif (Algorithm in ['multiprocessing', 'multiprocessing_on_dill']):

        CPU_count = multiprocessing_on_dill.cpu_count()
        if PrintExtra:
            print('CPU_count', CPU_count)


        if BatchCount is None and BatchSize == 1:
            with multiprocessing_on_dill.Pool(
                    CPU_count, initializer=numpy.random.seed) as PoolObject:
                ResultList = list(
                    tqdm.tqdm(PoolObject.imap(WrappedFunction, ListOfArgSets),
                              total=len(ListOfArgSets),
                              disable=HideProgressBar))
        else:
            ListOfListsOfArgSets = Library_IterableSplitIntoChunks.Main(
                Iterable=ListOfArgSets,
                ChunkCount=BatchCount,
                ChunkLength=BatchSize,
            )
            with multiprocessing_on_dill.Pool(
                    CPU_count, initializer=numpy.random.seed) as PoolObject:
                ListOfResultLists = list(
                    tqdm.tqdm(PoolObject.imap(BatchWrappedFunction,
                                              ListOfListsOfArgSets),
                              total=len(ListOfListsOfArgSets),
                              disable=HideProgressBar))

                ResultList = list(
                    itertools.chain.from_iterable(ListOfResultLists))

    elif (Algorithm == 'mpi4py'):
        ErrMsg = 'Not Implemented Yet...'
        raise Exception(ErrMsg)

    elif (Algorithm == 'dougserver'):
        #dougserver.submitJob
        ErrMsg = 'Not Implemented Yet...'
        raise Exception(ErrMsg)
    else:
        ErrMsg = 'Unrecognized `Algorithm`... Library_ParallelLoop  FAILED to execute'
        raise Exception(ErrMsg)

    EndTime = time.time()
    TimeTaken = EndTime - StartTime
    if not HideProgressBar:
        print('TimeTaken', TimeTaken)

    return ResultList
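A hypothetical usage sketch; `multiprocessing_on_dill`, `tqdm`, and the chunking helper are assumed importable, and `Square` stands in for any function of a single variable:

def Square(x):
    return x * x

Results = Main(
    Function=Square,
    ListOfArgSets=list(range(100)),
    Algorithm='multiprocessing',
)

Passing dictionaries instead makes ArgSetsNamed true, so each ArgSet is splatted into the function as keyword arguments.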
Example #8
import multiprocessing

import numpy

# Dummy stand-ins so the snippet runs end to end
cube = 'hello'
coord_names = 'boo'
agg_method = 'mean'
nlat = 70
new_lat_bounds = numpy.arange(nlat)


def lat_aggregate(cube, coord_names, lat_bounds, agg_method):
    """Dummy."""

    return lat_bounds


# Start my pool
print('CPUs:', multiprocessing.cpu_count())
pool = multiprocessing.Pool(multiprocessing.cpu_count())

# Build task list
tasks = []
for lat_index in range(0, nlat):
    tasks.append((cube, coord_names, new_lat_bounds[lat_index], agg_method))

# Run tasks
results = [pool.apply_async(lat_aggregate, t) for t in tasks]

# Process results
for lat_index, result in enumerate(results):
    lat_agg = result.get()
    print(lat_agg)
    #new_data[..., lat_index] = lat_agg.data

# Tidy up the pool
pool.close()
pool.join()
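The same fan-out can be written more compactly with starmap, which unpacks each task tuple and blocks until every result is back (a sketch, reusing the toy data above):

with multiprocessing.Pool() as pool:
    lat_aggs = pool.starmap(lat_aggregate, tasks)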
Example #9
    parser.add_argument('-itab', type=str, required=True, help='Input table name')
    parser.add_argument('-otab', type=str, required=True, help='Output table name')
    parser.add_argument('-c', '--clear', action='store_true', required=False, help='Clear output table or append if missing')
    parser.add_argument('-idcol', type=str, required=True, help='Document ID SQL column name')
    parser.add_argument('-txtcol', type=str, required=True, help='Document text SQL column name')
    parser.add_argument('-rncol', type=str, required=True, help='Row number column name (for batching)')
    parser.add_argument('-start', type=int, default=1, required=False, help='Starting row number (defaults to 1)')
    parser.add_argument('-end', type=int, default=0, required=False, help='Ending row number (default of 0 will use maximum row number)')
    parser.add_argument('-batch', type=int, default=100, required=False, help='Batch size (defaults to 100)')
    parser.add_argument('-mode', type=str, choices=['combined', 'separate'], default='separate', required=False, help='Combined: output single table column and eHOST classes as target_modifier. Separate (default): output separate table columns for targets and modifiers and eHOST classes as targets with modifiers as attributes')
    parser.add_argument('-edir', type=str, default=None, required=False, help='Output directory for generated eHOST project, can be absolute or relative to the pyConTextPipeline directory (defaults to None)')
    parser.add_argument('-stok', type=str, choices=['pyrush', 'resplit', 'helpers'], default='pyrush', required=False, help='Sentence tokenizer - pyrush (default - for splitting medical documents), resplit (for splitting lists), or helpers (modified from pyConTextNLP built-in, for more normal text/delimiters)')
    parser.add_argument('-roc', type=float, default=1.0, required=False, help='Add misspellings/variations of target lexical terms >=4 characters based on the corpus (built-in Ratcliff/Obershelp algorithm). Coefficient of 1.0 (default/off) means include exact matches only and 0 means include very dissimilar matches (0.9 recommended for 1-2 character errors)')
    parser.add_argument('-tfile', type=str, default='./kb/targets.yml', required=False, help='pyConTextNLP targets file (defaults to ./kb/targets.yml)')
    parser.add_argument('-mfile', type=str, default='./kb/modifiers.yml', required=False, help='pyConTextNLP modifiers file (defaults to ./kb/modifiers.yml)')
    parser.add_argument('-p', '--processes', type=int, default=cpu_count(), required=False, help='Number of processes to spawn (defaults to number of logical CPU cores)')
    args = parser.parse_args()

    pctp = pyConTextPipeline()
    print("\nTask to be run...")
    print(pctp.__str__())
    print("Is this correct(y/n)?")
    if (input() != 'y'):  #current logging method won't print input text until after input
        print("Exiting...")
        sys.stdout = backup
        sys.exit(0)
    else:
        if pctp.roc != 1.0:
            mf_time = default_timer()
            pctp.find_misspellings()
            hm = divmod(default_timer() - mf_time, 3600)
            print("--- Misspellings finder took %dh %dm %.2fs ---" % (hm[0], *divmod(hm[1], 60)))
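A hypothetical invocation of this argument parser (the script name and the table/column names are assumptions; only -itab, -otab, -idcol, -txtcol, and -rncol are required):

python pyConTextPipeline.py -itab notes_in -otab notes_out -idcol doc_id -txtcol note_text -rncol row_num -batch 200 -p 4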
Example #10
def Main(
    Function=None,
    #Functions = None,
    DomainMinimumPoint=None,
    DomainMaximumPoint=None,
    ObservedDataset=None,
    Markersize=None,
    PlotThreeDimensional=False,
    ShowContours=False,
    ShowContourLabels=False,
    SurfaceType=None,
    PluginPointCount=None,
    PluginSimultaneous=None,
    Xlabel="X",
    Ylabel="Y",
    Zlabel="Z",
    PlotTitle=None,
    LogX=False,
    LogY=False,
    LogZ=False,
    Zorder=None,
    ScaleRange=None,
    ColorMap=None,
    CheckArguments=True,
    SaveFigureFilePath=None,
    HideProgressBar=None,
    Parallelize=None,
    BatchCount=None,
    BatchSize=None,
    ResultFormat=None,
    PickleResult=None,
    ExistingFigure=None,
    PrintExtra=False,
):
    Result = None

    #---------------------------------------------------------------------------
    #ARG CHECKING:
    if (CheckArguments):
        ArgumentErrorMessage = ""

        #Make sure we have enough information for a domain box:
        if ObservedDataset is None:
            if DomainMinimumPoint is None or DomainMaximumPoint is None:
                ArgumentErrorMessage += 'Need a dataset or a domain box for graphing a density map\n'

        #Make sure that the format of the observed dataset is correct:
        elif ObservedDataset is not None:
            if (Type_NumpyTwoDimensionalDataset.Main(ObservedDataset) != True):
                ArgumentErrorMessage += "(Type_NumpyTwoDimensionalDataset.Main(ObservedDataset) != True)\n"

        if (len(ArgumentErrorMessage) > 0):
            if (PrintExtra):
                print("ArgumentErrorMessage:\n", ArgumentErrorMessage)
            raise Exception(ArgumentErrorMessage)

    #---------------------------------------------------------------------------
    #ARG FORMATTING / FIXING /
    #Figure out parallel processing loop settings:
    if HideProgressBar is None:
        HideProgressBar = False

    if BatchCount is None and BatchSize is None:
        BatchCount = 100

    if Parallelize is None:
        Parallelize = False

    LoopAlgorithm = None
    if Parallelize:
        LoopAlgorithm = 'multiprocessing'
    else:
        LoopAlgorithm = 'loop'

    #Figure out how many dimensions matplotlib needs to render
    PlotDimensionCount = None
    if PlotThreeDimensional:
        PlotDimensionCount = 3
    else:
        PlotDimensionCount = 2

    #Fill in any none values to defaults:
    if Markersize is None:
        Markersize = 1

    #Extract all the observations in X, Y coordinates:
    if (ObservedDataset is not None):
        ObservedPointsCount = len(ObservedDataset)
        ObservedX = ObservedDataset.T[0]
        ObservedY = ObservedDataset.T[1]
    else:
        ObservedPointsCount = 0
        ObservedX = numpy.array([])
        ObservedY = numpy.array([])

    #Infer the plot domain minimums and maximums from the dataset if omitted
    if (DomainMinimumPoint is None):
        DomainMinimumPoint = numpy.nanmin(ObservedDataset, axis=0)
    if (DomainMaximumPoint is None):
        DomainMaximumPoint = numpy.nanmax(ObservedDataset, axis=0)

    #Further format the domain box information to smaller variables:
    DomainMinimumPoint = numpy.array(DomainMinimumPoint)
    DomainMaximumPoint = numpy.array(DomainMaximumPoint)
    xmin, ymin = DomainMinimumPoint
    xmax, ymax = DomainMaximumPoint
    xrng, yrng = DomainMaximumPoint - DomainMinimumPoint
    if PrintExtra:
        print('DomainMinimumPoint', DomainMinimumPoint)
        print('DomainMaximumPoint', DomainMaximumPoint)

    #Plugin options:
    if PluginPointCount is None:
        PluginPointCount = 10**5

    if PluginSimultaneous is None:
        PluginSimultaneous = False

    #misc formatting options:
    if ResultFormat is None:
        ResultFormat = 'fig'

    if ScaleRange is None:
        ScaleRange = 1.

    if PickleResult is None:
        PickleResult = False

    if Zorder is None:
        Zorder = 0  #Not using... causes more trouble than it's worth

    if SurfaceType is None:
        SurfaceType = 'solid'

    if ColorMap is None:
        #ColorMap = matplotlib.pyplot.cm.gist_earth_r
        ColorMap = matplotlib.pyplot.cm.viridis  #This is the built in default
    elif ColorMap in ['grey', 'gray']:
        ColorMap = matplotlib.pyplot.cm.gray

    #---------------------------------------------------------------------------
    #CREATE FIGURE OBJECT:
    fig = None
    subplot = None
    if ExistingFigure is None:
        fig = Library_FigureCreate.Main(
            DimensionCount=PlotDimensionCount,
            DimensionLabels=[Xlabel, Ylabel, Zlabel],
            PlotTitle=PlotTitle,
            LimitBoxMinimum=DomainMinimumPoint.tolist() + [None],
            LimitBoxMaximum=DomainMaximumPoint.tolist() + [None],
            FontSize=None,
            ShowGrid=None)
        subplot = fig.onlysubplot

    else:
        fig = ExistingFigure
        subplot = fig.onlysubplot

    #---------------------------------------------------------------------------
    #BEGIN BUILDING COLOR MAP:
    #Make meshgrid of points from minimum and maximum (plugged into the function)

    PluginPointCountX = int(numpy.sqrt(PluginPointCount))
    PluginPointCountY = int(numpy.sqrt(PluginPointCount))

    #FIXME: (NOTE)
    #   mgrid == meshgrid.T
    #       1) http://louistiao.me/posts/numpy-mgrid-vs-meshgrid/
    #   This problem showed up when teaching the SC python minicourse in 2020 summer
    #       1) https://github.com/uofscphysics/STEM_Python_Course/tree/Summer2020/02_Week2/00_Probability_Theory
    #       2) Test_DensityColorPlot
    X, Y = numpy.mgrid[xmin:xmax:complex(PluginPointCountX),
                       ymin:ymax:complex(PluginPointCountY)]  #e.g. 1000 x 1000 -> 1 million points

    PointsToPlugIn = numpy.vstack([X.ravel(), Y.ravel()])
    PointsToPlugInDataset = PointsToPlugIn.T
    PlugInPointsCount = len(PointsToPlugInDataset)
    if PrintExtra:
        print('X.shape, Y.shape', X.shape, Y.shape)
        print('PlugInPointsCount', PlugInPointsCount)
        print('PointsToPlugInDataset.shape', PointsToPlugInDataset.shape)
        print('PointsToPlugInDataset[0]', PointsToPlugInDataset[0])

    #Plug meshgrid points into function (store values as a float datatype)
    if PrintExtra: print('LoopAlgorithm', LoopAlgorithm)
    FunctionResultValuesForGrid = None
    if PluginSimultaneous:
        #If the function can handle simultaneous points with numpy broadcasting...
        #   ... then the best optimization is to throw equal chunks at the func for each core:
        CPU_count = multiprocessing_on_dill.cpu_count()
        PointsToPlugInDatasetChunks = Library_IterableSplitIntoChunks.Main(
            Iterable=PointsToPlugInDataset,
            ChunkCount=CPU_count,
        )
        FunctionResultValuesForGrid = numpy.vstack(
            Library_ParallelLoop.Main(
                Function=Function,
                ListOfArgSets=PointsToPlugInDatasetChunks,
                Algorithm=LoopAlgorithm,
                #BatchCount      = 100,
                BatchCount=BatchCount,
                BatchSize=BatchSize,
                HideProgressBar=HideProgressBar,
                PrintExtra=PrintExtra,
            ))
    else:
        #If the function can only handle one point at a time...
        #   the best optimization is just to throw all the points into a big queue:
        FunctionResultValuesForGrid = Library_ParallelLoop.Main(
            Function=Function,
            ListOfArgSets=PointsToPlugInDataset,
            Algorithm=LoopAlgorithm,
            #BatchCount      = 100,
            BatchCount=BatchCount,
            BatchSize=BatchSize,
            HideProgressBar=HideProgressBar,
            PrintExtra=PrintExtra,
        )

    FunctionResultValuesForGrid = numpy.array(FunctionResultValuesForGrid,
                                              dtype=float)  #numpy.float is deprecated
    if LogZ:
        FunctionResultValuesForGrid = numpy.log(
            FunctionResultValuesForGrid
        )  #TODO: scale the image axes without changing the function values

    Z = numpy.reshape(FunctionResultValuesForGrid, X.shape)
    zmin = numpy.nanmin(Z)
    zmax = numpy.nanmax(Z)
    zrng = zmax - zmin
    if PrintExtra:  #>0:
        print('Z.shape', Z.shape)
        print('zmin, zmax, zrng', zmin, zmax, zrng)
        if PrintExtra > 1:
            print('FunctionResultValuesForGrid', FunctionResultValuesForGrid)

    #Design the plot with the coordinate values (this could be done in its own function)
    if (PlotThreeDimensional == True):

        #Plot the surface:
        if SurfaceType == 'solid':
            surface = subplot.plot_surface(
                X,
                Y,
                Z,
                rstride=1,
                cstride=1,
                cmap=ColorMap,
                #zorder=Zorder,
                antialiased=False,
                vmin=zmin,
                vmax=zmax,
            )
        elif SurfaceType == 'wireframe':
            subplot.plot_wireframe(X, Y, Z, rstride=1, cstride=1)

        #Plot the observed dataset:
        if ObservedDataset is not None:

            #Get Z-values for the data based on the function:
            ObservedZ = []
            for PointToPlugIn in ObservedDataset:
                FunctionValueForPointToPlugIn = 0.0
                try:
                    FunctionValueForPointToPlugIn = Function(PointToPlugIn)
                except Exception:
                    print('Function(', PointToPlugIn, ')',
                          ' failed... skipping')
                ObservedZ.append(FunctionValueForPointToPlugIn)
            ObservedZ = numpy.array(ObservedZ).flatten()
            if PrintExtra:
                print('ObservedX.shape', ObservedX.shape)
                print('ObservedY.shape', ObservedY.shape)
                print('ObservedZ.shape', ObservedZ.shape)

            #Draw little vertical bars going up to each point:
            #https://towardsdatascience.com/an-easy-introduction-to-3d-plotting-with-matplotlib-801561999725
            subplot.bar3d(
                ObservedX,
                ObservedY,
                ObservedZ - zrng / 100,
                #numpy.zeros(ObservedPointsCount),
                numpy.ones(ObservedPointsCount) * xrng / 100,
                numpy.ones(ObservedPointsCount) * yrng / 100,
                numpy.ones(ObservedPointsCount) * 2 * zrng / 100,
                #numpy.ones(ObservedPointsCount)*ObservedZ + 2* zrng / 100
                #color='aqua'
                #zorder=Zorder,
            )

    else:

        #Plot the colormap of the function:
        heatmap = subplot.imshow(
            numpy.rot90(Z),
            cmap=ColorMap,
            extent=[xmin, xmax, ymin, ymax],
            aspect='auto',
            interpolation=None,
            #norm=LogNorm(vmin=0.01, vmax=1)
            #zorder=Zorder,
        )

        #Add color bar (showing Z-axis sense of scale)
        cbar = matplotlib.pyplot.colorbar(heatmap, label=Zlabel)

        #Add the observations:
        if (ObservedDataset is not None):
            matplotlib.pyplot.plot(
                ObservedX,
                ObservedY,
                color='red',
                marker='o',
                linestyle='None',
                markersize=Markersize,
                #zorder=Zorder,
            )

        #Plot Contours: ( From http://matplotlib.org/examples/pylab_examples/contour_demo.html )
        if ShowContours:
            CS = matplotlib.pyplot.contour(
                X,
                Y,
                Z,
                5,  # number of contours
                colors='k',  # negative contours will be dashed by default
            )
            if ShowContourLabels:
                matplotlib.pyplot.clabel(CS, fontsize=9, inline=1)

    #Save the final figure:
    if (SaveFigureFilePath is not None):
        matplotlib.pyplot.savefig(SaveFigureFilePath)

    #Craft the final result object:
    if ResultFormat == 'fig':
        Result = fig
    elif ResultFormat == 'Dictionary':
        Result = {}
        Result['fig'] = fig
        Result['PointsToPlugInDataset'] = PointsToPlugInDataset
        Result['FunctionResultValuesForGrid'] = FunctionResultValuesForGrid

    #Should we store the points we plugged into the function to file?
    if PickleResult:
        Library_LazyPickleDump.Main(PointsToPlugInDataset)
        Library_LazyPickleDump.Main(FunctionResultValuesForGrid)
        Library_LazyPickleDump.Main(Result)

    return Result
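A hypothetical usage sketch: a density map of an unnormalised 2D Gaussian over [-3, 3]^2, run serially (the project helpers such as Library_FigureCreate are assumed importable):

import numpy

def Gauss2D(Point):
    x, y = Point
    return numpy.exp(-(x ** 2 + y ** 2) / 2.0)

Figure = Main(
    Function=Gauss2D,
    DomainMinimumPoint=[-3.0, -3.0],
    DomainMaximumPoint=[3.0, 3.0],
    PluginPointCount=10 ** 4,
    Parallelize=False,
    SaveFigureFilePath='gauss_density.png',
)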
Example #11
def PODSweepMulti(Object, Order, alpha, inorout, mur, sig, Array, PODArray,
                  PODTol, PlotPod, CPUs, sweepname, SavePOD, PODErrorBars,
                  BigProblem):
    Object = Object[:-4] + ".vol"
    #Set up the Solver Parameters
    Solver, epsi, Maxsteps, Tolerance = SolverParameters()

    #Loading the object file
    ngmesh = ngmeshing.Mesh(dim=3)
    ngmesh.Load("VolFiles/" + Object)

    #Creating the mesh and defining the element types
    mesh = Mesh("VolFiles/" + Object)
    mesh.Curve(5)  #This can be used to refine the mesh
    numelements = mesh.ne  #Count the number of elements
    print(" mesh contains " + str(numelements) + " elements")

    #Set up the coefficients
    #Scalars
    Mu0 = 4 * np.pi * 10**(-7)
    NumberofSnapshots = len(PODArray)
    NumberofFrequencies = len(Array)
    #Coefficient functions
    mu_coef = [mur[mat] for mat in mesh.GetMaterials()]
    mu = CoefficientFunction(mu_coef)
    inout_coef = [inorout[mat] for mat in mesh.GetMaterials()]
    inout = CoefficientFunction(inout_coef)
    sigma_coef = [sig[mat] for mat in mesh.GetMaterials()]
    sigma = CoefficientFunction(sigma_coef)

    #Set up how the tensor and eigenvalues will be stored
    N0 = np.zeros([3, 3])
    TensorArray = np.zeros([NumberofFrequencies, 9], dtype=complex)
    RealEigenvalues = np.zeros([NumberofFrequencies, 3])
    ImaginaryEigenvalues = np.zeros([NumberofFrequencies, 3])
    EigenValues = np.zeros([NumberofFrequencies, 3], dtype=complex)

    #########################################################################
    #Theta0
    #This section solves the Theta0 problem to calculate both the inputs for
    #the Theta1 problem and calculate the N0 tensor

    #Setup the finite element space
    fes = HCurl(mesh, order=Order, dirichlet="outer", flags={"nograds": True})
    #Count the number of degrees of freedom
    ndof = fes.ndof

    #Define the vectors for the right hand side
    evec = [
        CoefficientFunction((1, 0, 0)),
        CoefficientFunction((0, 1, 0)),
        CoefficientFunction((0, 0, 1))
    ]

    #Setup the grid functions and array which will be used to save
    Theta0i = GridFunction(fes)
    Theta0j = GridFunction(fes)
    Theta0Sol = np.zeros([ndof, 3])

    #Setup the inputs for the functions to run
    Theta0CPUs = min(3, multiprocessing.cpu_count(), CPUs)
    Runlist = []
    for i in range(3):
        if Theta0CPUs < 3:
            NewInput = (fes, Order, alpha, mu, inout, evec[i], Tolerance,
                        Maxsteps, epsi, i + 1, Solver)
        else:
            NewInput = (fes, Order, alpha, mu, inout, evec[i], Tolerance,
                        Maxsteps, epsi, "No Print", Solver)
        Runlist.append(NewInput)
    #Run on the multiple cores
    with multiprocessing.Pool(Theta0CPUs) as pool:
        Output = pool.starmap(Theta0, Runlist)
    print(' solved theta0 problems    ')

    #Unpack the outputs
    for i, Direction in enumerate(Output):
        Theta0Sol[:, i] = Direction

    #Calculate the N0 tensor
    VolConstant = Integrate(1 - mu**(-1), mesh)
    for i in range(3):
        Theta0i.vec.FV().NumPy()[:] = Theta0Sol[:, i]
        for j in range(3):
            Theta0j.vec.FV().NumPy()[:] = Theta0Sol[:, j]
            if i == j:
                N0[i, j] = (alpha**3) * (VolConstant + (1 / 4) * (Integrate(
                    mu**(-1) *
                    (InnerProduct(curl(Theta0i), curl(Theta0j))), mesh)))
            else:
                N0[i, j] = (alpha**3 / 4) * (Integrate(
                    mu**(-1) *
                    (InnerProduct(curl(Theta0i), curl(Theta0j))), mesh))

    #########################################################################
    #Theta1
    #This section solves the Theta1 problem and saves the solution vectors

    #Setup the finite element space
    dom_nrs_metal = [0 if mat == "air" else 1 for mat in mesh.GetMaterials()]
    fes2 = HCurl(mesh,
                 order=Order,
                 dirichlet="outer",
                 complex=True,
                 gradientdomains=dom_nrs_metal)
    #Count the number of degrees of freedom
    ndof2 = fes2.ndof

    #Define the vectors for the right hand side
    xivec = [
        CoefficientFunction((0, -z, y)),
        CoefficientFunction((z, 0, -x)),
        CoefficientFunction((-y, x, 0))
    ]

    #Work out where to send each frequency
    Theta1_CPUs = min(NumberofSnapshots, multiprocessing.cpu_count(), CPUs)
    Core_Distribution = []
    Count_Distribution = []
    for i in range(Theta1_CPUs):
        Core_Distribution.append([])
        Count_Distribution.append([])

    #Distribute between the cores (zig-zag so that neighbouring, similarly
    #expensive frequencies are spread across cores)
    CoreNumber = 0
    count = 1
    for i, Omega in enumerate(PODArray):
        Core_Distribution[CoreNumber].append(Omega)
        Count_Distribution[CoreNumber].append(i)
        if CoreNumber == Theta1_CPUs - 1 and count == 1:  # last core in use
            count = -1
        elif CoreNumber == 0 and count == -1:
            count = 1
        else:
            CoreNumber += count

    #Create the inputs
    Runlist = []
    manager = multiprocessing.Manager()
    counter = manager.Value('i', 0)
    for i in range(Theta1_CPUs):
        if PlotPod == True:
            Runlist.append(
                (Core_Distribution[i], mesh, fes, fes2, Theta0Sol, xivec,
                 alpha, sigma, mu, inout, Tolerance, Maxsteps, epsi, Solver,
                 N0, NumberofSnapshots, True, True, counter, BigProblem))
        else:
            Runlist.append(
                (Core_Distribution[i], mesh, fes, fes2, Theta0Sol, xivec,
                 alpha, sigma, mu, inout, Tolerance, Maxsteps, epsi, Solver,
                 N0, NumberofSnapshots, True, False, counter, BigProblem))

    #Run on the multiple cores
    with multiprocessing.Pool(Theta1_CPUs) as pool:
        Outputs = pool.starmap(Theta1_Sweep, Runlist)

    #Unpack the results
    if BigProblem == True:
        Theta1Sols = np.zeros([ndof2, NumberofSnapshots, 3],
                              dtype=np.complex64)
    else:
        Theta1Sols = np.zeros([ndof2, NumberofSnapshots, 3], dtype=complex)
    if PlotPod == True:
        PODTensors = np.zeros([NumberofSnapshots, 9], dtype=complex)
        PODEigenValues = np.zeros([NumberofSnapshots, 3], dtype=complex)
    for i, Output in enumerate(Outputs):
        for j, Num in enumerate(Count_Distribution[i]):
            if PlotPod == True:
                PODTensors[Num, :] = Output[0][j]
                PODEigenValues[Num, :] = Output[1][j]
                Theta1Sols[:, Num, :] = Output[2][:, j, :]
            else:
                Theta1Sols[:, Num, :] = Output[:, j, :]

    #########################################################################
    #POD

    print(' performing SVD              ', end='\r')
    #Perform SVD on the solution vector matrices
    u1Truncated, s1, vh1 = np.linalg.svd(Theta1Sols[:, :, 0],
                                         full_matrices=False)
    u2Truncated, s2, vh2 = np.linalg.svd(Theta1Sols[:, :, 1],
                                         full_matrices=False)
    u3Truncated, s3, vh3 = np.linalg.svd(Theta1Sols[:, :, 2],
                                         full_matrices=False)
    #Get rid of the solution vectors
    Theta1Sols = None
    #Print an update on progress
    print(' SVD complete      ')

    #scale the value of the modes
    s1norm = s1 / s1[0]
    s2norm = s2 / s2[0]
    s3norm = s3 / s3[0]

    #Decide where to truncate
    cutoff = NumberofSnapshots
    for i in range(NumberofSnapshots):
        if s1norm[i] < PODTol:
            if s2norm[i] < PODTol:
                if s3norm[i] < PODTol:
                    cutoff = i
                    break

    #Truncate the SVD matrices
    u1Truncated = u1Truncated[:, :cutoff]
    u2Truncated = u2Truncated[:, :cutoff]
    u3Truncated = u3Truncated[:, :cutoff]

    ########################################################################
    #Create the ROM

    print(' creating reduced order model', end='\r')
    #Mu0=4*np.pi*10**(-7)
    nu_no_omega = Mu0 * (alpha**2)

    Theta_0 = GridFunction(fes)
    u, v = fes2.TnT()

    if BigProblem == True:
        a0 = BilinearForm(fes2, symmetric=True)
    else:
        a0 = BilinearForm(fes2)
    a0 += SymbolicBFI((mu**(-1)) * InnerProduct(curl(u), curl(v)))
    a0 += SymbolicBFI((1j) * (1 - inout) * epsi * InnerProduct(u, v))
    if BigProblem == True:
        a1 = BilinearForm(fes2, symmetric=True)
    else:
        a1 = BilinearForm(fes2)
    a1 += SymbolicBFI((1j) * inout * nu_no_omega * sigma * InnerProduct(u, v))

    a0.Assemble()
    a1.Assemble()

    Theta_0.vec.FV().NumPy()[:] = Theta0Sol[:, 0]
    r1 = LinearForm(fes2)
    r1 += SymbolicLFI(inout * (-1j) * nu_no_omega * sigma *
                      InnerProduct(Theta_0, v))
    r1 += SymbolicLFI(inout * (-1j) * nu_no_omega * sigma *
                      InnerProduct(xivec[0], v))
    r1.Assemble()
    read_vec = r1.vec.CreateVector()
    write_vec = r1.vec.CreateVector()

    Theta_0.vec.FV().NumPy()[:] = Theta0Sol[:, 1]
    r2 = LinearForm(fes2)
    r2 += SymbolicLFI(inout * (-1j) * nu_no_omega * sigma *
                      InnerProduct(Theta_0, v))
    r2 += SymbolicLFI(inout * (-1j) * nu_no_omega * sigma *
                      InnerProduct(xivec[1], v))
    r2.Assemble()

    Theta_0.vec.FV().NumPy()[:] = Theta0Sol[:, 2]
    r3 = LinearForm(fes2)
    r3 += SymbolicLFI(inout * (-1j) * nu_no_omega * sigma *
                      InnerProduct(Theta_0, v))
    r3 += SymbolicLFI(inout * (-1j) * nu_no_omega * sigma *
                      InnerProduct(xivec[2], v))
    r3.Assemble()

    if PODErrorBars == True:
        fes0 = HCurl(mesh,
                     order=0,
                     dirichlet="outer",
                     complex=True,
                     gradientdomains=dom_nrs_metal)
        ndof0 = fes0.ndof
        RerrorReduced1 = np.zeros([ndof0, cutoff * 2 + 1], dtype=complex)
        RerrorReduced2 = np.zeros([ndof0, cutoff * 2 + 1], dtype=complex)
        RerrorReduced3 = np.zeros([ndof0, cutoff * 2 + 1], dtype=complex)
        ProH = GridFunction(fes2)
        ProL = GridFunction(fes0)
    ########################################################################
    #Create the ROM
    R1 = r1.vec.FV().NumPy()
    R2 = r2.vec.FV().NumPy()
    R3 = r3.vec.FV().NumPy()
    A0H = np.zeros([ndof2, cutoff], dtype=complex)
    A1H = np.zeros([ndof2, cutoff], dtype=complex)

    #E1
    for i in range(cutoff):
        read_vec.FV().NumPy()[:] = u1Truncated[:, i]
        write_vec.data = a0.mat * read_vec
        A0H[:, i] = write_vec.FV().NumPy()
        write_vec.data = a1.mat * read_vec
        A1H[:, i] = write_vec.FV().NumPy()
    HA0H1 = (np.conjugate(np.transpose(u1Truncated)) @ A0H)
    HA1H1 = (np.conjugate(np.transpose(u1Truncated)) @ A1H)
    HR1 = (np.conjugate(np.transpose(u1Truncated)) @ np.transpose(R1))

    if PODErrorBars == True:
        ProH.vec.FV().NumPy()[:] = R1
        ProL.Set(ProH)
        RerrorReduced1[:, 0] = ProL.vec.FV().NumPy()[:]
        for i in range(cutoff):
            ProH.vec.FV().NumPy()[:] = A0H[:, i]
            ProL.Set(ProH)
            RerrorReduced1[:, i + 1] = ProL.vec.FV().NumPy()[:]
            ProH.vec.FV().NumPy()[:] = A1H[:, i]
            ProL.Set(ProH)
            RerrorReduced1[:, i + cutoff + 1] = ProL.vec.FV().NumPy()[:]
    #E2
    for i in range(cutoff):
        read_vec.FV().NumPy()[:] = u2Truncated[:, i]
        write_vec.data = a0.mat * read_vec
        A0H[:, i] = write_vec.FV().NumPy()
        write_vec.data = a1.mat * read_vec
        A1H[:, i] = write_vec.FV().NumPy()
    HA0H2 = (np.conjugate(np.transpose(u2Truncated)) @ A0H)
    HA1H2 = (np.conjugate(np.transpose(u2Truncated)) @ A1H)
    HR2 = (np.conjugate(np.transpose(u2Truncated)) @ np.transpose(R2))

    if PODErrorBars == True:
        ProH.vec.FV().NumPy()[:] = R2
        ProL.Set(ProH)
        RerrorReduced2[:, 0] = ProL.vec.FV().NumPy()[:]
        for i in range(cutoff):
            ProH.vec.FV().NumPy()[:] = A0H[:, i]
            ProL.Set(ProH)
            RerrorReduced2[:, i + 1] = ProL.vec.FV().NumPy()[:]
            ProH.vec.FV().NumPy()[:] = A1H[:, i]
            ProL.Set(ProH)
            RerrorReduced2[:, i + cutoff + 1] = ProL.vec.FV().NumPy()[:]
    #E3
    for i in range(cutoff):
        read_vec.FV().NumPy()[:] = u3Truncated[:, i]
        write_vec.data = a0.mat * read_vec
        A0H[:, i] = write_vec.FV().NumPy()
        write_vec.data = a1.mat * read_vec
        A1H[:, i] = write_vec.FV().NumPy()
    HA0H3 = (np.conjugate(np.transpose(u3Truncated)) @ A0H)
    HA1H3 = (np.conjugate(np.transpose(u3Truncated)) @ A1H)
    HR3 = (np.conjugate(np.transpose(u3Truncated)) @ np.transpose(R3))

    if PODErrorBars == True:
        ProH.vec.FV().NumPy()[:] = R3
        ProL.Set(ProH)
        RerrorReduced3[:, 0] = ProL.vec.FV().NumPy()[:]
        for i in range(cutoff):
            ProH.vec.FV().NumPy()[:] = A0H[:, i]
            ProL.Set(ProH)
            RerrorReduced3[:, i + 1] = ProL.vec.FV().NumPy()[:]
            ProH.vec.FV().NumPy()[:] = A1H[:, i]
            ProL.Set(ProH)
            RerrorReduced3[:, i + cutoff + 1] = ProL.vec.FV().NumPy()[:]

    #Clear the variables
    A0H, A1H = None, None
    a0, a1 = None, None

    ########################################################################
    #Sort out the error bounds
    if PODErrorBars == True:
        if BigProblem == True:
            MR1 = np.zeros([ndof0, cutoff * 2 + 1], dtype=np.complex64)
            MR2 = np.zeros([ndof0, cutoff * 2 + 1], dtype=np.complex64)
            MR3 = np.zeros([ndof0, cutoff * 2 + 1], dtype=np.complex64)
        else:
            MR1 = np.zeros([ndof0, cutoff * 2 + 1], dtype=complex)
            MR2 = np.zeros([ndof0, cutoff * 2 + 1], dtype=complex)
            MR3 = np.zeros([ndof0, cutoff * 2 + 1], dtype=complex)

        u, v = fes0.TnT()

        m = BilinearForm(fes0)
        m += SymbolicBFI(InnerProduct(u, v))
        f = LinearForm(fes0)
        m.Assemble()
        c = Preconditioner(m, "local")
        c.Update()
        inverse = CGSolver(m.mat, c.mat, precision=1e-20, maxsteps=500)

        ErrorGFU = GridFunction(fes0)
        for i in range(2 * cutoff + 1):
            #E1
            ProL.vec.data.FV().NumPy()[:] = RerrorReduced1[:, i]
            ProL.vec.data -= m.mat * ErrorGFU.vec
            ErrorGFU.vec.data += inverse * ProL.vec
            MR1[:, i] = ErrorGFU.vec.FV().NumPy()

            #E2
            ProL.vec.data.FV().NumPy()[:] = RerrorReduced2[:, i]
            ProL.vec.data -= m.mat * ErrorGFU.vec
            ErrorGFU.vec.data += inverse * ProL.vec
            MR2[:, i] = ErrorGFU.vec.FV().NumPy()

            #E3
            ProL.vec.data.FV().NumPy()[:] = RerrorReduced3[:, i]
            ProL.vec.data -= m.mat * ErrorGFU.vec
            ErrorGFU.vec.data += inverse * ProL.vec
            MR3[:, i] = ErrorGFU.vec.FV().NumPy()

        G_Store = np.zeros([2 * cutoff + 1, 2 * cutoff + 1, 6], dtype=complex)
        G_Store[:, :, 0] = np.transpose(np.conjugate(RerrorReduced1)) @ MR1
        G_Store[:, :, 1] = np.transpose(np.conjugate(RerrorReduced2)) @ MR2
        G_Store[:, :, 2] = np.transpose(np.conjugate(RerrorReduced3)) @ MR3
        G_Store[:, :, 3] = np.transpose(np.conjugate(RerrorReduced1)) @ MR2
        G_Store[:, :, 4] = np.transpose(np.conjugate(RerrorReduced1)) @ MR3
        G_Store[:, :, 5] = np.transpose(np.conjugate(RerrorReduced2)) @ MR3

        #Clear the variables
        RerrorReduced1, RerrorReduced2, RerrorReduced3 = None, None, None
        MR1, MR2, MR3 = None, None, None
        fes0, m, c, inverse = None, None, None, None

        fes3 = HCurl(mesh,
                     order=Order,
                     dirichlet="outer",
                     gradientdomains=dom_nrs_metal)
        ndof3 = fes3.ndof
        Omega = Array[0]
        u, v = fes3.TnT()
        amax = BilinearForm(fes3)
        amax += (mu**(-1)) * curl(u) * curl(v) * dx
        amax += (1 - inout) * epsi * u * v * dx
        amax += inout * sigma * (alpha**2) * Mu0 * Omega * u * v * dx

        m = BilinearForm(fes3)
        m += u * v * dx

        apre = BilinearForm(fes3)
        apre += curl(u) * curl(v) * dx + u * v * dx
        pre = Preconditioner(amax, "bddc")

        with TaskManager():
            amax.Assemble()
            m.Assemble()
            apre.Assemble()

            # build gradient matrix as sparse matrix (and corresponding scalar FESpace)
            gradmat, fesh1 = fes3.CreateGradient()
            gradmattrans = gradmat.CreateTranspose()  # transpose sparse matrix
            math1 = gradmattrans @ m.mat @ gradmat  # multiply matrices
            math1[0, 0] += 1  # fix the 1-dim kernel
            invh1 = math1.Inverse(inverse="sparsecholesky")

            # build the Poisson projector with operator Algebra:
            proj = IdentityMatrix() - gradmat @ invh1 @ gradmattrans @ m.mat
            projpre = proj @ pre.mat
            evals, evecs = solvers.PINVIT(amax.mat,
                                          m.mat,
                                          pre=projpre,
                                          num=1,
                                          maxit=50)

        alphaLB = evals[0]

        #Clear the variables
        fes3, amax, apre, pre, invh1, m = None, None, None, None, None, None
    else:
        alphaLB, G_Store = False, False

    ######################################################################
    #Produce the sweep on the lower dimensional space
    g = np.zeros([cutoff, NumberofFrequencies, 3], dtype=complex)
    for k, omega in enumerate(Array):
        g[:, k, 0] = np.linalg.solve(HA0H1 + HA1H1 * omega, HR1 * omega)
        g[:, k, 1] = np.linalg.solve(HA0H2 + HA1H2 * omega, HR2 * omega)
        g[:, k, 2] = np.linalg.solve(HA0H3 + HA1H3 * omega, HR3 * omega)
    #Work out where to send each frequency
    Tensor_CPUs = min(NumberofFrequencies, multiprocessing.cpu_count(), CPUs)
    Core_Distribution = []
    Count_Distribution = []
    for i in range(Tensor_CPUs):
        Core_Distribution.append([])
        Count_Distribution.append([])
    #Distribute frequencies between the cores
    CoreNumber = 0
    for i, Omega in enumerate(Array):
        Core_Distribution[CoreNumber].append(Omega)
        Count_Distribution[CoreNumber].append(i)
        if CoreNumber == Tensor_CPUs - 1:
            CoreNumber = 0
        else:
            CoreNumber += 1
    #Distribute the lower dimensional solutions
    Lower_Sols = []
    for i in range(Tensor_CPUs):
        TempArray = np.zeros([cutoff, len(Count_Distribution[i]), 3],
                             dtype=complex)
        for j, Sim in enumerate(Count_Distribution[i]):
            TempArray[:, j, :] = g[:, Sim, :]
        Lower_Sols.append(TempArray)

    #Create the inputs
    Runlist = []
    manager = multiprocessing.Manager()
    counter = manager.Value('i', 0)
    for i in range(Tensor_CPUs):
        Runlist.append(
            (Core_Distribution[i], mesh, fes, fes2, Lower_Sols[i], u1Truncated,
             u2Truncated, u3Truncated, Theta0Sol, xivec, alpha, sigma, mu,
             inout, N0, NumberofFrequencies, counter, PODErrorBars, alphaLB,
             G_Store))

    #Run on the multiple cores
    with multiprocessing.Pool(Tensor_CPUs) as pool:
        Outputs = pool.starmap(Theta1_Lower_Sweep, Runlist)

    #Unpack the outputs
    if PODErrorBars == True:
        ErrorTensors = np.zeros([NumberofFrequencies, 6])
    for i, Output in enumerate(Outputs):
        for j, Num in enumerate(Count_Distribution[i]):
            if PODErrorBars == True:
                TensorArray[Num, :] = Output[0][j]
                EigenValues[Num, :] = Output[1][j]
                ErrorTensors[Num, :] = Output[2][j]
            else:
                TensorArray[Num, :] = Output[0][j]
                EigenValues[Num, :] = Output[1][j]

    print(' reduced order systems solved          ')
    print(' frequency sweep complete')
    if PlotPod == True:
        if PODErrorBars == True:
            return TensorArray, EigenValues, N0, PODTensors, PODEigenValues, numelements, ErrorTensors
        else:
            return TensorArray, EigenValues, N0, PODTensors, PODEigenValues, numelements
    else:
        if PODErrorBars == True:
            return TensorArray, EigenValues, N0, numelements, ErrorTensors
        else:
            return TensorArray, EigenValues, N0, numelements
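A standalone sketch of the truncation rule used in the POD step above: normalise each singular-value spectrum by its leading value and cut at the first index where all three fall below PODTol (the toy spectra are illustrative only):

import numpy as np

PODTol = 1e-4
s1 = np.array([1.0, 1e-2, 1e-5, 1e-8])
s2 = np.array([2.0, 4e-2, 6e-5, 2e-9])
s3 = np.array([0.5, 1e-3, 2e-6, 1e-9])

s1norm, s2norm, s3norm = s1 / s1[0], s2 / s2[0], s3 / s3[0]
cutoff = len(s1)
for i in range(len(s1)):
    if s1norm[i] < PODTol and s2norm[i] < PODTol and s3norm[i] < PODTol:
        cutoff = i
        break
print(cutoff)  # 2: only the first two modes are kept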