def __init__(self, args):
		win32serviceutil.ServiceFramework.__init__(self, args)
		
		# Give the multiprocessing module a python interpreter to run
		if sys.argv[0].endswith("exe"):
			basedir = os.path.dirname(sys.argv[0])
			multiprocessing.set_executable(os.path.join(basedir, "ulteo-ovd-slaveserver.exe"))
	
		# Init the logger instance
		Win32Logger.initialize("OVD", Config.log_level, None)
		ConfigModule.report_error = WinReport_error
		
		config_file = os.path.join(Platform.System.get_default_config_dir(), "slaveserver.conf")
		if not Config.read(config_file):
			Logger.error("invalid configuration file '%s'"%(config_file))
			sys.exit(1)
	
		if not Config.is_valid():
			Logger.error("invalid config")
			sys.exit(1)
		
		Win32Logger.initialize("OVD", Config.log_level, Config.log_file)
		
		
		SlaveServer.__init__(self, Communication)
		self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)
Example #2
def __encode_multithreaded(f, data):
    """The base function that runs the given function f in multithreaded
    fashion.

    :param f: The function
    :param data: The data
    :return:
    """
    import multiprocessing
    import platform
    # use integer division and keep at least one worker process
    number_of_threads = max(1, multiprocessing.cpu_count() // 2)

    if platform.system() == 'Windows':
        multiprocessing.set_executable('C:/Python27/pythonw.exe')
    elif platform.system() == 'Linux':
        multiprocessing.set_executable('/usr/bin/python')

    p = multiprocessing.Pool(number_of_threads)

    number_of_chunks = len(data) // 4
    chunk_per_thread = number_of_chunks // number_of_threads
    # keep the step positive so range() below never gets a zero step
    split_per_char = max(4, chunk_per_thread * 4)

    thread_data = []
    for i in range(0, len(data), split_per_char):
        thread_data.append(data[i:i + split_per_char])

    data = ''.join(p.map(f, thread_data))
    p.close()
    return data
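
A minimal usage sketch (not part of the original snippet, and assuming the hard-coded interpreter paths in the snippet exist on the machine): f must be a picklable, module-level function so the worker processes can import it; the helper splits data into 4-character chunks and joins the results. The _encode_chunk name is hypothetical.

def _encode_chunk(chunk):
    # toy stand-in for a real encoder; must live at module level to be picklable
    return chunk.upper()

if __name__ == "__main__":
    encoded = __encode_multithreaded(_encode_chunk, "abcd" * 1000)
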
    def create_raster_providers(self, dem, texture):
        dataSrcImg = None
        dataSrcMnt = None
        extent = self.parameters.extent
        originExtent = Extent(extent[0], extent[1], extent[2], extent[3])
        tileSize = self.parameters.tileSize
        zoomLevel = self.parameters.zoomLevel
        path = self.parameters.rastersPath
        if dem is not None:
            demProvider = RasterProvider(dem)
            dataSrcMnt = demProvider.source
            self.parameters.set_resources_dem(demProvider.httpResource)
            self.providerManager.dem = demProvider
        if texture is not None:
            textureProvider = RasterProvider(texture)
            dataSrcImg = textureProvider.source
            self.parameters.set_resources_texture(textureProvider.httpResource)
            self.providerManager.texture = textureProvider

        if os.name == 'nt':
            pythonPath = os.path.abspath(os.path.join(sys.exec_prefix, '../../bin/pythonw.exe'))
            mp.set_executable(pythonPath)
            sys.argv = [None]

        self.clear_rasters_directory(self.parameters.rastersPath)
        tiler = VTTiler(originExtent, tileSize, zoomLevel, dataSrcMnt, dataSrcImg)
        self.parameters.GDALprocess = mp.Process(target=tiler.create, args=(path, self.parameters.GDALqueue))
        self.parameters.GDALprocess.start()
	def __init__(self, args):
		win32serviceutil.ServiceFramework.__init__(self, args)
		
		# Give the multiprocessing module a python interpreter to run
		if sys.argv[0].endswith("exe"):
			basedir = os.path.dirname(sys.argv[0])
			multiprocessing.set_executable(os.path.join(basedir, "ulteo-ovd-slaveserver.exe"))
		
		# Init the logger instance
		Win32Logger.initialize("OVD", Config.log_level, None)
		ConfigModule.report_error = WinReport_error
		
		self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)
Example #5
 def _Construct(self):
     logger.debug('_Construct')
     # Fixme this is just an example and should be set by the BCI2000
     self.fbmod = 'Feedbacks.TrivialPong.TrivialPong'
     self.fbclassname = 'TrivialPong'
     # OnInit
     import sys
     import os
     # needed because we run processes from within an embedded python
     # interpreter
     multiprocessing.set_executable(os.path.join(sys.exec_prefix, 'pythonw.exe'))
     sys.argv = [""]
     self.mycon, childcon = multiprocessing.Pipe()
     self.feedback_proc = multiprocessing.Process(target=feedback_process_loop, args=(self.fbmod, self.fbclassname, childcon,))
     self.feedback_proc.start()
     # returns variables of the feedback in form of parameter lines
     return [], []
def main_execute(floodmap_directory, working_directory, out_shapefile):

    if arcpy.CheckExtension("Spatial") == "Available":
        arcpy.AddMessage("Checking out spatial license ...")
        arcpy.CheckOutExtension("Spatial")
    else:
        raise arcpy.ExecuteError("ERROR: The Spatial Analyst license is required to run this tool.")

    multiprocessing.set_executable(os.path.join(sys.exec_prefix, 'pythonw.exe'))
    
    # Overwrite pre-existing files
    arcpy.env.overwriteOutput = True
    
    arcpy.env.workspace = floodmap_directory
    flood_rasters = arcpy.ListRasters()
    number_of_rasters = len(flood_rasters)
    mp_spatial_lock = multiprocessing.Manager().Lock()
    job_combinations = []
    out_shapefile_list = []
    for index, flood_raster in enumerate(flood_rasters):
        job_combinations.append((os.path.join(floodmap_directory, flood_raster), index, number_of_rasters, working_directory, mp_spatial_lock))
        #out_shapefile_list.append(floodmap_to_shapefile((flood_raster, index, number_of_rasters, working_directory)))

    arcpy.env.workspace = ""
    
    NUM_CPUS = min(number_of_rasters, multiprocessing.cpu_count())
    pool = multiprocessing.Pool(NUM_CPUS)
    
    for output in pool.imap_unordered(floodmap_to_shapefile, job_combinations):
        out_shapefile_list.append(output)
        
    pool.close()
    pool.join()

    #CLEANUP
    arcpy.Delete_management("in_memory")

    #Merge Shapefiles
    arcpy.AddMessage("Merging all flood map shapefiles ...")
    arcpy.Merge_management(out_shapefile_list, out_shapefile)
    
    #CLEANUP
    for shapefile in out_shapefile_list:
        arcpy.Delete_management(shapefile)
Example #7
import os
import sys
import imp
import time
import win32serviceutil
import win32service
import multiprocessing


def is_frozen():
    return (hasattr(sys, "frozen")          # new py2exe
            or hasattr(sys, "importers")    # old py2exe
            or imp.is_frozen("__main__"))   # tools/freeze


if is_frozen():
    multiprocessing.set_executable(os.path.join(
        os.path.dirname(sys.executable), 'porcupineserver.exe'))


class PorcupineServerService(win32serviceutil.ServiceFramework):
    _svc_name_ = 'Porcupine'
    _svc_display_name_ = 'Porcupine'
    _svc_description_ = 'Porcupine Web Application Server'

    def __init__(self, *args):
        win32serviceutil.ServiceFramework.__init__(self, *args)
        self.controller = None

    def SvcDoRun(self):
        try:
            if '' not in sys.path:
                sys.path.insert(0, '')
Example #8
def draw(op,inp,usr_selection):
    # separate process so that excel is not blocked
    multiprocessing.set_executable(os.path.join(sys.exec_prefix, 'pythonw.exe')) 
    p = multiprocessing.Process(target=f, args=(op,inp,usr_selection))
    p.start()
"""Handle a list of `CartogramFeature`."""

import functools
import math
import multiprocessing
import os.path
import platform
import sys

from qgis.core import QgsGeometry, QgsProcessingFeedback

from .cartogramfeature import CartogramFeature

if platform.system() == "Windows":
    sys.argv = [os.path.abspath(__file__)]
    multiprocessing.set_executable(os.path.join(sys.exec_prefix,
                                                "pythonw.exe"))
elif platform.system() == "Darwin":
    sys.argv = [os.path.abspath(__file__)]
    multiprocessing.set_executable(
        os.path.join(sys.exec_prefix, "bin", "python3"))

# monkey-patch functools for older Python versions
# (e.g. installed with QGIS 3.16 on MacOS)
if "cache" not in dir(functools):

    def _cache(user_function):
        return functools.lru_cache(maxsize=None)(user_function)

    functools.cache = _cache
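
With the shim in place, functools.cache can be used the same way on old and new Python versions; the _slow_square function below is a hypothetical illustration, not part of the plugin code.

@functools.cache
def _slow_square(x):
    # the result is computed once and served from the cache afterwards
    return x * x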

Example #10
def integrate(ftable, integratorid="std", msLevel=None, showProgress=True, n_cpus=-1,
              min_size_for_parallel_execution=500, post_fixes=None):
    """ integrates features  in ftable.
        returns processed table. ``ftable`` is not changed inplace.

        The peak integrator corresponding to the integratorId is
        defined in ``algorithm_configs.py`` or ``local_configs.py``

        n_cpus <= 0 has special meaning:
            n_cpus = 0 means "use all cpu cores"
            n_cpus = -1 means "use all but one cpu cores", etc

    """

    needed_columns = ["mzmin", "mzmax", "rtmin", "rtmax", "peakmap"]
    if post_fixes is None:
        post_fixes = ftable.supportedPostfixes(needed_columns)
        if not post_fixes:
            raise Exception("is no feature table")
    else:
        col_names = ftable.getColNames()
        missing = []
        for post_fix in post_fixes:
            for name in needed_columns:
                if name + post_fix not in col_names:
                    missing.append(name + post_fix)
        if missing:
            raise ValueError("column name(s) %s missing" % (", ".join(missing)))

    if sys.platform == "win32":
        # if subprocesses use python.exe a console window pops up for each
        # subprocess. this is quite ugly..
        import os.path
        multiprocessing.set_executable(os.path.join(
                                       os.path.dirname(sys.executable),
                                       "pythonw.exe")
                                       )
    import time
    from ..core.data_types import Table

    started = time.time()

    messages, n_cpus = check_num_cpus(n_cpus, len(ftable), min_size_for_parallel_execution)

    if showProgress:
        print
        if messages:
            print "\n".join(messages)
        print "integrate table using", n_cpus, "processes"
        print

    if n_cpus == 1:
        __, result = _integrate((0, ftable, post_fixes, integratorid, msLevel, showProgress,))
    else:
        pool = multiprocessing.Pool(n_cpus)
        args = []
        all_pms = []
        for i in range(n_cpus):
            partial = ftable[i::n_cpus]
            show_progress = (i == 0)  # only first process prints progress status
            args.append(
                (i, partial, post_fixes, integratorid, msLevel, show_progress))
            all_pms.append(partial.peakmap.values)

        # map_async() avoids bug of map() when trying to stop jobs using ^C
        results = pool.map_async(_integrate, args).get()

        results.sort()  # sorts by first entry which is the index of the partial table
        tables = [t for (i, t) in results]

        # as peakmaps are serialized/unserialized for parallel execution, lots of duplicate
        # peakmaps come back after. we reset those columns to their state before spreading
        # them:
        for t, pms in zip(tables, all_pms):
            t.replaceColumn("peakmap", pms, type_=ftable.getColType("peakmap"),
                            format_=ftable.getColFormat("peakmap"))

        # at least needed on win, else worker processes accumulate:
        pool.close()

        tables = [t for t in tables if len(t) > 0]
        result = Table.stackTables(tables)

    if showProgress:
        needed = time.time() - started
        minutes = int(needed) / 60
        seconds = needed - minutes * 60
        print
        if minutes:
            print "needed %d minutes and %.1f seconds" % (minutes, seconds)
        else:
            print "needed %.1f seconds" % seconds
    return result
Example #11
def integrate(ftable, integratorid="std", msLevel=None, showProgress=True, n_cpus=-1,
        min_size_for_parallel_execution=500):
    """ integrates features  in ftable.
        returns processed table. ``ftable`` is not changed inplace.

        The peak integrator corresponding to the integratorId is
        defined in ``algorithm_configs.py`` or ``local_configs.py``

        n_cpus <= 0 has special meaning:
            n_cpus = 0 means "use all cpu cores"
            n_cpus = -1 means "use all but one cpu cores", etc
    """
    import sys
    import multiprocessing
    if sys.platform == "win32":
        # if subprocesses use python.exe a console window pops up for each
        # subprocess. this is quite ugly..
        import os.path
        multiprocessing.set_executable(os.path.join(
                                       os.path.dirname(sys.executable),
                                       "pythonw.exe")
                                       )
    import time
    from ..core.data_types.table import Table

    started = time.time()

    messages = []
    if multiprocessing.current_process().daemon and n_cpus != 1:
        messages.append("WARNING: you choose n_cpus = %d but integrate already runs inside a "
                        "daemon process which is not allowed. therefore set n_cpus = 1" % n_cpus)
        n_cpus = 1

    if n_cpus < 0:
        n_cpus = multiprocessing.cpu_count() + n_cpus

    if n_cpus <= 0:
        messages.append("WARNING: you requested to use %d cores, "
                        "we use single core instead !" % n_cpus)
        n_cpus = 1

    if n_cpus > 1 and len(ftable) < min_size_for_parallel_execution:
        messages.append("INFO: as the table has les thann %d rows, we switch to one cpu mode"
                        % min_size_for_parallel_execution)
        n_cpus = 1     

    elif n_cpus > multiprocessing.cpu_count():
        messages.append("WARNING: more processes demanded than available cpu cores, this might be "
                        "inefficient")

    if showProgress:
        print
        if messages:
            print "\n".join(messages)
        print "integrate table using", n_cpus, "processes"
        print

    if n_cpus == 1:
        result = _integrate((ftable, integratorid, msLevel, showProgress))
    else:
        pool = multiprocessing.Pool(n_cpus)
        args = []
        all_pms = []
        for i in range(n_cpus):
            subt = ftable[i::n_cpus]
            show_progress = (i == 0)  # only first process prints progress status
            args.append((subt, integratorid, msLevel, show_progress))
            all_pms.append(subt.peakmap.values)

        # map_async() avoids bug of map() when trying to stop jobs using ^C
        tables = pool.map_async(_integrate, args).get()

        # as peakmaps are serialized/unserialized for parallel execution, lots of duplicate
        # peakmaps come back after. we reset those columns to their state before spreading
        # them:
        for t, pms in zip(tables, all_pms):
            t.replaceColumn("peakmap", pms)

        pool.close()

        tables = [t for t in tables if len(t) > 0]
        result = Table.mergeTables(tables)

    if showProgress:
        needed = time.time() - started
        minutes = int(needed) / 60
        seconds = needed - minutes * 60
        print
        if minutes:
            print "needed %d minutes and %.1f seconds" % (minutes, seconds)
        else:
            print "needed %.1f seconds" % seconds
    return result
Example #12
def _main():
    import multiprocessing as _mp
    import threading
    import time

    import bpy
    _mp.set_executable(bpy.app.binary_path_python)

    # import logging
    # logger = _mp.log_to_stderr()
    # logger.setLevel(logging.INFO)

    global _engine_, _data_, _width_, _height_, _mmap_size_, _mmap_

    _mmap_name = "blender/barnold/ipr/pid-%d" % id(_engine_)

    if platform.system() in ("Darwin", "Linux"):
        _mmap_ = mmap.mmap(-1, 64 * 1024 * 1024)  # 64Mb

    if platform.system() == "Windows":
        _mmap_ = mmap.mmap(-1, 64 * 1024 * 1024, _mmap_name)  # 64Mb

    state = _mp.Value('i', 0)
    redraw_event = _mp.Event()

    def tag_redraw():
        while redraw_event.wait() and state.value != ABORT:
            redraw_event.clear()
            e = _engine_()
            if e is not None:
                e.tag_redraw()
            del e

    def _mmap_size(opts):
        global _mmap_
        m = max(_width_, _height_)
        if m > 300:
            c = 900 / (m + 600)
            w = int(_width_ * c)
            h = int(_height_ * c)
        else:
            w = _width_
            h = _height_

        if platform.system() in ("Darwin", "Linux"):
            _mmap_ = mmap.mmap(-1, w * h * 4 * 4)

        if platform.system() == "Windows":
            _mmap_ = mmap.mmap(-1, w * h * 4 * 4, _mmap_name)

        opts['xres'] = ('INT', w)
        opts['yres'] = ('INT', h)
        return w, h

    _mmap_size_ = _mmap_size(_data_['options'])
    pout, pin = _mp.Pipe(False)

    def update(width, height, data):
        global _width_, _height_, _mmap_size_
        if _width_ != width or _height_ != height:
            _width_ = width
            _height_ = height
            _mmap_size_ = _mmap_size(data.setdefault('options', {}))
            data['mmap_size'] = _mmap_size_
        if data:
            #print(">>> update [%f]" % time.clock())
            pin.send(data)
        return _mmap_size_, numpy.frombuffer(_mmap_, dtype=numpy.float32)

    redraw_thread = threading.Thread(target=tag_redraw)
    process = _mp.Process(target=_worker,
                          args=(_data_, pout, redraw_event, _mmap_size_,
                                _mmap_name, state))

    def stop():
        print(">>> stop [%f]: ABORT" % time.clock())
        state.value = ABORT
        print(">>> stop [%f]: close data" % time.clock())
        pin.send(None)
        pin.close()
        print(">>> stop [%f]: set event" % time.clock())
        redraw_event.set()
        print(">>> stop [%f]: join" % time.clock(), redraw_thread)
        redraw_thread.join()
        print(">>> stop [%f]:" % time.clock(), redraw_thread)
        print(">>> stop [%f]: join" % time.clock(), process)
        process.join(5)
        if process.is_alive():
            print(">>> stop [%f]: terminate" % time.clock(), process)
            process.terminate()
        print(">>> stop [%f]:" % time.clock(), process)

    redraw_thread.start()
    process.start()

    return update, stop
Example #13
import os
import sys
import time
import win32serviceutil
import win32service
import win32event
import win32evtlogutil
import win32console
import servicemanager

# Give the multiprocessing module a python interpreter to run
import multiprocessing
executable = os.path.join(os.path.dirname(sys.executable), 'u2.exe')
multiprocessing.set_executable(executable)
del executable

BASE_PROCESS_WAIT_TIME = 3

class logger(object):
    def __init__(self, out):
        self.out = out

    def write(self, message):
        if 'Traceback' in message:
            self.out('\n' + message)


class ServiceLauncher(win32serviceutil.ServiceFramework):
    _svc_name_ = 'U2 Service'
    _svc_display_name_ = 'U2 Service'

    def __init__(self, args):
        win32serviceutil.ServiceFramework.__init__(self, args)
Example #14
def mp_handler():

    for item in clipList:
        print("here is the list: " + item)

    try:
        # Create a list of object IDs for clipper polygons

        arcpy.AddMessage("Creating Polygon OID list...")
        print("Creating Polygon OID list...")
        clipperDescObj = arcpy.Describe(clipper)
        field = clipperDescObj.OIDFieldName

        idList = []
        with arcpy.da.SearchCursor(clipper, [field]) as cursor:
            for row in cursor:
                id = row[0]
                idList.append(id)

        arcpy.AddMessage("There are " + str(len(idList)) +
                         " object IDs (polygons) to process.")
        print("There are " + str(len(idList)) +
              " object IDs (polygons) to process.")

        # Create a task list with parameter tuples for each call of the worker function. Tuples consist of the clipper, tobeclipped, field, and oid values.

        jobs = []
        for item in clipList:
            tobeclipped = Path + "\\" + item
            for id in idList:
                jobs.append(
                    (clipper, tobeclipped, field, id, outFolder)
                )  # adds tuples of the parameters that need to be given to the worker function to the jobs list

        arcpy.AddMessage("Job list has " + str(len(jobs)) + " elements.")
        print("Job list has " + str(len(jobs)) + " elements.")

        # Create and run multiprocessing pool.

        multiprocessing.set_executable(
            os.path.join(get_install_path(), 'pythonw.exe')
        )  # make sure Python environment is used for running processes, even when this is run as a script tool

        arcpy.AddMessage("Sending to pool")
        print("Sending to pool")

        cpuNum = multiprocessing.cpu_count(
        )  # determine number of cores to use
        print("there are: " + str(cpuNum) + " cpu cores on this machine")

        with multiprocessing.Pool(
                processes=cpuNum) as pool:  # Create the pool object
            res = pool.starmap(
                worker, jobs
            )  # run jobs in job list; res is a list with return values of the worker function

        # If an error has occurred report it

        failed = res.count(
            False
        )  # count how many times False appears in the list with the return values
        if failed > 0:
            arcpy.AddError("{} workers failed!".format(failed))
            print("{} workers failed!".format(failed))

        arcpy.AddMessage("Finished multiprocessing!")

    except arcpy.ExecuteError:
        # Geoprocessor threw an error
        arcpy.AddError(arcpy.GetMessages(2))
        print("Execute Error:", arcpy.ExecuteError)
    except Exception as e:
        # Capture all other errors
        arcpy.AddError(str(e))
        print("Exception:", e)

    # Get list of shapefiles in the output folder
    list_layers = glob.glob(outFolder + "\\" + "*.shp")

    # Call the function to add clipped shapefiles to open project
    add_layers(outFolder, list_layers)

    # Print out total processing time
    arcpy.AddMessage("--- %s seconds ---" % (time.time() - start_time))
Example #15
            from PyQt5.QtWidgets import QApplication
            QApplication.addLibraryPath(str(os.path.dirname(os.path.abspath( __file__ ))) + "/qt_plugins/")

elif system().startswith("Windows"):
    ib = (hasattr(sys, "frozen")  # new py2exe
          or hasattr(sys, "importers")  # old py2exe
          or imp.is_frozen("__main__"))  # tools/freeze
    from PyQt5.QtWidgets import QApplication

    if ib:
        QApplication.addLibraryPath(str(os.path.dirname(os.path.abspath(__file__))) + "\\qt_plugins\\")
    else:
        QApplication.addLibraryPath("c:\\Python34\\Lib\\site-packages\\PyQt5\\plugins")


import numpy
from multiprocessing import freeze_support

if system() == "Windows" and (hasattr(sys, "frozen") # new py2exe
                            or hasattr(sys, "importers") # old py2exe
                            or imp.is_frozen("__main__")): # tools/freeze
    from multiprocessing import set_executable
    executable = os.path.join(os.path.dirname(sys.executable), 'artisan.exe')
    set_executable(executable)    
    del executable

if __name__ == '__main__':
    freeze_support()
    with numpy.errstate(invalid='ignore'):
        main.app.exec_()
Example #16
def integrate(ftable,
              integratorid="std",
              msLevel=None,
              showProgress=True,
              n_cpus=-1,
              min_size_for_parallel_execution=500):
    """ integrates features  in ftable.
        returns processed table. ``ftable`` is not changed inplace.

        The peak integrator corresponding to the integratorId is
        defined in ``algorithm_configs.py`` or ``local_configs.py``

        n_cpus <= 0 has special meaning:
            n_cpus = 0 means "use all cpu cores"
            n_cpus = -1 means "use all but one cpu cores", etc

    """
    from ..core.data_types.table import Table, PeakMap

    assert isinstance(ftable, Table)

    neededColumns = ["mzmin", "mzmax", "rtmin", "rtmax", "peakmap"]
    supportedPostfixes = ftable.supportedPostfixes(neededColumns)
    if not supportedPostfixes:
        raise Exception("is no feature table")

    import sys
    import multiprocessing
    if sys.platform == "win32":
        # if subprocesses use python.exe a console window pops up for each
        # subprocess. this is quite ugly..
        import os.path
        multiprocessing.set_executable(
            os.path.join(os.path.dirname(sys.executable), "pythonw.exe"))
    import time

    started = time.time()

    messages = []
    if multiprocessing.current_process().daemon and n_cpus != 1:
        messages.append(
            "WARNING: you chose n_cpus = %d but integrate already runs inside a "
            "daemon process which is not allowed. therefore set n_cpus = 1" %
            n_cpus)
        n_cpus = 1

    if n_cpus < 0:
        n_cpus = multiprocessing.cpu_count() + n_cpus

    if n_cpus <= 0:
        messages.append("WARNING: you requested to use %d cores, "
                        "we use single core instead !" % n_cpus)
        n_cpus = 1

    if n_cpus > 1 and len(ftable) < min_size_for_parallel_execution:
        messages.append(
            "INFO: as the table has les thann %d rows, we switch to one cpu mode"
            % min_size_for_parallel_execution)
        n_cpus = 1

    elif n_cpus > multiprocessing.cpu_count():
        messages.append(
            "WARNING: more processes demanded than available cpu cores, this might be "
            "inefficient")

    if showProgress:
        print
        if messages:
            print "\n".join(messages)
        print "integrate table using", n_cpus, "processes"
        print

    if n_cpus == 1:
        result = _integrate((
            ftable,
            supportedPostfixes,
            integratorid,
            msLevel,
            showProgress,
        ))
    else:
        pool = multiprocessing.Pool(n_cpus)
        args = []
        all_pms = []
        for i in range(n_cpus):
            subt = ftable[i::n_cpus]
            show_progress = (i == 0
                             )  # only first process prints progress status
            args.append((subt, supportedPostfixes, integratorid, msLevel,
                         show_progress))
            all_pms.append(subt.peakmap.values)

        # map_async() avoids bug of map() when trying to stop jobs using ^C
        tables = pool.map_async(_integrate, args).get()

        # as peakmaps are serialized/unserialized for parallel execution, lots of duplicate
        # peakmaps come back after. we reset those columns to their state before spreading
        # them:
        for t, pms in zip(tables, all_pms):
            t.replaceColumn("peakmap",
                            pms,
                            type_=ftable.getColType("peakmap"),
                            format_=ftable.getColFormat("peakmap"))

        # at least needed on win, else worker processes accumulate:
        pool.close()

        tables = [t for t in tables if len(t) > 0]
        result = Table.stackTables(tables)

    if showProgress:
        needed = time.time() - started
        minutes = int(needed) / 60
        seconds = needed - minutes * 60
        print
        if minutes:
            print "needed %d minutes and %.1f seconds" % (minutes, seconds)
        else:
            print "needed %.1f seconds" % seconds
    return result
Example #17
    def action(self, action, **kwargs):
        """
        Call the action method for each resource of the ResourceSet.
        Handle parallel or serialized execution plans.
        """
        tags = kwargs.get("tags", set())
        xtags = kwargs.get("xtags", set())
        xtypes = kwargs.get("xtypes")
        types = kwargs.get("types")

        if self.parallel:
            # verify we can actually do parallel processing, fallback to serialized
            try:
                from multiprocessing import Process
                if Env.sysname == "Windows":
                    from multiprocessing import set_executable
                    set_executable(os.path.join(sys.exec_prefix,
                                                'pythonw.exe'))
            except:
                self.parallel = False

        resources = self.action_resources(action, tags, xtags, xtypes, types)
        barrier = None

        if not self.svc.options.dry_run and \
           self.parallel and len(resources) > 1 and \
           action not in ["presync", "postsync"]:
            procs = {}
            for resource in resources:
                if not resource.can_rollback and action == "rollback":
                    continue
                if resource.skip or resource.is_disabled():
                    continue
                proc = Process(target=self.action_job,
                               args=(
                                   resource,
                                   action,
                               ))
                proc.start()
                resource.log.info("action %s started in child process %d" %
                                  (action, proc.pid))
                procs[resource.rid] = proc
                if self.svc.options.upto and resource.rid == self.svc.options.upto:
                    barrier = "reached 'up to %s' barrier" % resource.rid
                    break
                if self.svc.options.downto and resource.rid == self.svc.options.downto:
                    barrier = "reached 'down to %s' barrier" % resource.rid
                    break
            for proc in procs.values():
                proc.join()
            err = []
            for resource in resources:
                if resource.rid not in procs:
                    continue
                proc = procs[resource.rid]
                if proc.exitcode == 1 and not resource.optional:
                    err.append(resource.rid)
                elif proc.exitcode == 2:
                    # can_rollback resource property is lost with the thread
                    # the action_job tells us what to do with it through its exitcode
                    resource.can_rollback = True
            if len(err) > 0:
                raise ex.Error("%s non-optional resources jobs returned "
                               "with error" % ",".join(err))
        else:
            if self.svc.options.dry_run and \
               self.parallel and len(resources) > 1 and \
               action not in ["presync", "postsync"]:
                self.log.info("entering parallel subset")
            for resource in resources:
                try:
                    resource.action(action)
                except ex.AbortAction as exc:
                    msg = str(exc)
                    if msg != "":
                        resource.log.warning(msg)
                    resource.log.warning("abort action on resource set")
                    break
                except ex.ContinueAction as exc:
                    msg = str(exc)
                    if msg != "":
                        resource.log.info(msg)
                    resource.log.info("continue action on resource set")
                except ex.Error as exc:
                    msg = str(exc)
                    if msg != "":
                        resource.log.error(msg)
                        # prevent re-logging
                        exc.value = ""
                    raise exc
                if self.svc.options.upto and resource.rid == self.svc.options.upto:
                    barrier = "reached 'up to %s' barrier" % resource.rid
                    break
                if self.svc.options.downto and resource.rid == self.svc.options.downto:
                    barrier = "reached 'down to %s' barrier" % resource.rid
                    break

        if barrier:
            raise ex.EndAction(barrier)
Example #18
                import site
                QApplication.addLibraryPath(
                    os.path.dirname(site.getsitepackages()[0]) +
                    "/PyQt4/qt_plugins")
        except:
            pass

from artisanlib import main
import numpy
from multiprocessing import freeze_support

if system() == "Windows" and (hasattr(sys, "frozen")  # new py2exe
                              or hasattr(sys, "importers")  # old py2exe
                              or imp.is_frozen("__main__")):  # tools/freeze
    from multiprocessing import set_executable
    executable = os.path.join(os.path.dirname(sys.executable), 'artisan.exe')
    set_executable(executable)
    del executable

if __name__ == '__main__':
    freeze_support()
    if os.environ.get('TRAVIS'):
        # Hack to exit inside Travis CI
        # Ideally we would use pytest-qt.
        import threading
        t = threading.Timer(30, lambda: os._exit(0))
        t.start()
    main.main()

# EOF
import os
os.environ["OMP_NUM_THREADS"] = "1"
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["OPENBLAS_NUM_THREADS"] = "1"
import sys
import multiprocessing

from libertem import api
import numpy as np

# Since the interpreter is embedded, we have to set the Python executable.
# Otherwise we'd spawn new instances of Digital Micrograph instead of workers.
multiprocessing.set_executable(os.path.join(sys.exec_prefix, 'pythonw.exe'))

if __name__ == "__main__":

    with api.Context() as ctx:

        ds = ctx.load(
            "raw",
            path=
            ("C:/Users/weber/Nextcloud/Projects/Open Pixelated STEM framework/"
             "Data/EMPAD/scan_11_x256_y256.raw"),
            dtype="float32",
            scan_size=(256, 256),
            detector_size_raw=(130, 128),
            crop_detector_to=(128, 128),
        )

        DM.DoEvents()
        sum_analysis = ctx.create_sum_analysis(dataset=ds)
import arcpy
import os
import sys
import json
import re
import orientedimagerytools as oitools
from numpy.linalg import inv
import numpy as np
from datetime import datetime
import multiprocessing
import concurrent.futures
from concurrent.futures import ProcessPoolExecutor
import math
multiprocessing.set_executable(
    os.path.join(os.path.dirname(sys.executable),
                 r"Python\envs\arcgispro-py3\pythonw.exe"))


def __init__(self, nametype):
    self._name = nametype


def main(oicPath, oicParas, inputParams, defValues, log=None):
    try:
        outPathSRS = arcpy.Describe(oicPath).spatialReference
        rasterPath = oicParas['RasterPath']
        cameraFile = oicParas['Drone2Map Camera File']
        imageDetailsFile = oicParas['Drone2Map Image List']
        imageryType = oicParas['Imagery Type']
        inputwkid = oicParas['Input Images SRS (WKID)']
def main(input_network, travel_mode, cutoff, time_of_day,
         selected_impedance_function, origins_i_input, i_id_field,
         search_tolerance_i, search_criteria_i, search_query_i,
         destinations_j_input, j_id_field, o_j_field, search_tolerance_j,
         search_criteria_j, search_query_j, batch_size_factor, output_dir,
         output_gdb, del_i_eq_j, join_back_i):

    # --- check opportunities_j field type compatibility ---
    o_j_field_type = field_type_x(destinations_j_input, o_j_field)
    if o_j_field_type == "TEXT":
        raise Exception(str(o_j_field) + " field type is text")

    # --- setup workspace ---
    arcpy.env.workspace = workspace_setup(output_dir, output_gdb)
    arcpy.env.scratchWorkspace = scratchWorkspace_setup(output_dir, output_gdb)

    # --- setup batching ---
    batch_size = batch_size_f(origins_i_input, batch_size_factor)

    # --- pre-process origins ---
    origins_i = preprocess_x(input_fc=origins_i_input,
                             input_type="origins_i",
                             id_field=i_id_field,
                             o_j_field=None,
                             input_network=input_network,
                             search_tolerance=search_tolerance_i,
                             search_criteria=search_criteria_i,
                             search_query=search_query_i,
                             travel_mode=travel_mode,
                             batch_size=batch_size)
    #print(origins_i)
    origins_i_dict = create_dict(origins_i,
                                 key_field="i_id_text",
                                 value_field="i_id")

    # ----- destinations -----
    destinations_j = preprocess_x(input_fc=destinations_j_input,
                                  input_type="destinations_j",
                                  id_field=j_id_field,
                                  o_j_field=o_j_field,
                                  input_network=input_network,
                                  search_tolerance=search_tolerance_j,
                                  search_criteria=search_criteria_j,
                                  search_query=search_query_j,
                                  travel_mode=travel_mode,
                                  batch_size=None)
    #print(destinations_j)
    o_j_dict = create_dict(destinations_j,
                           key_field="j_id_text",
                           value_field="o_j")

    # worker iterator
    batch_list = list_unique(os.path.join(arcpy.env.workspace, "origins_i"),
                             "batch_id")

    jobs = []
    # adds tuples of the parameters that need to be given to the worker function to the jobs list
    for batch_id in batch_list:
        jobs.append(
            (batch_id, arcpy.env.scratchWorkspace, origins_i, destinations_j,
             input_network, travel_mode, cutoff, time_of_day,
             selected_impedance_function, o_j_dict, del_i_eq_j))

    # multiprocessing
    multiprocessing.set_executable(os.path.join(sys.exec_prefix,
                                                'pythonw.exe'))
    arcpy.AddMessage("Sending batch to multiprocessing pool...")
    pool = multiprocessing.Pool(
        processes=cpu_count(multiprocessing.cpu_count()))
    result = pool.map(access_multi, jobs)
    pool.close()
    pool.join()
    arcpy.AddMessage("Multiprocessing complete, merging results...")
    access_output = arcpy.management.Merge(
        result, arcpy.env.workspace + "/output_" + output_gdb)

    # add back original i_id
    turbo_joiner(target_fc=access_output,
                 target_id_field='OriginName',
                 join_fc=origins_i,
                 join_id_field='i_id_text',
                 join_value_field='i_id')

    if join_back_i == "true":
        # join accessibility output back to origins input
        join_fields = [
            "SUM_Ai_" + f_field for f_field in selected_impedance_function
        ]
        join_fields.insert(0, "FREQUENCY")
        arcpy.AddMessage("Joining accessibility output to origins_i...")
        arcpy.management.JoinField(origins_i_input, i_id_field, access_output,
                                   "i_id", join_fields)

    # ----- clean up: this deletes the workers directory. comment-out if you want to keep -----
    arcpy.management.Delete(arcpy.env.scratchWorkspace)
Example #22
def _main():
    import multiprocessing as _mp
    import threading
    import time

    import bpy
    _mp.set_executable(bpy.app.binary_path_python)

    #import logging
    #logger = _mp.log_to_stderr()
    #logger.setLevel(logging.INFO)

    global _engine_, _data_, _width_, _height_, _mmap_size_, _mmap_

    _mmap_name = "blender/barnold/ipr/pid-%d" % id(_engine_)
    _mmap_ = mmap.mmap(-1, 64 * 1024 * 1024, _mmap_name)  # 64Mb

    state = _mp.Value('i', 0)
    redraw_event = _mp.Event()

    def tag_redraw():
        while redraw_event.wait() and state.value != ABORT:
            redraw_event.clear()
            e = _engine_()
            if e is not None:
                e.tag_redraw()
            del e

    def _mmap_size(opts):
        global _mmap_
        m = max(_width_, _height_)
        if m > 300:
            c = 900 / (m + 600)
            w = int(_width_ * c)
            h = int(_height_ * c)
        else:
            w = _width_
            h = _height_
        _mmap_ = mmap.mmap(-1, w * h * 4 * 4, _mmap_name)
        opts['xres'] = ('INT', w)
        opts['yres'] = ('INT', h)
        return w, h

    _mmap_size_ = _mmap_size(_data_['options'])
    pout, pin = _mp.Pipe(False)

    def update(width, height, data):
        global _width_, _height_, _mmap_size_
        if _width_ != width or _height_ != height:
            _width_ = width
            _height_ = height
            _mmap_size_ = _mmap_size(data.setdefault('options', {}))
            data['mmap_size'] = _mmap_size_
        if data:
            #print(">>> update [%f]" % time.clock())
            pin.send(data)
        return _mmap_size_, numpy.frombuffer(_mmap_, dtype=numpy.float32)

    redraw_thread = threading.Thread(target=tag_redraw)
    process = _mp.Process(target=_worker, args=(
        _data_, pout, redraw_event, _mmap_size_, _mmap_name, state
    ))

    def stop():
        print(">>> stop [%f]: ABORT" % time.clock())
        state.value = ABORT
        print(">>> stop [%f]: close data" % time.clock())
        pin.send(None)
        pin.close()
        print(">>> stop [%f]: set event" % time.clock())
        redraw_event.set()
        print(">>> stop [%f]: join" % time.clock(), redraw_thread)
        redraw_thread.join()
        print(">>> stop [%f]:" % time.clock(), redraw_thread)
        print(">>> stop [%f]: join" % time.clock(), process)
        process.join(5)
        if process.is_alive():
            print(">>> stop [%f]: terminate" % time.clock(), process)
            process.terminate()
        print(">>> stop [%f]:" % time.clock(), process)

    redraw_thread.start()
    process.start()

    return update, stop
import os, sys, math, time, multiprocessing, json
from functools import partial
import numpy as np
from qgis.core import QgsVectorLayer
from qgis.PyQt.QtCore import QVariant
import processing
from qgis.core import *
from PyQt5.QtGui import QFont
from osgeo import ogr

t1 = time.time()

# Set recursion depth limit to avoid errors at a later stage
sys.setrecursionlimit(10000)
path = os.path.abspath(os.path.join(sys.exec_prefix, '../../bin/pythonw.exe'))
multiprocessing.set_executable(path)
sys.argv = [ None ]

def getPythonPath():
    path = os.__file__.split('\\')
    path.pop()
    path.pop()
    path = '\\'.join(path)
    return(path)

"""
The imported shapefile lines comes as tuple, whereas
the export requires list, this finction converts tuple
inside lines to list
"""
def tupleToList(line):
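    # The snippet is cut off here; a minimal body consistent with the docstring
    # might look like this (an illustrative sketch, not the original code):
    return [list(point) for point in line]
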
import multiprocessing as mp
import sys, os

from multiprocessing import Process

print("I'm in main module")
# OSGeo4W does not bundle python in exec_prefix for python
path = os.path.abspath(os.path.join(sys.exec_prefix, '../../bin/pythonw.exe'))
mp.set_executable(path)
print("Setting executable path to {:s}".format(path))
sys.argv = [ None ]               # '../tst.py' __file__
mgr = mp.Manager()
print("I'm past Manager()")