Code Example #1
def interlock(MainFrame, scope):
    from PYME import config
    from PYME.Acquire.Utils.failsafe import FailsafeClient

    address = config.get('interlockserver-address', '127.0.0.1')
    port = config.get('interlockserver-port', 9119)
    scope.interlock = FailsafeClient(address, port)
Code Example #2
def failsafe(MainFrame, scope):
    from PYME import config
    from PYME.Acquire.Utils.failsafe import FailsafeServer
    import yaml

    email_info = config.get('email-info-path')
    with open(email_info, 'r') as f:
        email_info = yaml.safe_load(f)

    address = config.get('failsafeserver-address', '127.0.0.1')
    port = config.get('failsafeserver-port', 9119)
    scope.failsafe = FailsafeServer(scope, email_info, port, address)
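Both snippets resolve their endpoints via PYME.config.get, which returns the second argument when the key is not set in any config file. A minimal sketch of that lookup, with hypothetical config entries and values:

from PYME import config

# with no 'failsafeserver-port' entry in any config file, the default wins:
port = config.get('failsafeserver-port', 9119)  # -> 9119
# with a user config entry (e.g. in ~/.PYME/config.yaml) such as
#   failsafeserver-address: 192.168.1.10
# the configured value is returned instead:
address = config.get('failsafeserver-address', '127.0.0.1')  # -> '192.168.1.10'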
Code Example #3
    def get_dirname(self, subdirectory=None):
        """ Get the current directory name, including any subdirectories from
        chunking or additional spec.

        Parameters
        ----------
        subdirectory : str, optional
            Directory within current set directory to spool this series. The
            directory will be created if it doesn't already exist.

        Returns
        -------
        str
            spool directory name
        """
        dir = self._dirname if self.spoolType != 'Cluster' else self._cluster_dirname

        if subdirectory is not None:
            dir = dir + self._sep + subdirectory.replace(os.sep, self._sep)
        
        if config.get('acquire-spool_subdirectories', False):
            # limit single directory size for (cluster) IO performance
            subdir = '%03d' % int(self.seriesCounter/100)
            dir = dir + self._sep + subdir

        return dir
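A quick illustration of the chunking arithmetic above, assuming a '/' separator and a hypothetical series counter:

# series 100-199 all land in subdirectory '001':
seriesCounter = 137
subdir = '%03d' % int(seriesCounter / 100)  # -> '001'
# 'some/spool/dir' + '/' + subdir -> 'some/spool/dir/001'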
Code Example #4
def genClusterResultFileName(dataFileName, create=True):
    """Generates a filename for saving fit results based on the original image
    filename"""
    from PYME import config
    import posixpath
    fn, ext = os.path.splitext(dataFileName) #remove extension
    
    if fn.upper().startswith('PYME-CLUSTER://'):
        # std case - we are analysing a file that is already on the cluster

        clusterfilter = fn.split('://')[1].split('/')[0]
        rel_name = fn.split('://%s/' % clusterfilter)[1]
    else:
        # special case for cluster-of-one use where we didn't open the file using a cluster URI
        if not os.path.isabs(fn):
            # add the PYMEData dir path on if we weren't given an absolute path
            fn = getFullFilename(fn)
        
        try:
            # relpath will raise ValueError on windows if we aren't on the same drive
            rel_name = os.path.relpath(fn, config.get('dataserver-root'))
            if rel_name.startswith('..'):
                raise ValueError  # we are not under PYMEData
        except ValueError:
            # recreate the tree under PYMEData, dropping the drive letter or UNC
            rel_name = fn
            
        rel_name = rel_path_as_posix(rel_name)

    dir_name = posixpath.dirname(rel_name)
    file_name = posixpath.basename(rel_name)

    return posixpath.join(dir_name, 'analysis', file_name + '.h5r')
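A hedged worked example of the standard cluster branch above (hypothetical URI):

# dataFileName = 'PYME-CLUSTER://cluster1/2024_03/series1.h5'
# fn (extension stripped) = 'PYME-CLUSTER://cluster1/2024_03/series1'
# clusterfilter = 'cluster1'
# rel_name      = '2024_03/series1'
# returns posixpath.join('2024_03', 'analysis', 'series1' + '.h5r')
#         -> '2024_03/analysis/series1.h5r'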
Code Example #5
File: views.py Project: b3nroll1ns/python-microscopy
def run_template(request):
    from PYME import config
    from PYME.IO import unifiedIO
    from PYME.recipes.modules import ModuleCollection

    if config.get('PYMERuleserver-use', True):
        from PYME.cluster.HTTPRulePusher import RecipePusher
    else:
        from PYME.cluster.HTTPTaskPusher import RecipePusher

    recipeURI = 'pyme-cluster://%s/%s' % (
        server_filter, request.POST.get('recipeURL').lstrip('/'))
    output_directory = 'pyme-cluster://%s/%s' % (
        server_filter, request.POST.get('recipeOutputPath').lstrip('/'))

    recipe_text = unifiedIO.read(recipeURI)
    recipe = ModuleCollection.fromYAML(recipe_text)

    for file_input in recipe.file_inputs:
        input_url = 'pyme-cluster://%s/%s' % (
            server_filter, request.POST.get('%sURL' % file_input).lstrip('/'))
        recipe_text = recipe_text.replace('{' + file_input + '}', input_url)

    pusher = RecipePusher(recipe=recipe_text, output_dir=output_directory)

    fileNames = request.POST.getlist('files', [])
    pusher.fileTasksForInputs(input=fileNames)

    return HttpResponseRedirect('/status/queues/')
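The '{file_input}' placeholders in the recipe text are plain string substitutions, e.g. (hypothetical input name and URL):

recipe_text = 'input: {input_image}'
input_url = 'pyme-cluster://CLUSTER/uploads/series1.h5'
recipe_text = recipe_text.replace('{' + 'input_image' + '}', input_url)
# -> 'input: pyme-cluster://CLUSTER/uploads/series1.h5'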
Code Example #6
    def _GenSeriesName(self):
        if config.get('acquire-spool_subdirectories', False):
            # High-throughput performance optimization
            # If true, add a layer of directories to limit the number of series saved in a single directory
            return '%03d%s%s_%05d' % (int(self.seriesCounter/100), self._sep, self.seriesStub, self.seriesCounter)
        else:
            return self.seriesStub + '_' + numToAlpha(self.seriesCounter)
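With 'acquire-spool_subdirectories' enabled, series names gain a three-digit directory prefix; a worked example with hypothetical values ('/' separator, stub 'Series', counter 137):

'%03d%s%s_%05d' % (int(137 / 100), '/', 'Series', 137)  # -> '001/Series_00137'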
Code Example #7
    def run(self):
        """"""

        if self._profile:
            from PYME.util import mProfile

            mProfile.profileOn([
                'ruleserver.py',
            ])
            profileOutDir = config.get(
                'dataserver-root',
                os.curdir) + '/LOGS/%s/mProf' % computerName.GetComputerName()

        if self.bind_addr == '':
            import socket
            self.externalAddr = socket.gethostbyname(socket.gethostname())
        else:
            self.externalAddr = self.bind_addr

        self.distributor = WFRuleServer(self.port, bind_addr=self.bind_addr)

        logger.info('Starting ruleserver on %s:%d' %
                    (self.externalAddr, self.port))
        try:
            self.distributor.serve_forever()
        finally:
            self.distributor._do_poll = False
            #logger.info('Shutting down ...')
            #self.distributor.shutdown()
            logger.info('Closing server ...')
            self.distributor.server_close()

            if self._profile:
                mProfile.report(False, profiledir=profileOutDir)
Code Example #8
def action_manager(MainFrame, scope):
    from PYME import config
    from PYME.Acquire.ui import actionUI
    from PYME.Acquire.ActionManager import ActionManagerServer

    ap = actionUI.ActionPanel(MainFrame, scope.actions, scope)
    MainFrame.AddPage(ap, caption='Queued Actions')

    ActionManagerServer(scope.actions, 9393,
                        config.get('actionmanagerserver-address', '127.0.0.1'))
Code Example #9
File: shell.py Project: b3nroll1ns/python-microscopy
def Plug(dsviewer):
    sh = wx.py.shell.Shell(id=-1, parent=dsviewer, pos=wx.Point(0, 0),
                           size=wx.Size(618, 451), style=0, locals=dsviewer.__dict__,
                           startupScript=config.get('dh5View-console-startup-file', None),
                           introText='note that help, license etc below is for Python, not PYME\n\n')

    sh.Execute('from pylab import *')
    sh.Execute('from PYME.DSView import View3D, ViewIm3D')

    dsviewer.AddPage(page=sh, select=False, caption='Console')

    dsviewer.sh = sh
Code Example #10
File: views.py Project: b3nroll1ns/python-microscopy
def run(request):
    from PYME import config
    if config.get('PYMERuleserver-use', True):
        from PYME.cluster.HTTPRulePusher import RecipePusher
    else:
        from PYME.cluster.HTTPTaskPusher import RecipePusher
    recipeURI = ('pyme-cluster://%s/' %
                 server_filter) + request.POST.get('recipeURL').lstrip('/')

    pusher = RecipePusher(recipeURI=recipeURI)

    fileNames = request.POST.getlist('files', [])
    pusher.fileTasksForInputs(input=fileNames)

    return HttpResponseRedirect('/status/queues/')
Code Example #11
def check_for_updates(gui=True, force=False):
    global update_available, update_ver
    import requests
    import packaging.version

    with shelve.open(update_info_fn) as s:
        next_update_time = s.get(
            'last_update_check',
            datetime.datetime.fromtimestamp(0)) + datetime.timedelta(days=1)
        t = datetime.datetime.now()

        if not (force or (config.get('check_for_updates', True)
                          and t > next_update_time)):
            # respect config setting and bail
            # called with force=True when called from the menu (i.e. explicitly rather than automatically)
            return

        s['last_update_check'] = t

    logger.info('Checking for updates ...')
    try:
        version_info = requests.get(
            'http://www.python-microscopy.org/current_version.json').json()

        update_ver = version_info['version']

        if packaging.version.parse(update_ver) > packaging.version.parse(
                version.version):
            update_msg = 'A new version of PYME is available\nYou have version %s, the current version is %s' % (
                version.version, update_ver)
            logger.info(update_msg)

            install_type = guess_install_type()
            logger.info(update_messages[install_type])

            update_available = True

            if gui:
                gui_prompt_update()

        else:
            update_available = False

    except:
        logger.exception('Error getting info on updates')
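The comparison above uses packaging.version rather than plain strings, which matters once a version component reaches two digits; hypothetical version strings:

import packaging.version

'20.04.10' > '20.04.9'  # False - naive string comparison gets this wrong
packaging.version.parse('20.04.10') > packaging.version.parse('20.04.9')  # True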
Code Example #12
def main():
    global LOG_STREAMS
    confFile = os.path.join(conf.user_config_dir, 'distributor.yaml')
    with open(confFile) as f:
        config = yaml.safe_load(f)

    serverAddr, serverPort = config['distributor']['http_endpoint'].split(':')
    externalAddr = socket.gethostbyname(socket.gethostname())
    
    #set up logging
    data_root = conf.get('dataserver-root')
    if data_root:
        distr_log_dir = '%s/LOGS' % data_root

        dist_log_err_file = os.path.join(distr_log_dir, 'distributor.log')
        if os.path.exists(dist_log_err_file):
            os.remove(dist_log_err_file)

        dist_err_handler = logging.handlers.RotatingFileHandler(filename=dist_log_err_file, mode='w', maxBytes=1e6, backupCount=1)
        #dist_err_handler.setFormatter(logging.Formatter('%(message)s'))
        distLogErr = logging.getLogger('distributor')
        distLogErr.setLevel(logging.DEBUG)
        distLogErr.addHandler(dist_err_handler)
    
    
    proc = ruleserver.ServerThread(serverPort, profile=False)
    proc.start()
    #proc = subprocess.Popen('python -m PYME.ParallelTasks.distributor 1234', shell=True)

    ns = pyme_zeroconf.getNS('_pyme-taskdist')
    ns.register_service('PYMERuleServer: ' + GetComputerName(), externalAddr, int(serverPort))

    try:
        while proc.is_alive():
            time.sleep(1)

    finally:
        logger.debug('trying to shut down server')
        proc.shutdown()
        ns.unregister('PYMERuleServer: ' + GetComputerName())
Code Example #13
    def __init__(self,
                 ruleID,
                 task_template,
                 inputs_by_task=None,
                 max_task_ID=100000,
                 task_timeout=600,
                 rule_timeout=3600):
        self.ruleID = ruleID

        if inputs_by_task is not None:
            self._inputs_by_task = {
                int(k): v
                for k, v in inputs_by_task.items()
            }
        else:
            self._inputs_by_task = None

        self._template = task_template
        self._task_info = np.zeros(max_task_ID, self.TASK_INFO_DTYPE)

        self._n_retries = config.get('ruleserver-retries', 3)
        self._timeout = task_timeout

        self._rule_timeout = rule_timeout
        self._cached_advert = None

        self.nTotal = 0
        self.nAssigned = 0
        self.nAvailable = 0
        self.nCompleted = 0
        self.nFailed = 0

        self.avCost = 0

        self.expiry = time.time() + self._rule_timeout

        self._info_lock = threading.Lock()
        self._advert_lock = threading.Lock()
Code Example #14
    def finalise(self):
        # wait until our input queue is empty rather than immediately stopping saving.
        self._stopping=True
        logger.debug('Stopping spooling %s' % self.seriesName)
        
        
        #join our polling threads
        if config.get('httpspooler-jointhreads', True):
            # Allow this to be switched off in a config option for maximum performance on high-throughput systems.
            # Joining threads is the recommended and safest behaviour, but forces spooling of the current series to complete
            # before the next series starts, so could have negative performance implications.
            # The alternative - letting spooling continue during the acquisition of the next series - has the potential
            # to result in runaway memory and thread usage when things go pear-shaped (i.e. spooling is not fast enough).
            # TODO - is there actually a performance impact that justifies this config option, or is it purely theoretical?
            for pt in self._pollThreads:
                pt.join()

        # remove our reference to the threads which hold back-references preventing garbage collection
        del(self._pollThreads)
        
        # save events and final metadata
        # TODO - use a binary format for saving events - they can be quite
        # numerous, and can trip the standard 1 s clusterIO.put_file timeout.
        # Use long timeouts as a temporary hack because failing these can ruin
        # a dataset
        if self._aggregate_h5:
            clusterIO.put_file('__aggregate_h5/' + self.seriesName + '/final_metadata.json', 
                               self.md.to_JSON().encode(), self.clusterFilter)
            clusterIO.put_file('__aggregate_h5/' + self.seriesName + '/events.json', 
                               self.evtLogger.to_JSON().encode(),
                               self.clusterFilter, timeout=10)
        else:
            clusterIO.put_file(self.seriesName + '/final_metadata.json', 
                               self.md.to_JSON().encode(), self.clusterFilter)
            clusterIO.put_file(self.seriesName + '/events.json', 
                               self.evtLogger.to_JSON().encode(), 
                               self.clusterFilter, timeout=10)
Code Example #15
File: views.py Project: barentine/python-microscopy
def run(request):
    from PYME import config
    if config.get('PYMERuleserver-use', True):
        from PYME.cluster.HTTPRulePusher import RecipePusher
    else:
        from PYME.cluster.HTTPTaskPusher import RecipePusher

    recipe_url = request.POST.get('recipeURL')
    output_directory = 'pyme-cluster://%s/%s' % (
        server_filter, request.POST.get('recipeOutputPath').lstrip('/'))

    if recipe_url is not None:
        recipeURI = ('pyme-cluster://%s/' %
                     server_filter) + recipe_url.lstrip('/')

        pusher = RecipePusher(recipeURI=recipeURI, output_dir=output_directory)
    else:
        recipe_text = request.POST.get('recipe_text')
        pusher = RecipePusher(recipe=recipe_text, output_dir=output_directory)

    fileNames = request.POST.getlist('files', [])
    pusher.fileTasksForInputs(input=fileNames)

    return HttpResponseRedirect('/status/queues/')
Code Example #16
    def num_tasks_to_request(self):
        return config.get('nodeserver-chunksize', 50) * multiprocessing.cpu_count()
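The request size scales with the node's core count; a worked example with the default chunk size and hypothetical hardware:

# 'nodeserver-chunksize' default 50, on an 8-core node:
# 50 * multiprocessing.cpu_count() == 50 * 8 == 400 tasks per request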
Code Example #17
def main():
    global LOG_STREAMS
    
    op = ArgumentParser(description="PYME rule server for task distribution. This should run once per cluster.")

    #NOTE - currently squatting on port 15346 for testing - TODO can we use an ephemeral port
    op.add_argument('-p', '--port', dest='port', default=config.get('ruleserver-port', 15346), type=int,
                  help="port number to serve on (default: 15346, see also 'ruleserver-port' config entry)")
    
    op.add_argument('-a','--advertisements', dest='advertisements', choices=['zeroconf', 'local'], default='zeroconf',
                  help='Optionally restrict advertisements to local machine')
    
    args = op.parse_args()

    serverPort = args.port
    
    if args.advertisements == 'local':
        #bind on localhost
        bind_addr = '127.0.0.1'
    else:
        bind_addr = '' #bind all interfaces
    
    #set up logging
    data_root = config.get('dataserver-root')
    if data_root:
        distr_log_dir = '%s/LOGS' % data_root
        try:  # make sure the directory exists
            os.makedirs(distr_log_dir)  # exist_ok flag not present on py2
        except OSError as e:
            import errno
            if e.errno != errno.EEXIST:
                raise e

        dist_log_err_file = os.path.join(distr_log_dir, 'distributor.log')
        if os.path.exists(dist_log_err_file):
            os.remove(dist_log_err_file)

        dist_err_handler = logging.handlers.RotatingFileHandler(filename=dist_log_err_file, mode='w', maxBytes=1e6, backupCount=1)
        #dist_err_handler.setFormatter(logging.Formatter('%(message)s'))
        distLogErr = logging.getLogger('distributor')
        distLogErr.setLevel(logging.DEBUG)
        distLogErr.addHandler(dist_err_handler)
    
    
    proc = ruleserver.ServerThread(serverPort, bind_addr=bind_addr, profile=False)
    proc.start()
    #proc = subprocess.Popen('python -m PYME.ParallelTasks.distributor 1234', shell=True)

    if args.advertisements == 'zeroconf':
        ns = pyme_zeroconf.getNS('_pyme-taskdist')
    else:
        #assume 'local'
        ns = sqlite_ns.getNS('_pyme-taskdist')

    time.sleep(0.5)
    #get the actual address (port) we bound to
    sa = proc.distributor.socket.getsockname()
    service_name = get_service_name('PYMERuleServer')
    ns.register_service(service_name, proc.externalAddr, int(sa[1]))

    try:
        while proc.is_alive():
            time.sleep(1)

    finally:
        logger.debug('trying to shut down server')
        proc.shutdown()
        ns.unregister(service_name)
Code Example #18
def main(protocol="HTTP/1.0"):
    global GPU_STATS
    """Test the HTTP request handler class.

    This runs an HTTP server on port 8000 (or the first command line
    argument).

    """
    from optparse import OptionParser

    op = OptionParser(usage='usage: %s [options]' % sys.argv[0])

    op.add_option(
        '-p',
        '--port',
        dest='port',
        default=config.get('dataserver-port', 8080),
        help=
        "port number to serve on (default: 8080, see also 'dataserver-port' config entry)"
    )
    op.add_option('-t',
                  '--test',
                  dest='test',
                  help="Set up for bandwidth test (don't save files)",
                  action="store_true",
                  default=False)
    op.add_option('-v',
                  '--protocol',
                  dest='protocol',
                  help="HTTP protocol version",
                  default="1.1")
    op.add_option('-l',
                  '--log-requests',
                  dest='log_requests',
                  help="Display http request info",
                  default=False,
                  action="store_true")
    default_root = config.get('dataserver-root', os.curdir)
    op.add_option(
        '-r',
        '--root',
        dest='root',
        help=
        "Root directory of virtual filesystem (default %s, see also 'dataserver-root' config entry)"
        % dataserver_root,
        default=default_root)
    op.add_option('-k',
                  '--profile',
                  dest='profile',
                  help="Enable profiling",
                  default=False,
                  action="store_true")
    default_server_filter = config.get('dataserver-filter', compName)
    op.add_option(
        '-f',
        '--server-filter',
        dest='server_filter',
        help='Add a serverfilter for distinguishing between different clusters',
        default=default_server_filter)
    op.add_option(
        '--timeout-test',
        dest='timeout_test',
        help=
        'deliberately make requests timeout for testing error handling in calling modules',
        default=0)

    options, args = op.parse_args()
    if options.profile:
        from PYME.util import mProfile
        mProfile.profileOn(['HTTPDataServer.py', 'clusterListing.py'])

        profileOutDir = options.root + '/LOGS/%s/mProf' % compName

    logger.info(
        '========================================\nPYMEDataServer, running on python %s\n'
        % sys.version)

    #change to the dataserver root if given
    logger.info('Serving from directory: %s' % options.root)
    os.chdir(options.root)

    server_address = ('', int(options.port))

    PYMEHTTPRequestHandler.protocol_version = 'HTTP/%s' % options.protocol
    PYMEHTTPRequestHandler.bandwidthTesting = options.test
    PYMEHTTPRequestHandler.timeoutTesting = options.timeout_test
    PYMEHTTPRequestHandler.logrequests = options.log_requests

    httpd = ThreadedHTTPServer(server_address, PYMEHTTPRequestHandler)
    #httpd = http.server.HTTPServer(server_address, PYMEHTTPRequestHandler)
    httpd.daemon_threads = True

    sa = httpd.socket.getsockname()

    try:
        ip_addr = socket.gethostbyname(socket.gethostname())
    except:
        ip_addr = socket.gethostbyname(socket.gethostname() + '.local')

    ns = pzc.getNS('_pyme-http')
    ns.register_service(
        'PYMEDataServer [%s]: ' % options.server_filter + procName, ip_addr,
        sa[1])

    status['IPAddress'] = ip_addr
    status['BindAddress'] = server_address
    status['Port'] = sa[1]
    status['Protocol'] = options.protocol
    status['TestMode'] = options.test
    status['ComputerName'] = GetComputerName()

    if GPU_STATS:
        try:
            pynvml.nvmlInit()
        except:
            GPU_STATS = False

    sp = statusPoller()
    sp.start()

    logger.info("Serving HTTP on %s port %d ..." % (ip_addr, sa[1]))
    try:
        httpd.serve_forever()
    finally:
        logger.info('Shutting down ...')
        httpd.shutdown()
        httpd.server_close()

        if options.profile:
            mProfile.report(display=False, profiledir=profileOutDir)

        sp.stop()

        if GPU_STATS:
            pynvml.nvmlShutdown()

        sys.exit()
Code Example #19
    Returns
    -------

    """
    try:
        os.makedirs(dir)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise
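
# NOTE: on Python 3 the try/except EEXIST pattern above is equivalent to
# os.makedirs(dir, exist_ok=True)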


#make sure we set up our logging before anyone else does
import logging
import logging.handlers
dataserver_root = config.get('dataserver-root')
if dataserver_root:
    log_dir = '%s/LOGS/%s' % (dataserver_root, compName)
    #if not os.path.exists(log_dir):
    #    os.makedirs(log_dir)
    makedirs_safe(log_dir)

    log_file = '%s/LOGS/%s/PYMEDataServer.log' % (dataserver_root, compName)

    #logging.basicConfig(filename =log_file, level=logging.DEBUG, filemode='w')
    #logger = logging.getLogger('')
    logger = logging.getLogger('')
    logger.setLevel(logging.DEBUG)
    fh = logging.handlers.RotatingFileHandler(filename=log_file,
                                              mode='w',
                                              maxBytes=1e6,
Code Example #20
File: views.py Project: b3nroll1ns/python-microscopy
from PYME import config
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render

from PYME.misc.computerName import GetComputerName
server_filter = config.get('dataserver-filter', GetComputerName())

# Create your views here.


def recipe_form(request):
    """stand in until we have a better recipe GUI"""
    return render(request, 'recipes/form_recipe.html',
                  {'serverfilter': server_filter})


def recipe_standalone(request):
    """This allows file selection with globs like bakeshop"""
    return render(request, 'recipes/recipe_standalone.html',
                  {'serverfilter': server_filter})


def recipe_template(request):
    """This allows file selection with globs like bakeshop"""
    return render(request, 'recipes/recipe_template.html',
                  {'serverfilter': server_filter})


def get_input_glob(request):
    from PYME.IO import clusterIO
Code Example #21
local_computer_name = GetComputerName()

import logging

logger = logging.getLogger(__name__)

SERVICE_CACHE_LIFETIME = 1  #seconds
DIR_CACHE_TIME = 1  #seconds

import PYME.misc.pyme_zeroconf as pzc
from PYME.misc import hybrid_ns

_ns = None
_ns_lock = threading.Lock()

if config.get('clusterIO-hybridns', True):

    def get_ns():
        global _ns
        with _ns_lock:
            if _ns is None:
                #stagger query times
                time.sleep(3 * np.random.rand())
                #_ns = pzc.getNS('_pyme-http')
                _ns = hybrid_ns.getNS('_pyme-http')
                #wait for replies
                time.sleep(5)

        return _ns
else:
Code Example #22
#logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('nodeserver')
#logger.setLevel(logging.INFO)

import time
import sys

from PYME.misc import computerName
from PYME import config
from PYME.IO import clusterIO

from PYME.util import webframework

import ujson as json

WORKER_GET_TIMEOUT = config.get('nodeserver-worker-get-timeout', 60)

#disable socket timeout to prevent us from generating 408 errors
cherrypy.server.socket_timeout = 0

import requests
import multiprocessing

#TODO - should be defined in one place
STATUS_UNAVAILABLE, STATUS_AVAILABLE, STATUS_ASSIGNED, STATUS_COMPLETE, STATUS_FAILED = range(5)


def template_fill(template, **kwargs):
    s = template
Code Example #23
    def __init__(self,
                 ruleID,
                 task_template,
                 inputs_by_task=None,
                 max_task_ID=100000,
                 task_timeout=600,
                 rule_timeout=3600,
                 on_completion=None):
        self.ruleID = ruleID

        if inputs_by_task is not None:
            self._inputs_by_task = {
                int(k): v
                for k, v in inputs_by_task.items()
            }
        else:
            self._inputs_by_task = None

        self._template = task_template
        self._task_info = np.zeros(max_task_ID, self.TASK_INFO_DTYPE)

        # Number of times to re-queue a task if it times out is set by the 'ruleserver-retries' config option
        # Setting a value of 0 effectively disables re-trying and makes analysis less robust.
        # Note that a timeout is different to a failure - failing tasks will be marked as having failed and will not be re-tried. Timeouts will
        # occur in one of 4 scenarios:
        # - a worker falls over completely or is disconnected from the network
        # - an unhandled exception - e.g. due to an IO error connecting to the ruleserver in `taskWorkerHTTP.taskWorker._return_task_results()`
        # - analysis is getting massively bogged down and nothing is keeping up
        # - the task timeout is unrealistically short for a given processing task
        #
        # In scenarios 1 & 2 it's reasonable to expect that retrying will result in success. In scenarios 3 & 4 it's a bit muddier, but seeing as a) these kinds of
        # failures tend to be a bit stochastic and b) the retries get punted to the back of the queue, when the load might have let up a bit, the odds are
        # reasonably good.
        self._n_retries = config.get('ruleserver-retries', 1)
        self._timeout = task_timeout

        self._rule_timeout = rule_timeout
        self._cached_advert = None
        self._active = True  # making this rule inactive will cause it not to generate adverts (this is the closest we get to aborting)

        self.nTotal = 0
        self.nAssigned = 0
        self.nAvailable = 0
        self.nCompleted = 0
        self.nFailed = 0
        self.n_returned_after_timeout = 0
        self.n_timed_out = 0

        self._n_max = max_task_ID

        self.on_completion = on_completion

        self.avCost = 0

        self.expiry = time.time() + self._rule_timeout

        # store pending bids
        self._pending_bids = {}
        self._current_bidder_id = 0

        self._info_lock = threading.Lock()
        self._advert_lock = threading.Lock()
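The int() cast in the dict comprehension above matters because rule payloads typically arrive as JSON, whose object keys are always strings; a minimal sketch with a hypothetical input:

inputs_by_task = {'0': {'frames': 'pyme-cluster://c1/series1.h5'}}
{int(k): v for k, v in inputs_by_task.items()}
# -> {0: {'frames': 'pyme-cluster://c1/series1.h5'}}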
Code Example #24

if __name__ == '__main__':
    import signal

    port = sys.argv[1]

    if (len(sys.argv) == 3) and (sys.argv[2] == '-k'):
        profile = True
        from PYME.util import mProfile

        mProfile.profileOn([
            'ruleserver.py',
        ])
        profileOutDir = config.get(
            'dataserver-root',
            os.curdir) + '/LOGS/%s/mProf' % computerName.GetComputerName()
    else:
        profile = False
        profileOutDir = None

    if sys.platform != 'win32':
        #windows doesn't support handling signals ... don't catch and hope for the best.
        #Note: This will make it hard to cleanly shutdown the distributor on Windows, but should be OK for testing and
        #development
        signal.signal(signal.SIGHUP, on_SIGHUP)

    try:
        run(int(port))
    finally:
        if profile:
Code Example #25
File: taskServerZC.py Project: carandraug/PyME
def main():
    print('Starting PYME taskServer ...')
    import socket
    from PYME import config
    ip_addr = socket.gethostbyname(socket.gethostname())

    profile = False
    if len(sys.argv) > 1 and sys.argv[1] == '-p':
        print('profiling')
        profile = True
        from PYME.util.mProfile import mProfile
        mProfile.profileOn(
            ['taskServerZC.py', 'HDFTaskQueue.py', 'TaskQueue.py'])

    if len(sys.argv) > 1 and sys.argv[1] == '-fp':
        print('profiling')
        #profile = True
        from PYME.util.fProfile import fProfile
        tp = fProfile.thread_profiler()
        tp.profileOn('.*taskServerZC.*|.*TaskQueue.*|.*h5rFile.*',
                     'taskServer_prof.txt')

    Pyro.config.PYRO_MOBILE_CODE = 0
    Pyro.core.initServer()
    #ns=Pyro.naming.NameServerLocator().getNS()

    ns = pzc.getNS()
    daemon = Pyro.core.Daemon(host=ip_addr)
    daemon.useNameServer(ns)

    #check to see if we've got the TaskQueues group
    #if not 'TaskQueues' in [n[0] for n in ns.list('')]:
    #    ns.createGroup('TaskQueues')

    #get rid of any previous queue
    #try:
    #    ns.unregister(taskQueueName)
    #except Pyro.errors.NamingError:
    #    pass

    tq = TaskQueueSet(process_queues_in_order=config.get(
        'TaskServer.process_queues_in_order', True))
    uri = daemon.connect(tq, taskQueueName)

    logging.debug('taskserver URI -> %s, %s' % (uri, type(uri)))

    tw = TaskWatcher(tq)
    tw.start()

    try:
        daemon.requestLoop(tq.isAlive)

    except (KeyboardInterrupt, SystemExit):
        logging.debug(
            'Got a keyboard interrupt, attempting to shut down cleanly')
        #raise
    finally:
        daemon.shutdown(True)
        tw.alive = False
        #ns.unregister(taskQueueName)
        logging.info('Task server is shut down')

        if profile:
            mProfile.report()
Code Example #26
class H5RFile(object):
    KEEP_ALIVE_TIMEOUT = 20  #keep the file open for 20s after the last time it was used
    FLUSH_INTERVAL = config.get('h5r-flush_interval', 1)

    def __init__(self, filename, mode='r'):
        self.filename = filename
        self.mode = mode

        logging.debug('pytables open call: %s' % filename)
        with tablesLock:
            self._h5file = tables.open_file(filename, mode)
        logging.debug('pytables file open: %s' % filename)

        #metadata and events are created on demand
        self._mdh = None
        self._events = None

        # lock for adding things to our queues. This is local to the file and synchronises between the calling thread
        # and our local thread
        self.appendQueueLock = threading.Lock()
        self.appendQueues = {}
        #self.appendVLQueues = {}

        self.keepAliveTimeout = time.time() + self.KEEP_ALIVE_TIMEOUT
        self.useCount = 0
        self.is_alive = True

        #logging.debug('H5RFile - starting poll thread')
        self._lastFlushTime = 0
        self._pollThread = threading.Thread(target=self._pollQueues)
        self._pollThread.daemon = False  #make sure we finish and close the files properly on exit
        self._pollThread.start()

        self._pzf_index = None

        #logging.debug('H5RFile - poll thread started')

    def __enter__(self):
        #logging.debug('entering H5RFile context manager')
        with self.appendQueueLock:
            self.useCount += 1

        return self

    def __exit__(self, *args):
        with self.appendQueueLock:
            self.keepAliveTimeout = time.time() + self.KEEP_ALIVE_TIMEOUT
            self.useCount -= 1

    @property
    def mdh(self):
        if self._mdh is None:
            try:
                self._mdh = MetaDataHandler.HDFMDHandler(self._h5file)
                if self.mode == 'r':
                    self._mdh = MetaDataHandler.NestedClassMDHandler(self._mdh)
            except IOError:
                # our file was opened in read mode and didn't have any metadata to start with
                self._mdh = MetaDataHandler.NestedClassMDHandler()

        return self._mdh

    def updateMetadata(self, mdh):
        """Update the metadata, acquiring the necessary locks"""
        with tablesLock:
            self.mdh.update(mdh)

    @property
    def events(self):
        try:
            return self._h5file.root.Events
        except AttributeError:
            return []

    def addEvents(self, events):
        self.appendToTable('Events', events)

    def _appendToTable(self, tablename, data):
        with tablesLock:
            try:
                table = getattr(self._h5file.root, tablename)
                table.append(data)
            except AttributeError:
                # we don't have a table with that name - create one
                if isinstance(data, six.string_types):
                    table = self._h5file.create_vlarray(
                        self._h5file.root, tablename, tables.VLStringAtom())
                    table.append(data)
                else:
                    self._h5file.create_table(self._h5file.root,
                                              tablename,
                                              data,
                                              filters=tables.Filters(
                                                  complevel=5, shuffle=True),
                                              expectedrows=500000)

            if (tablename == 'PZFImageData'):
                from PYME.IO import PZFFormat
                #special case  for pzf data - also build an index table
                frameNum = PZFFormat.load_header(data)['FrameNum']

                #record a mapping from frame number to the row we added
                idx_entry = np.array([frameNum, table.nrows - 1], dtype='i4')
                idx_entry = idx_entry.view(dtype=[('FrameNum', 'i4'), ('Position', 'i4')])

                try:
                    index = getattr(self._h5file.root, 'PZFImageIndex')
                    index.append(idx_entry)
                except AttributeError:
                    self._h5file.create_table(self._h5file.root,
                                              'PZFImageIndex',
                                              idx_entry,
                                              filters=tables.Filters(
                                                  complevel=5, shuffle=True),
                                              expectedrows=50000)

                self._pzf_index = None

    def appendToTable(self, tablename, data):
        #logging.debug('h5rfile - append to table: %s' % tablename)
        with self.appendQueueLock:
            if tablename not in self.appendQueues:
                self.appendQueues[tablename] = collections.deque()
            self.appendQueues[tablename].append(data)

    def getTableData(self, tablename, _slice):
        with tablesLock:
            try:
                table = getattr(self._h5file.root, tablename)
                res = table[_slice]
            except AttributeError:
                res = []

        return res

    def _pollQueues(self):
        queuesWithData = False

        # logging.debug('h5rfile - poll')

        try:
            while (self.useCount > 0 or queuesWithData
                   or time.time() < self.keepAliveTimeout):
                #logging.debug('poll - %s' % time.time())
                with self.appendQueueLock:
                    #find queues with stuff to save
                    tablenames = [
                        k for k, v in self.appendQueues.items() if len(v) > 0
                    ]

                queuesWithData = len(tablenames) > 0

                #iterate over the queues
                # for tablename in tablenames:
                #     with self.appendQueueLock:
                #         entries = self.appendQueues[tablename]
                #         self.appendQueues[tablename] = collections.deque()
                #
                #     #save the data - note that we can release the lock here, as we are the only ones calling this function.
                #     rows = np.hstack(entries)
                #     self._appendToTable(tablename, rows)

                #iterate over the queues (in a threadsafe manner)
                for tablename in tablenames:
                    waiting = self.appendQueues[tablename]
                    try:
                        while len(waiting) > 0:
                            self._appendToTable(tablename, waiting.popleft())
                    except IndexError:
                        pass

                curTime = time.time()
                if (curTime - self._lastFlushTime) > self.FLUSH_INTERVAL:
                    with tablesLock:
                        self._h5file.flush()
                    self._lastFlushTime = curTime

                time.sleep(0.1)

        except:
            traceback.print_exc()
            logging.error(traceback.format_exc())
        finally:
            logging.debug('H5RFile - closing: %s' % self.filename)
            #remove ourselves from the cache
            with openLock:
                try:
                    file_cache.pop((self.filename, self.mode))
                except KeyError:
                    pass

                self.is_alive = False
                #finally, close the file
                with tablesLock:
                    self._h5file.close()

            logging.debug('H5RFile - closed: %s' % self.filename)

    def fileFitResult(self, fitResult):
        """
        Legacy handling for fitResult objects as returned by remFitBuf

        Parameters
        ----------
        fitResult

        Returns
        -------

        """
        if len(fitResult.results) > 0:
            self.appendToTable('FitResults', fitResult.results)

        if len(fitResult.driftResults) > 0:
            self.appendToTable('DriftResults', fitResult.driftResults)
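H5RFile reference-counts its users through the context manager, keeping the file open for KEEP_ALIVE_TIMEOUT seconds after the last exit; a minimal usage sketch (hypothetical filename and data):

import numpy as np

results = np.zeros(10, dtype=[('x', 'f4'), ('y', 'f4')])
with H5RFile('/tmp/example.h5r', 'a') as h5f:
    h5f.appendToTable('FitResults', results)  # queued; written out by the poll thread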
Code Example #27
    def __init__(self, parent, filename=None, id=wx.ID_ANY, 
                 title="PYME Visualise", pos=wx.DefaultPosition,
                 size=(900,750), style=wx.DEFAULT_FRAME_STYLE, use_shaders=True, cmd_args=None, pipeline_vars = {}):

        # populate about box info
        self._component_name = 'PYMEVisualise'
        self._long_desc = "Visualisation of localisation microscopy data."
        
        AUIFrame.__init__(self, parent, id, title, pos, size, style)
        
        
        self.cmd_args = cmd_args
        self._flags = 0
        
        self.pipeline = pipeline.Pipeline(visFr=self)
        self.pipeline.dataSources.update(pipeline_vars)

        visCore.VisGUICore.__init__(self, use_shaders=use_shaders)
        
        #self.Quads = None
               
        #self.SetMenuBar(self.CreateMenuBar())
        self.CreateMenuBar(use_shaders=use_shaders)

        self.statusbar = self.CreateStatusBar(1, wx.STB_SIZEGRIP)

        self.statusbar.SetStatusText("", 0)
       
        #self._leftWindow1 = wx.Panel(self, -1, size = wx.Size(220, 1000))
        #self._pnl = 0
        
        #initialize the common parts
        ###############################
        #NB: this has to come after the shell has been generated, but before the fold panel
        

        ################################   

        self.MainWindow = self #so we can access from shell
        self.sh = wx.py.shell.Shell(id=-1, parent=self, size=wx.Size(-1, -1),
                                    style=0, locals=self.__dict__,
                                    startupScript=config.get('VisGUI-console-startup-file', None),
                                    introText='PYMEVisualize - note that help, license, etc. below is for Python, not PYME\n\n')

        #self._mgr.AddPane(self.sh, aui.AuiPaneInfo().
        #                  Name("Shell").Caption("Console").Centre().CloseButton(False).CaptionVisible(False))

        self.AddPage(self.sh, caption='Shell')
             
        
        self.elv = None
        self.colp = None
        self.mdp = None
        self.rav = None

        self.generatedImages = []
        
        self.sh.Execute('from pylab import *')
        self.sh.Execute('from PYME.DSView.dsviewer import View3D')
        
        import os
        if os.getenv('PYMEGRAPHICSFIX'): # fix issue with graphics freezing on some machines (apparently matplotlib related)
            self.sh.Execute('plot()')
            self.sh.Execute('close()')

        #self.workspace = workspaceTree.WorkWrap(self.__dict__)
        ##### Make certain things visible in the workspace tree

        #components of the pipeline
        #col = self.workspace.newColour()
        #self.workspace.addKey('pipeline', col)
        
        #Generated stuff
        #col = self.workspace.newColour()
        #self.workspace.addKey('GeneratedMeasures', col)
        #self.workspace.addKey('generatedImages', col)
        #self.workspace.addKey('objects', col)

        #main window, so we can get everything else if needed
        #col = self.workspace.newColour()
        #self.workspace.addKey('MainWindow', col)

        ######

        #self.workspaceView = workspaceTree.WorkspaceTree(self, workspace=self.workspace, shell=self.sh)
        #self.AddPage(page=wx.StaticText(self, -1, 'foo'), select=False, caption='Workspace')

#        self.glCanvas = gl_render.LMGLCanvas(self)
#        self.AddPage(page=self.glCanvas, select=True, caption='View')
#        self.glCanvas.cmap = pylab.cm.gist_rainbow #pylab.cm.hot

        #self.Bind(wx.EVT_SIZE, self.OnSize)
        self.Bind(wx.EVT_MOVE, self.OnMove)
        self.Bind(wx.EVT_CLOSE, self.OnClose)

        #self.Bind(wx.EVT_IDLE, self.OnIdle)
        #self.refv = False

        statusLog.SetStatusDispFcn(self.SetStatus)
        
        
        self.paneHooks.append(self.GenPanels)
        self.CreateFoldPanel()

        #from .layer_panel import CreateLayerPane, CreateLayerPanel
        #CreateLayerPane(sidePanel, self)
        #CreateLayerPanel(self)
        
        self._recipe_manager = recipeGui.PipelineRecipeManager(self.pipeline)
        self._recipe_editor = recipeGui.RecipeView(self, self._recipe_manager)
        self.AddPage(page=self._recipe_editor, select=False, caption='Pipeline Recipe')
        
        self.AddMenuItem('Recipe', 'Reconstruct from open image', self.reconstruct_pipeline_from_open_image)
        self.AddMenuItem('Recipe', 'Reconstruct from image file', self.reconstruct_pipeline_from_image_file)

        if filename is not None:
            def _recipe_callback():
                recipe = getattr(self.cmd_args, 'recipe', None)
                print('Using recipe: %s' % recipe)
                if recipe:
                    from PYME.recipes import modules
                    self.pipeline.recipe.update_from_yaml(recipe)
                    #self.recipeView.SetRecipe(self.pipeline.recipe)
                    self.update_datasource_panel()

                self._recipe_editor.update_recipe_text()
            
            wx.CallLater(50,self.OpenFile,filename, recipe_callback=_recipe_callback)
            #self.refv = False
        
        wx.CallAfter(self.RefreshView)

        nb = self._mgr.GetNotebooks()[0]
        nb.SetSelection(0)
        self.add_common_menu_items()
Code Example #28
def main(protocol="HTTP/1.0"):
    global GPU_STATS
    """Test the HTTP request handler class.

    This runs an HTTP server on port 8000 (or the first command line
    argument).

    """
    from optparse import OptionParser

    op = OptionParser(usage='usage: %s [options]' % sys.argv[0])

    #NOTE - currently squatting on port 15348 for testing - TODO can we use an ephemeral port?
    op.add_option(
        '-p',
        '--port',
        dest='port',
        default=config.get('dataserver-port', 15348),
        help=
        "port number to serve on (default: 15348, see also 'dataserver-port' config entry)"
    )
    op.add_option('-t',
                  '--test',
                  dest='test',
                  help="Set up for bandwidth test (don't save files)",
                  action="store_true",
                  default=False)
    op.add_option('-v',
                  '--protocol',
                  dest='protocol',
                  help="HTTP protocol version",
                  default="1.1")
    op.add_option('-l',
                  '--log-requests',
                  dest='log_requests',
                  help="Display http request info",
                  default=False,
                  action="store_true")
    default_root = config.get('dataserver-root', os.curdir)
    op.add_option(
        '-r',
        '--root',
        dest='root',
        help=
        "Root directory of virtual filesystem (default %s, see also 'dataserver-root' config entry)"
        % dataserver_root,
        default=default_root)
    op.add_option('-k',
                  '--profile',
                  dest='profile',
                  help="Enable profiling",
                  default=False,
                  action="store_true")
    op.add_option('--thread-profile',
                  dest='thread_profile',
                  help="Enable thread profiling",
                  default=False,
                  action="store_true")
    default_server_filter = config.get('dataserver-filter', compName)
    op.add_option(
        '-f',
        '--server-filter',
        dest='server_filter',
        help='Add a serverfilter for distinguishing between different clusters',
        default=default_server_filter)
    op.add_option(
        '--timeout-test',
        dest='timeout_test',
        help=
        'deliberately make requests timeout for testing error handling in calling modules',
        default=0)
    op.add_option('-a',
                  '--advertisements',
                  dest='advertisements',
                  choices=['zeroconf', 'local'],
                  default='zeroconf',
                  help='Optionally restrict advertisements to local machine')

    options, args = op.parse_args()
    if options.profile:
        from PYME.util import mProfile
        mProfile.profileOn(['HTTPDataServer.py', 'clusterListing.py'])

        profileOutDir = options.root + '/LOGS/%s/mProf' % compName

    if options.thread_profile:
        from PYME.util import fProfile

        tp = fProfile.ThreadProfiler()
        #tp.profile_on(subs=['PYME/', 'http/server', 'socketserver'],outfile=options.root + '/LOGS/%s/tProf/dataserver.txt' % compName)
        tp.profile_on(subs=[
            'PYME/',
        ],
                      outfile=options.root +
                      '/LOGS/%s/tProf/dataserver.txt' % compName)

    # setup logging to file
    log_dir = '%s/LOGS/%s' % (options.root, compName)
    makedirs_safe(log_dir)

    log_file = '%s/LOGS/%s/PYMEDataServer.log' % (options.root, compName)
    fh = logging.handlers.RotatingFileHandler(filename=log_file,
                                              mode='w',
                                              maxBytes=1e6,
                                              backupCount=1)
    logger.addHandler(fh)

    logger.info(
        '========================================\nPYMEDataServer, running on python %s\n'
        % sys.version)

    #change to the dataserver root if given
    logger.info('Serving from directory: %s' % options.root)
    os.chdir(options.root)

    if options.advertisements == 'local':
        # preference is to avoid zeroconf on a cluster of one due to poor
        # performance on crowded networks
        if config.get('clusterIO-hybridns', True):
            ns = sqlite_ns.getNS('_pyme-http')
        else:
            # if we aren't using the hybridns, we are using zeroconf in clusterIO
            # TODO - warn that we might run into performance issues???
            ns = pzc.getNS('_pyme-http')
        server_address = ('127.0.0.1', int(options.port))
        ip_addr = '127.0.0.1'
    else:
        #default
        ns = pzc.getNS('_pyme-http')
        server_address = ('', int(options.port))

        try:
            ip_addr = socket.gethostbyname(socket.gethostname())
        except:
            ip_addr = socket.gethostbyname(socket.gethostname() + '.local')

    PYMEHTTPRequestHandler.protocol_version = 'HTTP/%s' % options.protocol
    PYMEHTTPRequestHandler.bandwidthTesting = options.test
    PYMEHTTPRequestHandler.timeoutTesting = options.timeout_test
    PYMEHTTPRequestHandler.logrequests = options.log_requests

    httpd = ThreadedHTTPServer(server_address, PYMEHTTPRequestHandler)
    #httpd = http.server.HTTPServer(server_address, PYMEHTTPRequestHandler)
    httpd.daemon_threads = True

    #get the actual address (port) we bound to
    sa = httpd.socket.getsockname()
    service_name = get_service_name('PYMEDataServer [%s]' %
                                    options.server_filter)
    ns.register_service(service_name, ip_addr, sa[1])

    status['IPAddress'] = ip_addr
    status['BindAddress'] = server_address
    status['Port'] = sa[1]
    status['Protocol'] = options.protocol
    status['TestMode'] = options.test
    status['ComputerName'] = GetComputerName()

    if GPU_STATS:
        try:
            pynvml.nvmlInit()
        except:
            GPU_STATS = False

    sp = statusPoller()
    sp.start()

    logger.info("Serving HTTP on %s port %d ..." % (ip_addr, sa[1]))
    try:
        httpd.serve_forever()
    finally:
        logger.info('Shutting down ...')
        httpd.shutdown()
        httpd.server_close()

        ns.unregister(service_name)

        if options.profile:
            mProfile.report(display=False, profiledir=profileOutDir)

        if options.thread_profile:
            tp.profile_off()

        sp.stop()

        if GPU_STATS:
            pynvml.nvmlShutdown()

        try:
            from pytest_cov.embed import cleanup
            cleanup()
        except:
            pass

        sys.exit()
Code Example #29
    def __init__(
        self,
        parent,
        scopeState,
        winid=-1,
    ):
        # begin wxGlade: MyFrame1.__init__
        #kwds["style"] = wx.DEFAULT_FRAME_STYLE
        wx.Panel.__init__(self, parent, winid)

        #self.cam = cam
        self.scopeState = scopeState

        self.laserNames = []
        for k in self.scopeState.keys():
            m = re.match(r'Lasers\.(?P<laser_name>.*)\.Power', k)
            if m is not None:
                self.laserNames.append(m.group('laser_name'))

        self.laserNames.sort()
        #self.lasers = [l for l in lasers if l.IsPowerControlable()]
        #self.laserNames=[l.GetName() for l in lasers]

        self.sliders = []
        self.labels = []
        self.buttons = []
        self.sliding = False
        self.mode = SCALING_MODES[config.get('laser-slider-scaling',
                                             default='log')]

        sizer_2 = wx.BoxSizer(wx.VERTICAL)

        for c, laserName in enumerate(self.laserNames):
            sz = wx.BoxSizer(wx.HORIZONTAL)
            b = wx.ToggleButton(self, -1, laserName, style=wx.BU_EXACTFIT)
            b.Bind(wx.EVT_TOGGLEBUTTON, self.on_toggle)
            self.buttons.append(b)
            sz.Add(b, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 2)

            if self.mode == 1:
                min_power = 0
                max_power = 100
            else:
                min_power = self.scopeState['Lasers.%s.MinPower' % laserName]
                max_power = self.scopeState['Lasers.%s.MaxPower' % laserName]
            sl = wx.Slider(
                self,
                -1,
                self.scopeState['Lasers.%s.Power' % laserName],
                minValue=min_power,
                maxValue=max_power,
                size=wx.Size(150, -1),
                style=wx.SL_HORIZONTAL)  #|wx.SL_AUTOTICKS|wx.SL_LABELS)

            if wx.version() < '4':
                #FIXME for wx >= 4
                sl.SetTickFreq(10, 1)

            sz.Add(sl, 1, wx.ALL | wx.ALIGN_CENTER_VERTICAL | wx.EXPAND, 2)
            self.sliders.append(sl)

            l = wx.StaticText(self, -1, '100.0')
            self.labels.append(l)
            sz.Add(l, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 2)

            sizer_2.Add(sz, 1, wx.EXPAND, 0)

        #sizer_2.AddSpacer(5)

        self.Bind(wx.EVT_SCROLL, self.onSlide)

        # discover our switchable (but not power controllable) lasers
        self.switchedLaserNames = []
        self.cBoxes = []

        for k in self.scopeState.keys():
            m = re.match(r'Lasers\.(?P<laser_name>.*)\.On', k)
            if m is not None:
                ln = m.group('laser_name')
                if ln not in self.laserNames:
                    self.switchedLaserNames.append(ln)

        self.switchedLaserNames.sort()

        hsizer = wx.BoxSizer(wx.HORIZONTAL)
        n = 0
        self.cBoxes = []

        for laserName in self.switchedLaserNames:
            cb = wx.CheckBox(self, -1, laserName)
            cb.SetValue(self.scopeState['Lasers.%s.On' % laserName])
            cb.Bind(wx.EVT_CHECKBOX, self.OnCbOn)

            self.cBoxes.append(cb)
            hsizer.Add(cb, 1, wx.EXPAND, 0)
            n += 1
            if (n % 3) == 0:
                sizer_2.Add(hsizer, 0, wx.EXPAND, 0)
                hsizer = wx.BoxSizer(wx.HORIZONTAL)

        sizer_2.Add(hsizer, 0, wx.EXPAND, 0)
        #self.SetAutoLayout(1)
        self.SetSizer(sizer_2)
        sizer_2.Fit(self)
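Laser discovery above is driven by regular-expression matches against scope-state keys; for example (hypothetical key name):

import re

m = re.match(r'Lasers\.(?P<laser_name>.*)\.Power', 'Lasers.OBIS405.Power')
m.group('laser_name')  # -> 'OBIS405'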
Code Example #30
def main():
    global LOG_STREAMS
    confFile = os.path.join(conf.user_config_dir, 'distributor.yaml')
    with open(confFile) as f:
        config = yaml.safe_load(f)

    serverAddr, serverPort = config['distributor']['http_endpoint'].split(':')
    externalAddr = socket.gethostbyname(socket.gethostname())

    #set up logging
    #logfile_error = None
    #logfile_debug = None

    data_root = conf.get('dataserver-root')
    if data_root:
        #logfile_error = open('%s/LOGS/distributor_error.log' % data_root, 'w')
        #logfile_debug = open('%s/LOGS/distributor_debug.log' % data_root, 'w')

        distr_log_dir = '%s/LOGS' % data_root

        dist_log_err_file = os.path.join(distr_log_dir,
                                         'distributor_error.log')
        if os.path.exists(dist_log_err_file):
            os.remove(dist_log_err_file)

        dist_err_handler = logging.handlers.RotatingFileHandler(
            dist_log_err_file, 'w', maxBytes=1e6, backupCount=1)
        dist_err_handler.setFormatter(logging.Formatter('%(message)s'))
        distLogErr = logging.getLogger('dist_err')
        distLogErr.addHandler(dist_err_handler)
        distLogErr.setLevel(logging.DEBUG)
        distLogErr.propagate = False

        dist_log_dbg_file = os.path.join(distr_log_dir,
                                         'distributor_debug.log')
        if os.path.exists(dist_log_dbg_file):
            os.remove(dist_log_dbg_file)

        dist_dbg_handler = logging.handlers.RotatingFileHandler(
            dist_log_dbg_file, 'w', maxBytes=1e6, backupCount=1)
        dist_dbg_handler.setFormatter(logging.Formatter('%(message)s'))
        distLogDbg = logging.getLogger('dist_debug')
        distLogDbg.addHandler(dist_dbg_handler)
        distLogDbg.setLevel(logging.DEBUG)
        distLogDbg.propagate = False

        if not (len(sys.argv) == 2 and sys.argv[1] == '-n'):
            proc = subprocess.Popen('distributor -c %s' % confFile,
                                    shell=True,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE)
        else:
            proc = subprocess.Popen('python -m PYME.cluster.distributor 1234',
                                    shell=True,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE)

        t_log_stderr = threading.Thread(target=log_stream,
                                        args=(proc.stderr, distLogErr))
        t_log_stderr.setDaemon(False)
        t_log_stderr.start()

        t_log_stdout = threading.Thread(target=log_stream,
                                        args=(proc.stdout, distLogDbg))
        t_log_stdout.setDaemon(False)
        t_log_stdout.start()
    else:
        if not (len(sys.argv) == 2 and sys.argv[1] == '-n'):
            proc = subprocess.Popen('distributor -c %s' % confFile, shell=True)
        else:
            proc = subprocess.Popen('python -m PYME.cluster.distributor 1234',
                                    shell=True)

    ns = pyme_zeroconf.getNS('_pyme-taskdist')
    service_name = get_service_name('PYMEDistributor')
    ns.register_service(service_name, externalAddr, int(serverPort))

    try:
        while proc.poll() is None:
            time.sleep(1)

            # if logfile_error:
            #     #do crude log rotation
            #     if logfile_error.tell() > 1e6:
            #         logfile_error.seek(0)
            #
            #     if logfile_debug.tell() > 1e6:
            #         logfile_debug.seek(0)

    finally:
        ns.unregister(service_name)
        #try and shut down the distributor cleanly
        proc.send_signal(1)
        time.sleep(2)
        proc.kill()

        LOG_STREAMS = False