Example #1
    def _checkTQ(self):
        def _genURI(taskQueueName):
            try:
                from PYME.misc import hybrid_ns
                ns = hybrid_ns.getNS()
                return ns.resolve(taskQueueName)
            except:
                return 'PYRONAME://' + taskQueueName

        try:
            self.tq.isAlive()
        except:
            self.tq = None

        if self.tq is None:
            import Pyro.core
            from PYME.misc.computerName import GetComputerName
            compName = GetComputerName()

            try:
                taskQueueName = 'TaskQueues.%s' % compName
                self.tq = Pyro.core.getProxyForURI(_genURI(taskQueueName))
            except:
                logger.exception(
                    'Error finding task queue, looking for a local queue instead'
                )
                taskQueueName = 'PrivateTaskQueues.%s' % compName
                # _genURI already returns a full URI (or a 'PYRONAME://' fallback)
                self.tq = Pyro.core.getProxyForURI(_genURI(taskQueueName))
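
The helper above shows the lookup pattern most of these examples revolve around: build a queue name from GetComputerName(), try to resolve it through the hybrid name server, and fall back to a plain PYRONAME URI. A minimal standalone sketch of that lookup (assuming PYME is installed and a Pyro name server or zeroconf advertiser is reachable):

def resolve_task_queue_uri(task_queue_name):
    # try the PYME hybrid name server first; fall back to letting Pyro
    # resolve the PYRONAME itself if the name-server lookup fails
    try:
        from PYME.misc import hybrid_ns
        ns = hybrid_ns.getNS()
        return ns.resolve(task_queue_name)
    except Exception:
        return 'PYRONAME://' + task_queue_name

# e.g. resolve_task_queue_uri('TaskQueues.%s' % GetComputerName())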
Example #2
    def __init__(self, filename, frameSource, frameShape, complevel=6, complib='zlib', **kwargs):
#       if 'PYME_TASKQUEUENAME' in os.environ.keys():
#            taskQueueName = os.environ['PYME_TASKQUEUENAME']
#       else:
#            taskQueueName = 'taskQueue'
        import Pyro.core

        from PYME.misc.computerName import GetComputerName
        compName = GetComputerName()

        taskQueueName = 'TaskQueues.%s' % compName

        if ns:
            URI = ns.resolve(taskQueueName)
        else:
            URI = 'PYRONAME://' + taskQueueName

        self.tq = Pyro.core.getProxyForURI(URI)
        self.tq._setOneway(['postTask', 'postTasks', 'addQueueEvents', 'setQueueMetaData', 'logQueueEvent'])

        self.seriesName = filename
        self.buffer = []
        self.buflen = 30

        self.tq.createQueue('HDFTaskQueue',self.seriesName, filename, frameSize = frameShape, complevel=complevel, complib=complib)

        self.md = MetaDataHandler.QueueMDHandler(self.tq, self.seriesName)
        self.evtLogger = EventLogger(self, self.tq, self.seriesName)

        sp.Spooler.__init__(self, filename, frameSource, **kwargs)
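
The _setOneway call above marks the queue-posting methods as Pyro one-way calls, so spooling frames does not block on a reply for every post. A pared-down sketch of the same proxy setup (a guess at minimal usage; it assumes Pyro 3 and a task queue already registered under the usual name):

import Pyro.core
from PYME.misc.computerName import GetComputerName

tq = Pyro.core.getProxyForURI('PYRONAME://TaskQueues.%s' % GetComputerName())
# one-way calls return immediately and discard results/exceptions
tq._setOneway(['postTask', 'postTasks', 'logQueueEvent'])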
Example #3
    def checkTQ(self):

        try:
            #if 'PYME_TASKQUEUENAME' in os.environ.keys():
            #    taskQueueName = os.environ['PYME_TASKQUEUENAME']
            #else:
            #    taskQueueName = 'taskQueue'
            self.tq.isAlive()

        except:
            self.tq = None

        if self.tq is None:
            from PYME.misc.computerName import GetComputerName
            compName = GetComputerName()

            try:
                taskQueueName = 'TaskQueues.%s' % compName

                self.tq = Pyro.core.getProxyForURI('PYRONAME://' +
                                                   taskQueueName)
            except:
                taskQueueName = 'PrivateTaskQueues.%s' % compName

                self.tq = Pyro.core.getProxyForURI('PYRONAME://' +
                                                   taskQueueName)
Example #4
    def LoadQueue(self, filename):
        '''Load data from a remote PYME.ParallelTasks.HDFTaskQueue queue using
        Pyro.

        Parameters
        ----------
        filename : string
            the name of the queue
        '''
        import Pyro.core
        from PYME.Analysis.DataSources import TQDataSource
        from PYME.misc.computerName import GetComputerName
        compName = GetComputerName()

        if self.queueURI is None:
            #do a lookup
            taskQueueName = 'TaskQueues.%s' % compName
            self.tq = Pyro.core.getProxyForURI('PYRONAME://' + taskQueueName)
        else:
            self.tq = Pyro.core.getProxyForURI(self.queueURI)

        self.seriesName = filename[len('QUEUE://'):]

        self.dataSource = TQDataSource.DataSource(self.seriesName, self.tq)
        self.data = self.dataSource  #this will get replaced with a wrapped version

        self.mdh = MetaDataHandler.QueueMDHandler(self.tq, self.seriesName)
        MetaData.fillInBlanks(self.mdh, self.dataSource)

        #self.timer.WantNotification.append(self.dsRefresh)

        self.events = self.dataSource.getEvents()
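
For reference, LoadQueue expects a QUEUE:// name and simply strips that prefix to get the series name; a tiny illustration of the convention (the filename below is hypothetical):

filename = 'QUEUE://series_0.h5'            # hypothetical queue name
seriesName = filename[len('QUEUE://'):]     # -> 'series_0.h5', as in LoadQueue above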
Example #5
    def __init__(self, driftTracker):
        threading.Thread.__init__(self)

        import socket
        ip_addr = socket.gethostbyname(socket.gethostname())

        compName = GetComputerName()

        Pyro.core.initServer()

        pname = "%s.DriftTracker" % compName

        try:
            from PYME.misc import pyme_zeroconf
            ns = pyme_zeroconf.getNS()
        except:
            ns = Pyro.naming.NameServerLocator().getNS()

            if compName not in [n[0] for n in ns.list('')]:
                ns.createGroup(compName)

            #get rid of any previous instance
            try:
                ns.unregister(pname)
            except Pyro.errors.NamingError:
                pass

        self.daemon = Pyro.core.Daemon(host=ip_addr)
        self.daemon.useNameServer(ns)

        self.driftCorr = piezoOffsetProxy(driftTracker)

        #pname = "%s.Piezo" % compName

        uri = self.daemon.connect(self.driftCorr, pname)
Example #6
def getClient(compName=GetComputerName()):
    try:
        from PYME.misc import pyme_zeroconf
        ns = pyme_zeroconf.getNS()
        URI = ns.resolve('%s.DriftTracker' % compName)
    except:
        URI = 'PYRONAME://%s.DriftTracker' % compName

    return Pyro.core.getProxyForURI(URI)
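
Calling it is a one-liner; the result is a Pyro proxy for the '<compName>.DriftTracker' object registered in Example #5 (sketch only; it assumes the tracker daemon from that example is running):

dt = getClient()                  # proxy for this machine's DriftTracker
# dt = getClient('OTHER-PC')      # or look up the tracker on another machine by name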
Example #7
def main():
    global LOG_STREAMS
    confFile = os.path.join(conf.user_config_dir, 'distributor.yaml')
    with open(confFile) as f:
        config = yaml.safe_load(f)

    serverAddr, serverPort = config['distributor']['http_endpoint'].split(':')
    externalAddr = socket.gethostbyname(socket.gethostname())
    
    #set up logging
    data_root = conf.get('dataserver-root')
    if data_root:
        distr_log_dir = '%s/LOGS' % data_root

        dist_log_err_file = os.path.join(distr_log_dir, 'distributor.log')
        if os.path.exists(dist_log_err_file):
            os.remove(dist_log_err_file)

        dist_err_handler = logging.handlers.RotatingFileHandler(filename=dist_log_err_file, mode='w', maxBytes=1e6, backupCount=1)
        #dist_err_handler.setFormatter(logging.Formatter('%(message)s'))
        distLogErr = logging.getLogger('distributor')
        distLogErr.setLevel(logging.DEBUG)
        distLogErr.addHandler(dist_err_handler)
    
    
    proc = ruleserver.ServerThread(serverPort, profile=False)
    proc.start()
    #proc = subprocess.Popen('python -m PYME.ParallelTasks.distributor 1234', shell=True)

    ns = pyme_zeroconf.getNS('_pyme-taskdist')
    ns.register_service('PYMERuleServer: ' + GetComputerName(), externalAddr, int(serverPort))

    try:
        while proc.is_alive():
            time.sleep(1)

    finally:
        logger.debug('trying to shut down server')
        proc.shutdown()
        ns.unregister('PYMERuleServer: ' + GetComputerName())
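
The register/unregister bookkeeping in this main() reduces to a small pattern that several later examples repeat. A sketch, assuming PYME.misc.pyme_zeroconf is available and something is already listening on the advertised port (15346 is the default ruleserver port used in Example #19):

import socket, time
from PYME.misc import pyme_zeroconf
from PYME.misc.computerName import GetComputerName

ns = pyme_zeroconf.getNS('_pyme-taskdist')
service_name = 'PYMERuleServer: ' + GetComputerName()
ns.register_service(service_name, socket.gethostbyname(socket.gethostname()), 15346)
try:
    time.sleep(1)                 # the real server loop runs here
finally:
    ns.unregister(service_name)   # always withdraw the advertisement on shutdown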
Example #8
def getClient(compName = GetComputerName()):
    #try:
    from PYME.misc import pyme_zeroconf 
    ns = pyme_zeroconf.getNS()
    time.sleep(3)
    #print ns.list()
    URI = ns.resolve('%s.Piezo' % compName)
    #except:
    #    URI ='PYRONAME://%s.Piezo'%compName

    #print URI

    return Pyro.core.getProxyForURI(URI)
Example #9
    def checkTQ(self):
        import Pyro.core
        if self.tq is None:
            #if 'PYME_TASKQUEUENAME' in os.environ.keys():
            #    taskQueueName = os.environ['PYME_TASKQUEUENAME']
            #else:
            #    taskQueueName = 'taskQueue'

            from PYME.misc.computerName import GetComputerName
            compName = GetComputerName()

            taskQueueName = 'TaskQueues.%s' % compName

            self.tq = Pyro.core.getProxyForURI('PYRONAME://' + taskQueueName)
Example #10
    def _loadQueue(self, filename):
        """Load data from a remote PYME.ParallelTasks.HDFTaskQueue queue using
        Pyro.
        
        Parameters:
        -----------

        filename  : string
            the name of the queue         
        
        """
        import Pyro.core
        from PYME.IO.DataSources import TQDataSource
        from PYME.misc.computerName import GetComputerName
        compName = GetComputerName()

        if self.queueURI is None:
            #do a lookup
            taskQueueName = 'TaskQueues.%s' % compName

            try:
                from PYME.misc import pyme_zeroconf
                ns = pyme_zeroconf.getNS()
                URI = ns.resolve(taskQueueName)
            except:
                URI = 'PYRONAME://' + taskQueueName

            self.tq = Pyro.core.getProxyForURI(URI)
        else:
            self.tq = Pyro.core.getProxyForURI(self.queueURI)

        self.seriesName = filename[len('QUEUE://'):]

        self.dataSource = TQDataSource.DataSource(self.seriesName, self.tq)
        self.data = self.dataSource  #this will get replaced with a wrapped version

        self.mdh = MetaDataHandler.QueueMDHandler(self.tq, self.seriesName)
        MetaData.fillInBlanks(self.mdh, self.dataSource)

        #self.timer.WantNotification.append(self.dsRefresh)

        self.events = self.dataSource.getEvents()
        self.mode = 'LM'
Example #11
    def __init__(self,
                 scope,
                 filename,
                 acquisator,
                 protocol=p.NullProtocol,
                 parent=None,
                 complevel=2,
                 complib='zlib'):
        #       if 'PYME_TASKQUEUENAME' in os.environ.keys():
        #            taskQueueName = os.environ['PYME_TASKQUEUENAME']
        #       else:
        #            taskQueueName = 'taskQueue'
        from PYME.misc.computerName import GetComputerName
        compName = GetComputerName()

        taskQueueName = 'TaskQueues.%s' % compName

        self.tq = Pyro.core.getProxyForURI('PYRONAME://' + taskQueueName)
        self.tq._setOneway([
            'postTask', 'postTasks', 'addQueueEvents', 'setQueueMetaData',
            'logQueueEvent'
        ])

        self.seriesName = filename
        self.buffer = []
        self.buflen = 30

        self.tq.createQueue('HDFTaskQueue',
                            self.seriesName,
                            filename,
                            frameSize=(scope.cam.GetPicWidth(),
                                       scope.cam.GetPicHeight()),
                            complevel=complevel,
                            complib=complib)

        self.md = MetaDataHandler.QueueMDHandler(self.tq, self.seriesName)
        self.evtLogger = EventLogger(self, scope, self.tq, self.seriesName)

        sp.Spooler.__init__(self, scope, filename, acquisator, protocol,
                            parent)
Example #12
    def _getTaskQueueURI(cls, n_retries=2):
        """Discover the distributors using zeroconf and choose one"""
        from PYME.misc import hybrid_ns
        import socket
        import random
        import time
        from PYME.misc.computerName import GetComputerName
        compName = GetComputerName()

        ns = hybrid_ns.getNS('_pyme-taskdist')

        queueURLs = {}

        def _search():
            for name, info in ns.get_advertised_services():
                if name.startswith('PYMERuleServer'):
                    print(info, info.address)
                    queueURLs[name] = 'http://%s:%d' % (socket.inet_ntoa(
                        info.address), info.port)

        _search()
        while not queueURLs and (n_retries > 0):
            logging.info(
                'could not find a rule server, waiting 5s and trying again')
            time.sleep(5)
            n_retries -= 1
            _search()

        try:
            #try to grab the distributor on the local computer
            local_queues = [q for q in queueURLs if compName in q]
            logger.debug('local_queues: %s' % local_queues)
            return queueURLs[local_queues[0]]
        except (KeyError, IndexError):
            #if there is no local distributor, choose one at random
            logger.info('no local rule server, choosing one at random')
            return random.choice(list(queueURLs.values()))
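
The final selection step, prefer a rule server advertised by this machine and otherwise pick one at random, can be exercised in isolation (the URLs and names below are made up):

import random

def choose_queue_url(queueURLs, compName):
    # prefer a server whose advertised name mentions this computer
    local = [name for name in queueURLs if compName in name]
    if local:
        return queueURLs[local[0]]
    return random.choice(list(queueURLs.values()))

urls = {'PYMERuleServer: nodeA': 'http://10.0.0.1:15346',
        'PYMERuleServer: nodeB': 'http://10.0.0.2:15346'}
print(choose_queue_url(urls, 'nodeA'))    # -> http://10.0.0.1:15346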
Example #13
def get_service_name(process_name):
    """Generate an appropriate service name for zeroconf, pyro, etc.

    Parameters
    ----------
    process_name : str
        name of the process, e.g. 'PYMEDataServer [serverfilter]'

    Returns
    -------
    str
        service name including process ID and as much of the process name and
        computer as possible given length constraints. Something like
        PYMEDataServer [trout]:cutthroat - PID:2020
    """
    from PYME.misc.computerName import GetComputerName

    pid = str(os.getpid())

    base_name = process_name + ':' + GetComputerName() + ' - '
    # max of 63 characters for zeroconf compatibility
    base_name = base_name[:(63 - 4 - len(pid))]
    
    return base_name + 'PID:' + pid
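
A quick sanity check of the length guarantee; only the 63-character zeroconf limit and the 'PID:' suffix come from the function above, the process name is arbitrary:

import os

name = get_service_name('PYMEDataServer [a-very-long-serverfilter-name]')
print(name)                                      # e.g. 'PYMEDataServer [...]:<computer> - PID:1234'
assert len(name) <= 63                           # truncation keeps the zeroconf-safe length
assert name.endswith('PID:' + str(os.getpid()))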
Example #14
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# generated by wxGlade 0.5 on Mon Jun 23 16:22:12 2008 from /home/david/taskMon.wxg

import wx
from wx.lib.dialogs import ScrolledMessageDialog
import wx.grid

import logging

import Pyro.core
import time
import os

from PYME.misc.computerName import GetComputerName
compName = GetComputerName()

if 'PYRO_NS_HOSTNAME' in os.environ.keys():
    Pyro.config.PYRO_NS_HOSTNAME = os.environ['PYRO_NS_HOSTNAME']

#if 'PYME_TASKQUEUENAME' in os.environ.keys():
#    taskQueueName = os.environ['PYME_TASKQUEUENAME']
#else:
#    taskQueueName = 'taskQueue'

taskQueueName = 'TaskQueues.%s' % compName


class MyFrame(wx.Frame):
    def __init__(self, *args, **kwds):
        # begin wxGlade: MyFrame.__init__
Example #15
def main():
    global LOG_STREAMS
    cluster_root = conf.get('dataserver-root', conf.user_config_dir)

    confFile = os.path.join(conf.user_config_dir, 'nodeserver.yaml')
    with open(confFile) as f:
        config = yaml.safe_load(f)

    serverAddr, serverPort = config['nodeserver']['http_endpoint'].split(':')
    externalAddr = socket.gethostbyname(socket.gethostname())

    ns = pyme_zeroconf.getNS('_pyme-taskdist')
    #
    # #find distributor(s)
    # distributors = []
    # for name, info in ns.advertised_services.items():
    #     if name.startswith('PYMEDistributor'):
    #         distributors.append('%s:%d' % (socket.inet_ntoa(info.address), info.port))

    distributors = [
        u.replace('http://', '', 1).rstrip('/')  # strip the scheme prefix (lstrip would eat leading host characters)
        for u in distribution.getDistributorInfo().values()
    ]

    #modify the configuration to reflect the discovered distributor(s)
    config['nodeserver']['distributors'] = distributors

    #write a new config file for the nodeserver
    with tempfile.NamedTemporaryFile(mode='w', suffix='.yaml',
                                     delete=False) as temp_conf_file:
        temp_conf_file_name = temp_conf_file.name
        temp_conf_file.write(yaml.dump(config))

    logging.debug('Config file: ' + temp_conf_file_name)

    #set up nodeserver logging
    nodeserver_log_dir = os.path.join(cluster_root, 'LOGS', GetComputerName())

    #remove old log files
    try:
        os.remove(os.path.join(nodeserver_log_dir, 'nodeserver.log'))
    except OSError:  # if we can't clear out old log files, we might not have a log directory set up
        try:
            os.makedirs(
                os.path.join(nodeserver_log_dir)
            )  # NB - this will create all intermediate directories as well
        except:  # throw error because the RotatingFileHandler will fail to initialize
            raise IOError('Unable to initialize log files at %s' %
                          nodeserver_log_dir)
        pass

    try:
        shutil.rmtree(os.path.join(nodeserver_log_dir, 'taskWorkerHTTP'))
    except:
        pass

    #nodeserverLog = open(os.path.join(nodeserver_log_dir, 'nodeserver.log'), 'w')
    nodeserver_log_handler = logging.handlers.RotatingFileHandler(
        os.path.join(nodeserver_log_dir, 'nodeserver.log'),
        'w',
        maxBytes=1e6,
        backupCount=0)
    nodeserver_log_handler.setFormatter(logging.Formatter('%(message)s'))
    nodeserverLog = logging.getLogger('nodeserver')
    nodeserverLog.addHandler(nodeserver_log_handler)
    nodeserverLog.setLevel(logging.DEBUG)
    nodeserverLog.propagate = False

    if not (len(sys.argv) == 2 and sys.argv[1] == '-n'):
        proc = subprocess.Popen('nodeserver -c %s' % temp_conf_file_name,
                                shell=True,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
    else:
        proc = subprocess.Popen('python -m PYME.cluster.nodeserver %s %s' %
                                (distributors[0], serverPort),
                                shell=True,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)

    t_log_stderr = threading.Thread(target=log_stream,
                                    args=(proc.stderr, nodeserverLog))
    t_log_stderr.setDaemon(False)
    t_log_stderr.start()

    t_log_stdout = threading.Thread(target=log_stream,
                                    args=(proc.stdout, nodeserverLog))
    t_log_stdout.setDaemon(False)
    t_log_stdout.start()

    ns.register_service('PYMENodeServer: ' + GetComputerName(), externalAddr,
                        int(serverPort))

    time.sleep(2)
    logging.debug('Launching worker processors')
    numWorkers = config.get('numWorkers', cpu_count())

    workerProcs = [
        subprocess.Popen('python -m PYME.cluster.taskWorkerHTTP',
                         shell=True,
                         stdin=subprocess.PIPE) for i in range(numWorkers - 1)
    ]

    #last worker has profiling enabled
    profiledir = os.path.join(nodeserver_log_dir, 'mProf')
    workerProcs.append(
        subprocess.Popen('python -m PYME.cluster.taskWorkerHTTP -p %s' %
                         profiledir,
                         shell=True,
                         stdin=subprocess.PIPE))

    try:
        while proc.poll() is None:  # poll() returns None while the child is still running
            time.sleep(1)

            #try to keep log size under control by doing crude rotation
            #if nodeserverLog.tell() > 1e6:
            #    nodeserverLog.seek(0)
    except KeyboardInterrupt:
        pass
    finally:
        LOG_STREAMS = False
        logging.info('Shutting down workers')
        try:
            ns.unregister('PYMENodeServer: ' + GetComputerName())
        except:
            pass

        os.unlink(temp_conf_file_name)

        for p in workerProcs:
            #ask the workers to quit (nicely)
            try:
                p.send_signal(1)
            except:
                pass

        time.sleep(2)

        for p in workerProcs:
            #now kill them off
            try:
                p.kill()
            except:
                pass

        logging.info('Shutting down nodeserver')
        try:
            proc.kill()
        except:
            pass

        logging.info('Workers and nodeserver are shut down')

        sys.exit()
Example #16
def main():
    global LOG_STREAMS
    confFile = os.path.join(conf.user_config_dir, 'distributor.yaml')
    with open(confFile) as f:
        config = yaml.safe_load(f)

    serverAddr, serverPort = config['distributor']['http_endpoint'].split(':')
    externalAddr = socket.gethostbyname(socket.gethostname())

    #set up logging
    #logfile_error = None
    #logfile_debug = None

    data_root = conf.get('dataserver-root')
    if data_root:
        #logfile_error = open('%s/LOGS/distributor_error.log' % data_root, 'w')
        #logfile_debug = open('%s/LOGS/distributor_debug.log' % data_root, 'w')

        distr_log_dir = '%s/LOGS' % data_root

        dist_log_err_file = os.path.join(distr_log_dir,
                                         'distributor_error.log')
        if os.path.exists(dist_log_err_file):
            os.remove(dist_log_err_file)

        dist_err_handler = logging.handlers.RotatingFileHandler(
            dist_log_err_file, 'w', maxBytes=1e6, backupCount=1)
        dist_err_handler.setFormatter(logging.Formatter('%(message)s'))
        distLogErr = logging.getLogger('dist_err')
        distLogErr.addHandler(dist_err_handler)
        distLogErr.setLevel(logging.DEBUG)
        distLogErr.propagate = False

        dist_log_dbg_file = os.path.join(distr_log_dir,
                                         'distributor_debug.log')
        if os.path.exists(dist_log_dbg_file):
            os.remove(dist_log_dbg_file)

        dist_dbg_handler = logging.handlers.RotatingFileHandler(
            dist_log_dbg_file, 'w', maxBytes=1e6, backupCount=1)
        dist_dbg_handler.setFormatter(logging.Formatter('%(message)s'))
        distLogDbg = logging.getLogger('dist_debug')
        distLogDbg.addHandler(dist_dbg_handler)
        distLogDbg.setLevel(logging.DEBUG)
        distLogDbg.propagate = False

        if not (len(sys.argv) == 2 and sys.argv[1] == '-n'):
            proc = subprocess.Popen('distributor -c %s' % confFile,
                                    shell=True,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE)
        else:
            proc = subprocess.Popen('python -m PYME.cluster.distributor 1234',
                                    shell=True,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE)

        t_log_stderr = threading.Thread(target=log_stream,
                                        args=(proc.stderr, distLogErr))
        t_log_stderr.setDaemon(False)
        t_log_stderr.start()

        t_log_stdout = threading.Thread(target=log_stream,
                                        args=(proc.stdout, distLogDbg))
        t_log_stdout.setDaemon(False)
        t_log_stdout.start()
    else:
        if not (len(sys.argv) == 2 and sys.argv[1] == '-n'):
            proc = subprocess.Popen('distributor -c %s' % confFile, shell=True)
        else:
            proc = subprocess.Popen('python -m PYME.cluster.distributor 1234',
                                    shell=True)

    ns = pyme_zeroconf.getNS('_pyme-taskdist')
    ns.register_service('PYMEDistributor: ' + GetComputerName(), externalAddr,
                        int(serverPort))

    try:
        while proc.poll() is None:  # poll() returns None while the child is still running
            time.sleep(1)

            # if logfile_error:
            #     #do crude log rotation
            #     if logfile_error.tell() > 1e6:
            #         logfile_error.seek(0)
            #
            #     if logfile_debug.tell() > 1e6:
            #         logfile_debug.seek(0)

    finally:
        ns.unregister('PYMEDistributor: ' + GetComputerName())
        #try and shut down the distributor cleanly
        proc.send_signal(1)
        time.sleep(2)
        proc.kill()

        LOG_STREAMS = False
Example #17
from PYME import config
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render

from PYME.misc.computerName import GetComputerName
server_filter = config.get('dataserver-filter', GetComputerName())

# Create your views here.


def recipe_form(request):
    """stand in until we have a better recipe GUI"""
    return render(request, 'recipes/form_recipe.html',
                  {'serverfilter': server_filter})


def recipe_standalone(request):
    """This allows file selection with globs like bakeshop"""
    return render(request, 'recipes/recipe_standalone.html',
                  {'serverfilter': server_filter})


def recipe_template(request):
    """This allows file selection with globs like bakeshop"""
    return render(request, 'recipes/recipe_template.html',
                  {'serverfilter': server_filter})


def get_input_glob(request):
    from PYME.IO import clusterIO
Example #18
"""
import time
import socket
import threading
import requests
import posixpath
import sys
import numpy as np

from PYME import config

from multiprocessing.pool import ThreadPool

from PYME.misc.computerName import GetComputerName

local_computer_name = GetComputerName()

import logging

logger = logging.getLogger(__name__)

SERVICE_CACHE_LIFETIME = 1  #seconds
DIR_CACHE_TIME = 1  #seconds

import PYME.misc.pyme_zeroconf as pzc
from PYME.misc import hybrid_ns

_ns = None
_ns_lock = threading.Lock()

if config.get('clusterIO-hybridns', True):
Example #19
def main():
    global LOG_STREAMS

    op = ArgumentParser(
        description=
        "PYME rule server for task distribution. This should run once per cluster."
    )

    #NOTE - currently squatting on port 15346 for testing - TODO can we use an ephemeral port
    op.add_argument(
        '-p',
        '--port',
        dest='port',
        default=config.get('ruleserver-port', 15346),
        type=int,
        help=
        "port number to serve on (default: 15346, see also 'ruleserver-port' config entry)"
    )

    op.add_argument('-a',
                    '--advertisements',
                    dest='advertisements',
                    choices=['zeroconf', 'local'],
                    default='zeroconf',
                    help='Optionally restrict advertisements to local machine')

    args = op.parse_args()

    serverPort = args.port

    if args.advertisements == 'local':
        #bind on localhost
        bind_addr = '127.0.0.1'
    else:
        bind_addr = ''  #bind all interfaces

    #set up logging
    data_root = config.get('dataserver-root')
    if data_root:
        distr_log_dir = '%s/LOGS' % data_root

        dist_log_err_file = os.path.join(distr_log_dir, 'distributor.log')
        if os.path.exists(dist_log_err_file):
            os.remove(dist_log_err_file)

        dist_err_handler = logging.handlers.RotatingFileHandler(
            filename=dist_log_err_file, mode='w', maxBytes=1e6, backupCount=1)
        #dist_err_handler.setFormatter(logging.Formatter('%(message)s'))
        distLogErr = logging.getLogger('distributor')
        distLogErr.setLevel(logging.DEBUG)
        distLogErr.addHandler(dist_err_handler)

    proc = ruleserver.ServerThread(serverPort,
                                   bind_addr=bind_addr,
                                   profile=False)
    proc.start()
    #proc = subprocess.Popen('python -m PYME.ParallelTasks.distributor 1234', shell=True)

    if args.advertisements == 'zeroconf':
        ns = pyme_zeroconf.getNS('_pyme-taskdist')
    else:
        #assume 'local'
        ns = sqlite_ns.getNS('_pyme-taskdist')

    time.sleep(0.5)
    #get the actual address (port) we bound to
    sa = proc.distributor.socket.getsockname()
    ns.register_service('PYMERuleServer: ' + GetComputerName(),
                        proc.externalAddr, int(sa[1]))

    try:
        while proc.is_alive():
            time.sleep(1)

    finally:
        logger.debug('trying to shut down server')
        proc.shutdown()
        ns.unregister('PYMERuleServer: ' + GetComputerName())
Example #20
def main(protocol="HTTP/1.0"):
    """Run the PYME HTTP data server.

    Serves on the port given by the 'dataserver-port' config entry
    (default 8080) unless overridden with the --port option.
    """
    global GPU_STATS
    from optparse import OptionParser

    op = OptionParser(usage='usage: %s [options]' % sys.argv[0])

    op.add_option(
        '-p',
        '--port',
        dest='port',
        default=config.get('dataserver-port', 8080),
        help=
        "port number to serve on (default: 8080, see also 'dataserver-port' config entry)"
    )
    op.add_option('-t',
                  '--test',
                  dest='test',
                  help="Set up for bandwidth test (don't save files)",
                  action="store_true",
                  default=False)
    op.add_option('-v',
                  '--protocol',
                  dest='protocol',
                  help="HTTP protocol version",
                  default="1.1")
    op.add_option('-l',
                  '--log-requests',
                  dest='log_requests',
                  help="Display http request info",
                  default=False,
                  action="store_true")
    default_root = config.get('dataserver-root', os.curdir)
    op.add_option(
        '-r',
        '--root',
        dest='root',
        help=
        "Root directory of virtual filesystem (default %s, see also 'dataserver-root' config entry)"
        % default_root,
        default=default_root)
    op.add_option('-k',
                  '--profile',
                  dest='profile',
                  help="Enable profiling",
                  default=False,
                  action="store_true")
    default_server_filter = config.get('dataserver-filter', compName)
    op.add_option(
        '-f',
        '--server-filter',
        dest='server_filter',
        help='Add a serverfilter for distinguishing between different clusters',
        default=default_server_filter)
    op.add_option(
        '--timeout-test',
        dest='timeout_test',
        help=
        'deliberately make requests timeout for testing error handling in calling modules',
        default=0)

    options, args = op.parse_args()
    if options.profile:
        from PYME.util import mProfile
        mProfile.profileOn(['HTTPDataServer.py', 'clusterListing.py'])

        profileOutDir = options.root + '/LOGS/%s/mProf' % compName

    logger.info(
        '========================================\nPYMEDataServer, running on python %s\n'
        % sys.version)

    #change to the dataserver root if given
    logger.info('Serving from directory: %s' % options.root)
    os.chdir(options.root)

    server_address = ('', int(options.port))

    PYMEHTTPRequestHandler.protocol_version = 'HTTP/%s' % options.protocol
    PYMEHTTPRequestHandler.bandwidthTesting = options.test
    PYMEHTTPRequestHandler.timeoutTesting = options.timeout_test
    PYMEHTTPRequestHandler.logrequests = options.log_requests

    httpd = ThreadedHTTPServer(server_address, PYMEHTTPRequestHandler)
    #httpd = http.server.HTTPServer(server_address, PYMEHTTPRequestHandler)
    httpd.daemon_threads = True

    sa = httpd.socket.getsockname()

    try:
        ip_addr = socket.gethostbyname(socket.gethostname())
    except:
        ip_addr = socket.gethostbyname(socket.gethostname() + '.local')

    ns = pzc.getNS('_pyme-http')
    ns.register_service(
        'PYMEDataServer [%s]: ' % options.server_filter + procName, ip_addr,
        sa[1])

    status['IPAddress'] = ip_addr
    status['BindAddress'] = server_address
    status['Port'] = sa[1]
    status['Protocol'] = options.protocol
    status['TestMode'] = options.test
    status['ComputerName'] = GetComputerName()

    if GPU_STATS:
        try:
            pynvml.nvmlInit()
        except:
            GPU_STATS = False

    sp = statusPoller()
    sp.start()

    logger.info("Serving HTTP on %s port %d ..." % (ip_addr, sa[1]))
    try:
        httpd.serve_forever()
    finally:
        logger.info('Shutting down ...')
        httpd.shutdown()
        httpd.server_close()

        if options.profile:
            mProfile.report(display=False, profiledir=profileOutDir)

        sp.stop()

        if GPU_STATS:
            pynvml.nvmlShutdown()

        sys.exit()
Example #21
def main(protocol="HTTP/1.0"):
    """Run the PYME HTTP data server.

    Serves on the port given by the 'dataserver-port' config entry
    (default 15348) unless overridden with the --port option.
    """
    global GPU_STATS
    from optparse import OptionParser

    op = OptionParser(usage='usage: %s [options]' % sys.argv[0])

    #NOTE - currently squatting on port 15348 for testing - TODO can we use an ephemeral port?
    op.add_option(
        '-p',
        '--port',
        dest='port',
        default=config.get('dataserver-port', 15348),
        help=
        "port number to serve on (default: 15348, see also 'dataserver-port' config entry)"
    )
    op.add_option('-t',
                  '--test',
                  dest='test',
                  help="Set up for bandwidth test (don't save files)",
                  action="store_true",
                  default=False)
    op.add_option('-v',
                  '--protocol',
                  dest='protocol',
                  help="HTTP protocol version",
                  default="1.1")
    op.add_option('-l',
                  '--log-requests',
                  dest='log_requests',
                  help="Display http request info",
                  default=False,
                  action="store_true")
    default_root = config.get('dataserver-root', os.curdir)
    op.add_option(
        '-r',
        '--root',
        dest='root',
        help=
        "Root directory of virtual filesystem (default %s, see also 'dataserver-root' config entry)"
        % default_root,
        default=default_root)
    op.add_option('-k',
                  '--profile',
                  dest='profile',
                  help="Enable profiling",
                  default=False,
                  action="store_true")
    op.add_option('--thread-profile',
                  dest='thread_profile',
                  help="Enable thread profiling",
                  default=False,
                  action="store_true")
    default_server_filter = config.get('dataserver-filter', compName)
    op.add_option(
        '-f',
        '--server-filter',
        dest='server_filter',
        help='Add a serverfilter for distinguishing between different clusters',
        default=default_server_filter)
    op.add_option(
        '--timeout-test',
        dest='timeout_test',
        help=
        'deliberately make requests timeout for testing error handling in calling modules',
        default=0)
    op.add_option('-a',
                  '--advertisements',
                  dest='advertisements',
                  choices=['zeroconf', 'local'],
                  default='zeroconf',
                  help='Optionally restrict advertisements to local machine')

    options, args = op.parse_args()
    if options.profile:
        from PYME.util import mProfile
        mProfile.profileOn(['HTTPDataServer.py', 'clusterListing.py'])

        profileOutDir = options.root + '/LOGS/%s/mProf' % compName

    if options.thread_profile:
        from PYME.util import fProfile

        tp = fProfile.ThreadProfiler()
        #tp.profile_on(subs=['PYME/', 'http/server', 'socketserver'],outfile=options.root + '/LOGS/%s/tProf/dataserver.txt' % compName)
        tp.profile_on(subs=[
            'PYME/',
        ],
                      outfile=options.root +
                      '/LOGS/%s/tProf/dataserver.txt' % compName)

    # setup logging to file
    log_dir = '%s/LOGS/%s' % (options.root, compName)
    makedirs_safe(log_dir)

    log_file = '%s/LOGS/%s/PYMEDataServer.log' % (options.root, compName)
    fh = logging.handlers.RotatingFileHandler(filename=log_file,
                                              mode='w',
                                              maxBytes=1e6,
                                              backupCount=1)
    logger.addHandler(fh)

    logger.info(
        '========================================\nPYMEDataServer, running on python %s\n'
        % sys.version)

    #change to the dataserver root if given
    logger.info('Serving from directory: %s' % options.root)
    os.chdir(options.root)

    if options.advertisements == 'local':
        # preference is to avoid zeroconf on clusterofone due to poor
        # performance on crowded networks
        if config.get('clusterIO-hybridns', True):
            ns = sqlite_ns.getNS('_pyme-http')
        else:
            # if we aren't using the hybridns, we are using zeroconf in clusterIO
            # TODO - warn that we might run into performance issues???
            ns = pzc.getNS('_pyme-http')
        server_address = ('127.0.0.1', int(options.port))
        ip_addr = '127.0.0.1'
    else:
        #default
        ns = pzc.getNS('_pyme-http')
        server_address = ('', int(options.port))

        try:
            ip_addr = socket.gethostbyname(socket.gethostname())
        except:
            ip_addr = socket.gethostbyname(socket.gethostname() + '.local')

    PYMEHTTPRequestHandler.protocol_version = 'HTTP/%s' % options.protocol
    PYMEHTTPRequestHandler.bandwidthTesting = options.test
    PYMEHTTPRequestHandler.timeoutTesting = options.timeout_test
    PYMEHTTPRequestHandler.logrequests = options.log_requests

    httpd = ThreadedHTTPServer(server_address, PYMEHTTPRequestHandler)
    #httpd = http.server.HTTPServer(server_address, PYMEHTTPRequestHandler)
    httpd.daemon_threads = True

    #get the actual address (port) we bound to
    sa = httpd.socket.getsockname()
    service_name = get_service_name('PYMEDataServer [%s]' %
                                    options.server_filter)
    ns.register_service(service_name, ip_addr, sa[1])

    status['IPAddress'] = ip_addr
    status['BindAddress'] = server_address
    status['Port'] = sa[1]
    status['Protocol'] = options.protocol
    status['TestMode'] = options.test
    status['ComputerName'] = GetComputerName()

    if GPU_STATS:
        try:
            pynvml.nvmlInit()
        except:
            GPU_STATS = False

    sp = statusPoller()
    sp.start()

    logger.info("Serving HTTP on %s port %d ..." % (ip_addr, sa[1]))
    try:
        httpd.serve_forever()
    finally:
        logger.info('Shutting down ...')
        httpd.shutdown()
        httpd.server_close()

        ns.unregister(service_name)

        if options.profile:
            mProfile.report(display=False, profiledir=profileOutDir)

        if options.thread_profile:
            tp.profile_off()

        sp.stop()

        if GPU_STATS:
            pynvml.nvmlShutdown()

        try:
            from pytest_cov.embed import cleanup
            cleanup()
        except:
            pass

        sys.exit()
Example #22
def main():
    op = ArgumentParser(
        description=
        "PYME node server for task distribution. This should run on every node of the cluster"
    )

    #NOTE - currently squatting on port 15347 for testing - TODO can we use an ephemeral port?
    op.add_argument(
        '-p',
        '--port',
        dest='port',
        default=conf.get('nodeserver-port', 15347),
        type=int,
        help=
        "port number to serve on (default: 15347, see also 'nodeserver-port' config entry)"
    )

    op.add_argument('-a',
                    '--advertisements',
                    dest='advertisements',
                    choices=['zeroconf', 'local'],
                    default='zeroconf',
                    help='Optionally restrict advertisements to local machine')

    args = op.parse_args()

    serverPort = args.port
    externalAddr = socket.gethostbyname(socket.gethostname())

    if args.advertisements == 'zeroconf':
        ns = pyme_zeroconf.getNS('_pyme-taskdist')
    else:
        #assume local
        ns = sqlite_ns.getNS('_pyme-taskdist')
        externalAddr = '127.0.0.1'  #bind to localhost

    #TODO - move this into the nodeserver proper so that the ruleserver doesn't need to be up before we start
    print(distribution.getDistributorInfo(ns).values())
    distributors = [
        u.replace('http://', '', 1).rstrip('/')  # strip the scheme prefix (lstrip would eat leading host characters)
        for u in distribution.getDistributorInfo(ns).values()
    ]

    #set up nodeserver logging
    cluster_root = conf.get('dataserver-root')
    if cluster_root:
        nodeserver_log_dir = os.path.join(cluster_root, 'LOGS',
                                          GetComputerName())

        #remove old log files
        try:
            os.remove(os.path.join(nodeserver_log_dir, 'nodeserver.log'))
        except OSError:  # if we can't clear out old log files, we might not have a log directory set up
            try:
                if not os.path.exists(os.path.join(nodeserver_log_dir)):
                    os.makedirs(
                        os.path.join(nodeserver_log_dir)
                    )  # NB - this will create all intermediate directories as well
            except:  # throw error because the RotatingFileHandler will fail to initialize
                raise IOError('Unable to initialize log files at %s' %
                              nodeserver_log_dir)

        try:
            shutil.rmtree(os.path.join(nodeserver_log_dir, 'taskWorkerHTTP'))
        except:
            pass

        nodeserver_log_handler = logging.handlers.RotatingFileHandler(
            os.path.join(nodeserver_log_dir, 'nodeserver.log'),
            'w',
            maxBytes=1e6,
            backupCount=0)
        nodeserverLog = logging.getLogger('nodeserver')
        nodeserverLog.setLevel(logging.DEBUG)
        nodeserver_log_handler.setLevel(logging.DEBUG)
        nodeserver_log_handler.setFormatter(formatter)
        nodeserverLog.addHandler(nodeserver_log_handler)
        nodeserverLog.addHandler(stream_handler)

        #nodeserverLog.propagate=False

    else:
        nodeserver_log_dir = os.path.join(os.curdir, 'LOGS', GetComputerName())

    proc = rulenodeserver.ServerThread(distributors[0],
                                       serverPort,
                                       externalAddr=externalAddr,
                                       profile=False)
    proc.start()

    # TODO - do we need this advertisement
    #get the actual address (port) we bound to
    time.sleep(0.5)
    sa = proc.nodeserver.socket.getsockname()
    serverPort = int(sa[1])
    ns.register_service('PYMENodeServer: ' + GetComputerName(), externalAddr,
                        serverPort)

    time.sleep(2)
    logger.debug('Launching worker processors')
    numWorkers = conf.get('nodeserver-num_workers', cpu_count())

    workerProcs = [
        subprocess.Popen('python -m PYME.cluster.taskWorkerHTTP -s %d' %
                         serverPort,
                         shell=True,
                         stdin=subprocess.PIPE) for i in range(numWorkers - 1)
    ]

    #last worker has profiling enabled
    profiledir = os.path.join(nodeserver_log_dir, 'mProf')
    workerProcs.append(
        subprocess.Popen(
            'python -m PYME.cluster.taskWorkerHTTP -s %d -p --profile-dir=%s'
            % (serverPort, profiledir),
            shell=True,
            stdin=subprocess.PIPE))

    try:
        while proc.is_alive():
            time.sleep(1)

    finally:
        logger.info('Shutting down workers')

        try:
            ns.unregister('PYMENodeServer: ' + GetComputerName())
        except:
            pass

        for p in workerProcs:
            #ask the workers to quit (nicely)
            try:
                p.send_signal(1)
            except:
                pass

        time.sleep(2)

        for p in workerProcs:
            #now kill them off
            try:
                p.kill()
            except:
                pass

        logger.info('Shutting down nodeserver')

        proc.shutdown()
        proc.join()

        logger.info('Workers and nodeserver are shut down')

        sys.exit()
Example #23
import numpy as np
import threading
from PYME.IO import unifiedIO

import six  # needed for the PY2/PY3 check below

if six.PY2:
    import httplib
else:
    import http.client as httplib

import socket
import os

USE_RAW_SOCKETS = True

from PYME.misc.computerName import GetComputerName
compName = GetComputerName()


def to_bytes(input):
    """
    Helper function for python3k to force urls etc to byte strings

    Parameters
    ----------
    input

    Returns
    -------

    """
Example #24
def main(protocol="HTTP/1.0"):
    """Run the PYME HTTP data server.

    Serves on the port given by the 'dataserver-port' config entry
    (default 8080) unless overridden with the --port option.
    """
    from optparse import OptionParser

    op = OptionParser(usage='usage: %s [options] [filename]' % sys.argv[0])

    op.add_option('-p',
                  '--port',
                  dest='port',
                  default=config.get('dataserver-port', 8080),
                  help="port number to serve on")
    op.add_option('-t',
                  '--test',
                  dest='test',
                  help="Set up for bandwidth test (don't save files)",
                  action="store_true",
                  default=False)
    op.add_option('-v',
                  '--protocol',
                  dest='protocol',
                  help="HTTP protocol version",
                  default="1.1")
    op.add_option('-l',
                  '--log-requests',
                  dest='log_requests',
                  help="Display http request info",
                  default=False,
                  action="store_true")
    op.add_option('-r',
                  '--root',
                  dest='root',
                  help="Root directory of virtual filesystem",
                  default=config.get('dataserver-root', os.curdir))

    options, args = op.parse_args()

    #change to the dataserver root if given
    logger.info('Serving from directory: %s' % options.root)
    os.chdir(options.root)

    #PYMEHTTPRequestHandler.protocol_version = 'HTTP/%s' % options.protocol
    server_config['bandwidthTesting'] = options.test
    #PYMEHTTPRequestHandler.logrequests = options.log_requests

    #httpd = ThreadedHTTPServer(server_address, PYMEHTTPRequestHandler)
    ip_addr = socket.gethostbyname(socket.gethostname())
    server_address = ('', int(options.port))

    global_status['IPAddress'] = ip_addr
    global_status['BindAddress'] = server_address
    global_status['Port'] = int(options.port)
    global_status['Protocol'] = options.protocol
    global_status['TestMode'] = options.test
    global_status['ComputerName'] = GetComputerName()

    ns = pzc.getNS('_pyme-http')
    ns.register_service('PYMEDataServer: ' + procName, ip_addr,
                        int(options.port))

    print("Serving HTTP on %s port %d ..." % (ip_addr, options.port))

    #wsgiref_server(options)
    cherrypy_server(options)
Example #25
def main():

    #prof = fProfile.thread_profiler()
    #prof.profileOn('.*PYME.*|.*zeroconf.*', 'ruleserver_prof.txt')
    #mProfile.profileOn(['rulenodeserver.py', 'zeroconf.py'])

    confFile = os.path.join(conf.user_config_dir, 'nodeserver.yaml')
    with open(confFile) as f:
        config = yaml.safe_load(f)

    serverAddr, serverPort = config['nodeserver']['http_endpoint'].split(':')
    externalAddr = socket.gethostbyname(socket.gethostname())

    print(distribution.getDistributorInfo().values())

    distributors = [
        u.replace('http://', '', 1).rstrip('/')  # strip the scheme prefix (lstrip would eat leading host characters)
        for u in distribution.getDistributorInfo().values()
    ]

    #set up nodeserver logging
    cluster_root = conf.get('dataserver-root')
    if cluster_root:
        nodeserver_log_dir = os.path.join(cluster_root, 'LOGS',
                                          GetComputerName())

        #remove old log files
        try:
            os.remove(os.path.join(nodeserver_log_dir, 'nodeserver.log'))
        except OSError:  # if we can't clear out old log files, we might not have a log directory set up
            try:
                if not os.path.exists(os.path.join(nodeserver_log_dir)):
                    os.makedirs(
                        os.path.join(nodeserver_log_dir)
                    )  # NB - this will create all intermediate directories as well
            except:  # throw error because the RotatingFileHandler will fail to initialize
                raise IOError('Unable to initialize log files at %s' %
                              nodeserver_log_dir)

        try:
            shutil.rmtree(os.path.join(nodeserver_log_dir, 'taskWorkerHTTP'))
        except:
            pass

        nodeserver_log_handler = logging.handlers.RotatingFileHandler(
            os.path.join(nodeserver_log_dir, 'nodeserver.log'),
            'w',
            maxBytes=1e6,
            backupCount=0)
        nodeserverLog = logging.getLogger('nodeserver')
        nodeserverLog.setLevel(logging.DEBUG)
        nodeserver_log_handler.setLevel(logging.DEBUG)
        nodeserver_log_handler.setFormatter(formatter)
        nodeserverLog.addHandler(nodeserver_log_handler)
        nodeserverLog.addHandler(stream_handler)

        #nodeserverLog.propagate=False

    else:
        nodeserver_log_dir = os.path.join(os.curdir, 'LOGS', GetComputerName())

    #proc = subprocess.Popen('python -m PYME.cluster.rulenodeserver %s %s' % (distributors[0], serverPort), shell=True,
    #                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    proc = rulenodeserver.ServerThread(distributors[0],
                                       serverPort,
                                       profile=False)
    proc.start()

    ns = pyme_zeroconf.getNS('_pyme-taskdist')
    ns.register_service('PYMENodeServer: ' + GetComputerName(), externalAddr,
                        int(serverPort))

    time.sleep(2)
    logger.debug('Launching worker processors')
    numWorkers = config.get('nodeserver-num_workers', cpu_count())

    workerProcs = [
        subprocess.Popen('python -m PYME.cluster.taskWorkerHTTP',
                         shell=True,
                         stdin=subprocess.PIPE) for i in range(numWorkers - 1)
    ]

    #last worker has profiling enabled
    profiledir = os.path.join(nodeserver_log_dir, 'mProf')
    workerProcs.append(
        subprocess.Popen('python -m PYME.cluster.taskWorkerHTTP -p %s' %
                         profiledir,
                         shell=True,
                         stdin=subprocess.PIPE))

    try:
        while proc.is_alive():
            time.sleep(1)

    finally:
        logger.info('Shutting down workers')

        try:
            ns.unregister('PYMENodeServer: ' + GetComputerName())
        except:
            pass

        for p in workerProcs:
            #ask the workers to quit (nicely)
            try:
                p.send_signal(1)
            except:
                pass

        time.sleep(2)

        for p in workerProcs:
            #now kill them off
            try:
                p.kill()
            except:
                pass

        logger.info('Shutting down nodeserver')
        #try:
        proc.shutdown()
        proc.join()
        #except:
        #    pass

        logger.info('Workers and nodeserver are shut down')

        #mProfile.report()
        #prof.profileOff()

        sys.exit()