Example #1
def __init__(self):
    # if any jobs were marked as running when the scheduler started,
    # replace their state with X to mark that they have been shut down
    db = DAL(config.uri, auto_import=True, migrate=False,
             folder=config.dbdir)
    myset = db(db.jobs.state == 'R')
    myset.update(state='X')
    db.commit()
    self.sem = BoundedSemaphore(config.np)
    self.mutex = Lock()
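
This example only initializes the primitives. A minimal sketch of how such a scheduler might use them to bound concurrent jobs (the submit/_run methods and the np default are assumptions, not part of the original):

from multiprocessing import BoundedSemaphore, Lock, Process

class Scheduler:
    def __init__(self, np=4):
        self.sem = BoundedSemaphore(np)   # at most np jobs run at once
        self.mutex = Lock()               # serializes shared bookkeeping

    def _run(self, job):
        try:
            job()                         # the actual work
        finally:
            self.sem.release()            # always free the slot, even on error

    def submit(self, job):
        self.sem.acquire()                # blocks while np jobs are active
        Process(target=self._run, args=(job,)).start()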
Example #2
class SessionManager(object):
    _prepared_statement_cache = {}
    _multiprocess_lock = BoundedSemaphore(4)

    @classmethod
    def create_pool(cls, cluster, keyspace, consistency_level=None, fetch_size=None,
                    default_timeout=None, process_count=None):
        # cls.__pool = Pool(processes=process_count, initializer=cls._setup,
        #                   initargs=(cluster, keyspace, consistency_level, fetch_size, default_timeout))
        cls._setup(cluster, keyspace, consistency_level, fetch_size, default_timeout)

    @classmethod
    def _setup(cls, cluster, keyspace, consistency_level, fetch_size, default_timeout):
        cls.cluster = cluster
        with cls._multiprocess_lock:
            cls.__session = cls.cluster.connect(keyspace)
        cls.__session.row_factory = tuple_factory
        if consistency_level is not None:
            cls.__session.default_consistency_level = consistency_level
        if fetch_size is not None:
            cls.__session.default_fetch_size = fetch_size
        if default_timeout is not None:
            cls.__session.default_timeout = default_timeout
        cls._prepared_statement_cache = {}

    @classmethod
    def prepare(cls, statement):
        if statement not in cls._prepared_statement_cache:
            cls._prepared_statement_cache[statement] = cls.__session.prepare(statement)
        return cls._prepared_statement_cache[statement]

    @classmethod
    def close_pool(cls):
        # shut down the multiprocessing pool created in create_pool
        cls.__pool.close()
        cls.__pool.join()

    @classmethod
    def get_query_columns(cls, table):
        # grab the column names from our metadata
        cols = cls.cluster.metadata.keyspaces[cls.__session.keyspace].tables[table].columns.keys()
        cols = map(_clean_column_name, cols)
        unneeded = ['subsite', 'node', 'sensor', 'method']
        cols = [c for c in cols if c not in unneeded]
        return cols

    @classmethod
    def execute(cls, *args, **kwargs):
        return cls.__session.execute(*args, **kwargs)

    @classmethod
    def session(cls):
        return cls.__session

    @classmethod
    def pool(cls):
        return cls.__pool
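
A hypothetical usage sketch for the class above (the contact point, keyspace, table, and query are illustrative, not from the original):

from cassandra.cluster import Cluster

SessionManager.create_pool(Cluster(['127.0.0.1']), 'my_keyspace',
                           fetch_size=1000)
stmt = SessionManager.prepare('SELECT * FROM sensor_data WHERE id = ?')
rows = SessionManager.execute(stmt, ('abc',))
for row in rows:
    print(row)   # tuples, because of tuple_factory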
Example #3
def __init__(self):
    # if any jobs were marked as running when the scheduler started,
    # mark them as stopped to record that they have been shut down
    db = DAL(config.uri, auto_import=True, migrate=False, folder=config.dbdir)
    myset = db(db.jobs.state == STATE_RUN)
    myset.update(state=STATE_STOPPED)
    db.commit()
    self.sem = BoundedSemaphore(config.np)
    self.mutex = Lock()
    # set time zone (time.tzset is unavailable on Windows)
    try:
        os.environ['TZ'] = config.time_zone
        time.tzset()
    except Exception:
        pass
Example #4
def analyze_eula(eula):
    # Categories to analyze; these will be processed in parallel
    categories = [
        formal.Formal, procedural.Procedural, substantive.Substantive
    ]

    # Create a semaphore to limit number of running processes
    running = BoundedSemaphore(int(os.getenv('analyze_max_threads', 1)))

    # We cannot return values from child processes, so create a managed dictionary to pass results back
    ret_vars = Manager().dict()

    # Create a process declaration for each category in the above array
    processes = []
    for cat in categories:
        # Allocate a space in the dictionary for their return values
        ret_vars[cat.__name__.lower()] = None
        # Describe the process, giving it the eula, the category, the semaphore, and the return dict
        processes.append(
            Process(target=cat_score, args=(eula, cat, ret_vars, running)))

    # Start processes in order of above array
    for process in processes:
        # cat_score is expected to acquire the semaphore before doing real work
        process.start()

    # Join each process so we don't exit until all are done
    for process in processes:
        process.join()

    # Copy the managed dict into a plain dict now that we are done
    ret_vars = ret_vars.copy()

    # Calculate overall score by summing the weighted score of each category then dividing by number of categories
    # i.e. simple average
    overall_score = int(
        sum(map(lambda x: x['weighted_score'], ret_vars.values())) /
        len(ret_vars))
    grades = ['F', 'D', 'C', 'B', 'A']

    return {
        'title': eula.title,
        'url': eula.url,
        'overall_score': overall_score,
        'overall_grade': grades[overall_score],
        'categories': ret_vars
    }
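
cat_score is not shown in this example. A minimal sketch of what it presumably does, given how it is called (the category constructor and score() API are assumptions):

def cat_score(eula, category, ret_vars, running):
    # gate the real work so at most analyze_max_threads categories run at once
    with running:
        scorer = category(eula)                  # assumed constructor
        ret_vars[category.__name__.lower()] = {
            'weighted_score': scorer.score()     # assumed API
        }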
Example #5
def measure(args):
    procs = []
    sema = BoundedSemaphore(int(args.procs))

    for proc in range(args.procs):

        sema.acquire()   # blocks once args.procs workers are running
        p = Process(target=readTiles, args=(args, proc, sema))
        p.start()
        procs.append(p)
        if args.verbosity > 1:
            print("start {}".format(proc), file=sys.stdout)

    for p in procs:
        p.join()
        if args.verbosity > 1:
            print("join {}".format(p), file=sys.stdout)
Example #6
def main_threaded(iniconfig):
    semaphore = BoundedSemaphore(CONCURRENCY_LIMIT)
    tasks = []
    for appid in iniconfig:
        section = iniconfig[appid]
        task = Thread(target=checker, args=(section, appid, semaphore))
        tasks.append(task)
        task.start()

    try:
        for t in tasks:
            t.join()
    except KeyboardInterrupt:
        for t in tasks:
            if hasattr(t, 'terminate'):  # multiprocessing
                t.terminate()
        print('Validation aborted.')
        sys.exit(1)
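
checker is not shown. Presumably each thread gates its work on the shared semaphore, along these lines (a sketch; the validation body is omitted):

def checker(section, appid, semaphore):
    with semaphore:          # at most CONCURRENCY_LIMIT checks at once
        pass                 # validate the app's config section (omitted)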
Example #7
def measure(args):
    procs = []
    sema = BoundedSemaphore(int(args.procs))

    with open(args.tiles, 'r') as inputf:
        for line in inputf:
            s = ("curl -s 'http://35.203.177.233/fcgi-bin/iipsrv.fcgi?DeepZoom='"
                 + args.slide + line.rstrip() + ">/dev/null")
            # print(line.rstrip())
            # os.system(s)
            sema.acquire()   # at most args.procs curl workers at once
            p = Process(target=readTile, args=(s, sema))
            p.start()
            procs.append(p)

    for p in procs:
        p.join()
Example #8
def main(concurrency, dsn, jobs, file_input, file_output, skip_header):
    """Console script for timescale_bench."""
    exit_code = 0
    exec_sem = BoundedSemaphore(jobs)
    worker_queues = [WorkerQueue(dsn, exec_sem) for i in range(concurrency)]

    reader = csv.reader(file_input)
    if file_output:
        writer = csv.writer(file_output)
    if skip_header:
        next(reader, None)

    try:
        results = list(flush_batch(reader, worker_queues))
        if not results:
            raise RuntimeError('No input provided')
        if file_output:
            for query_result in results:
                for result in query_result.results:
                    writer.writerow(result)
        timing = dict(
            total=sum((res.duration for res in results)),
            shortest=min((res.duration for res in results)),
            median=statistics.median_high((res.duration for res in results)),
            avg=statistics.mean((res.duration for res in results)),
            longest=max((res.duration for res in results)),
        )
        # Convert to timedelta for display
        timing = {
            key: timedelta(seconds=value)
            for key, value in timing.items()
        }
        click.echo(f"Number of queries processed: {len(results)}")
        click.echo(f"      Total processing time: {timing['total']}")
        click.echo(f"        Shortest query time: {timing['shortest']}")
        click.echo(f"          Median query time: {timing['median']}")
        click.echo(f"               Average time: {timing['avg']}")
        click.echo(f"         Longest query time: {timing['longest']}")
    except Exception as err:
        click.echo(f'Failure: {err}')
        exit_code = 1
    return exit_code
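
WorkerQueue and flush_batch are not shown. The BoundedSemaphore presumably caps the number of in-flight queries across all worker queues, roughly like this (a hypothetical reconstruction; only the semaphore usage is the point):

class WorkerQueue:
    def __init__(self, dsn, exec_sem):
        self.dsn = dsn
        self.exec_sem = exec_sem

    def run_query(self, query):
        with self.exec_sem:  # at most 'jobs' queries execute concurrently
            pass             # execute 'query' against self.dsn (omitted)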
Example #9
    def __init__(self, maxsize=0):
        if maxsize <= 0:
            # Can raise ImportError (see issues #3770 and #23400)
            from multiprocessing.synchronize import SEM_VALUE_MAX as maxsize
        self._maxsize = maxsize
        self._reader, self._writer = connection.Pipe(duplex=False)
        self._rlock = Lock()
        self._opid = os.getpid()
        if sys.platform == 'win32':
            self._wlock = None
        else:
            self._wlock = Lock()
        self._sem = BoundedSemaphore(maxsize)
        # For use by concurrent.futures
        self._ignore_epipe = False

        self._after_fork()

        if sys.platform != 'win32':
            register_after_fork(self, Queue._after_fork)
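
This is how multiprocessing.Queue bounds its size: put() acquires self._sem and get() releases it. A short demonstration of the resulting behavior using only the standard library:

from multiprocessing import Queue
import queue

q = Queue(maxsize=2)
q.put(1)
q.put(2)
try:
    q.put(3, block=False)   # the bounded semaphore has no permits left
except queue.Full:
    print("queue full after maxsize puts")
q.get()                     # get() releases one permit...
q.put(3, block=False)       # ...so this put now succeeds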
Example #10
def measure(args, ranges):
    procs = []
    sema = BoundedSemaphore(int(args.procs))

    start = 0                         # renamed from 'next', which shadowed the builtin
    left = len(ranges)
    batch = (left // args.procs) + 1  # integer division; '/' yields a float on Python 3

    for proc in range(args.procs):

        sema.acquire()
        counts = max(min(left, batch), 0)
        if args.verbosity > 1:
            print("{} {} {}".format(proc, start, counts))
        p = Process(target=readRanges, args=(args, ranges, start, counts, sema))
        p.start()
        procs.append(p)
        start += batch
        left -= batch

    for p in procs:
        p.join()
        if args.verbosity > 1:
            print("join {}".format(p), file=sys.stdout)
Example #11
                sense.show_message(humidity, scroll_speed=0.1, text_colour=[100, 150, 150])
                num = 1
            elif event.direction[:1] == "m" and num == 0:
                sense.show_message("Bye", scroll_speed=0.1, text_colour=[100, 150, 150])
                num = 1
                isRunning = False
                t1.terminate()
                semaphore.release()

            elif num == 1:
                num = 0
                semaphore.release()
                time.sleep(1)

    sense.clear()

c = getMatrix()
sense = SenseHat()
sense.clear()
sense.set_rotation(180)
sense.low_light = True
semaphore = BoundedSemaphore(value=1)
t1 = Process(name="getColor", target=getColor)
t2 = Process(name="main", target=myMainLogic)
#t1.daemon = True
#t2.daemon = True
t1.start()
t2.start()

Example #12
    def __init__(self,
                 source: str,
                 *,
                 n_consumers: int = 0,
                 should_start: bool = True,
                 show_pipeline: bool = True,
                 image_mask_enabled: bool = True,
                 debug: bool = False):
        """
    Declares instance variables (_show_pipeline, _debug, _capture) and starts the pipeline according to should_start

    :param source: the filename or device that the pipeline should be run on
    :param should_start: a flag indicating whether or not the pipeline should start as soon as it is instantiated
    :param show_pipeline: a flag indicating whether or not each step in the pipeline should be shown
    :param debug: a flag indicating whether or not the use is debugging the pipeline. In debug, the pipeline is
                  shown and debug statements are enabled
    """

        # call superclass constructor
        super().__init__()

        # initialize instance variables

        # private - use property accessor
        self._source = source
        self._frame = None
        self._name = self.__class__.__name__
        self._fps = None
        self._image_mask_enabled = image_mask_enabled
        self._debug = debug
        self._show_pipeline = show_pipeline or self._debug
        self._show_pipeline_steps = settings.display.show_pipeline_steps
        self._knots = []

        screen_dimensions = (settings.window.height, settings.window.width,
                             NUM_IMAGE_CHANNELS)
        # protected
        self._screen = numpy.zeros(screen_dimensions, numpy.uint8)
        self._visualizer = None
        self._region_of_interest = None
        self._capture = None

        self._n_consumers = n_consumers  # the number of objects consuming the result of this pipeline
        if self._n_consumers > 0:
            # stores the currently detected lane - must be a queue as the detected result is accessed from a separate process
            self._lanes_queue = Queue()
            self.__n_consuming = 0  # tracks the number of consumers currently 'consuming' the detected result
            self.__n_consuming_mutex = Lock()  # blocks access to self.__n_consuming
            # used to block the pipeline from running while consumers are consuming
            self.__consumer_semaphore = BoundedSemaphore(1)
            # used to prevent multiple iterations of the pipeline from running before being consumed
            self.__producer_semaphore = BoundedSemaphore(1)

        # private - only accessible by class
        self.__current_knots = []  # may not be filled (most likely, will be partially filled)
        self.__stop = False
        self.__paused = False
        self.__while_paused = None

        # check if the pipeline should start immediately
        if should_start and not self.is_alive():
            self.start()
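
The two BoundedSemaphore(1) objects implement a single-slot handoff: the pipeline may not publish a new result until the previous one has been consumed. A standalone sketch of that pattern, independent of the pipeline class above:

from multiprocessing import BoundedSemaphore, Process, Queue

def producer(q, gate):
    for i in range(3):
        gate.acquire()       # blocks until the previous result is consumed
        q.put(i)

def consumer(q, gate):
    for _ in range(3):
        print("consumed", q.get())
        gate.release()       # let the producer publish the next result

if __name__ == '__main__':
    q = Queue()
    gate = BoundedSemaphore(1)   # at most one unconsumed result at a time
    p = Process(target=producer, args=(q, gate))
    c = Process(target=consumer, args=(q, gate))
    p.start()
    c.start()
    p.join()
    c.join()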
Example #13
def __init__(self, bound, max_workers):
    self.executor = ThreadPoolExecutor(max_workers=max_workers)
    self.semaphore = BoundedSemaphore(bound + max_workers)
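
The snippet stops at __init__. The usual way to complete this pattern is to acquire the semaphore in submit() and release it in a done-callback, so at most bound tasks wait in the queue while max_workers run (a sketch; submit is not part of the original, and threading's BoundedSemaphore is assumed here):

from concurrent.futures import ThreadPoolExecutor
from threading import BoundedSemaphore

class BoundedExecutor:
    def __init__(self, bound, max_workers):
        self.executor = ThreadPoolExecutor(max_workers=max_workers)
        self.semaphore = BoundedSemaphore(bound + max_workers)

    def submit(self, fn, *args, **kwargs):
        self.semaphore.acquire()         # blocks once bound + max_workers tasks are pending
        try:
            future = self.executor.submit(fn, *args, **kwargs)
        except Exception:
            self.semaphore.release()     # don't leak a permit if submission fails
            raise
        future.add_done_callback(lambda _: self.semaphore.release())
        return future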
Example #14
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import math, os, sys, random
from multiprocessing import Process, BoundedSemaphore, Value, Array

INBUF = []
OUTBUF = None
sem = BoundedSemaphore(8)


def f(x, N):
    return x * N - x * (x - 1) // 2


def single_test(INBUF, OUTBUF, INDEX):
    sem.acquire()
    result = 0
    N, M, data = INBUF[INDEX]

    enter = {}
    leave = {}
    events = set()

    cost = 0

    for d in data:
        cost += d[2] * f(d[1] - d[0], N)
        cost %= 1000002013
        if d[0] not in events: events.add(d[0])
        if d[0] not in enter: enter[d[0]] = d[2]
Example #15
from structlog import get_logger

import cassandra.cluster
import cassandra.auth
from cassandra.query import BatchStatement, tuple_factory

from multiprocessing import BoundedSemaphore
from data.cassandra_wrapper.model import FIELDS_Quote, FIELDS_Trades, FIELDS_Trades_min, FIELDS_Trades_over_under, \
    FIELDS_Trades_over_under_basic
from common import singleton, get_config, process_singleton

MAX_PARALLEL_QUERIES = 256

QUOTE_SAMPLINGS = ('raw', 'sec', 'sec_shift', 'min', 'hr')
MAX_BATCH_SIZE = 10
_query_parallel_sema = BoundedSemaphore(MAX_PARALLEL_QUERIES)

_cassandra_enabled = True


@process_singleton
def get_cassandra_session():
    global _cassandra_enabled

    config = get_config()

    hostname = config.get('cassandra', 'hostname')
    username = config.get('cassandra', 'username')
    password = config.get('cassandra', 'password')
    keyspace = config.get('cassandra', 'keyspace')
Example #16
def non_critic_section():
    p = current_process()
    for i in range(N_non_critic):
        print(p.name, "in non-critical section", "(%i/%i)" % (i, N_non_critic))
        delay()


def critic_section():
    p = current_process()
    for i in range(N_critic):
        print(p.name, "in CRITICAL section", "(%i/%i)" % (i, N_critic - 1))
        delay()


def task(semaphore):
    non_critic_section()
    semaphore.acquire()
    critic_section()
    semaphore.release()


if __name__ == '__main__':
    names = ["Ana", "Eva", "Pi", "Pam", "Pum"]
    jobs = []
    K = 2
    semaphore = BoundedSemaphore(K)
    for x in names:
        jobs.append(Process(target=task, name=x, args=(semaphore, )))
    for p in jobs:
        p.start()
Example #17
def __init__(self, max_workers):
    self.max_workers = max_workers
    self.lock = Lock()
    self.sem = BoundedSemaphore(max_workers)
    self.last_request = Value('d', 0.0)
    self.last_restricted_request = Value('d', 0.0)
Example #18
import subprocess
import os
import sys
from multiprocessing import BoundedSemaphore

# To share 'semaphore' among multiple workers on gunicorn, use '--preload' option
semaphore = BoundedSemaphore(int(os.environ.get('APP_WEBPACK_LIMIT', '2')))


class _WPManager():
    """
    Used in 'WebpackManager'.
    """
    def __init__(self):
        self.semaphore = semaphore

    def run(self, folder, instance_path, fname):

        if not self.semaphore.acquire(False):
            return False

        # Parent process: return 'True' immediately if fork is succeeded
        # Child process : executes 'webpack', releases semaphore and exits
        try:
            pid = os.fork()
            if pid == 0:  # Child
                (cwd, cmd) = self.gen_command(instance_path)
                my_env = self.gen_exec_env(folder, fname)
                proc = subprocess.Popen(cmd, env=my_env, cwd=cwd)
                outs, errs = proc.communicate()
            else:
Example #19
#!/usr/bin/env python

from multiprocessing import Pool, BoundedSemaphore
import sys
import time

import cassandra
from cassandra.cluster import Cluster
from cassandra.query import tuple_factory, SimpleStatement

lock = BoundedSemaphore(6)

class QueryManager(object):

    batch_size = 10

    def __init__(self, cluster, process_count=None):
        self.pool = Pool(processes=process_count, initializer=self._setup, initargs=(cluster,))

    @classmethod
    def _setup(cls, cluster):
        with lock:
            cls.session = cluster.connect('ooi')
            cls.session.row_factory = tuple_factory
            print('worker ready')

    def close_pool(self):
        self.pool.close()
        self.pool.join()

    def get_results(self, query, params):