Example #1
class MKDockerClient(docker.DockerClient):
    '''a docker.DockerClient that caches containers and node info'''
    API_VERSION = "auto"
    _DEVICE_MAP_LOCK = multiprocessing.Lock()

    def __init__(self, config):
        super(MKDockerClient,
              self).__init__(config['base_url'],
                             version=MKDockerClient.API_VERSION)
        all_containers = self.containers.list(all=True)
        if config['container_id'] == "name":
            self.all_containers = dict([(c.attrs["Name"].lstrip('/'), c)
                                        for c in all_containers])
        elif config['container_id'] == "long":
            self.all_containers = dict([(c.attrs["Id"], c)
                                        for c in all_containers])
        else:
            self.all_containers = dict([(c.attrs["Id"][:12], c)
                                        for c in all_containers])
        self._env = {"REMOTE": os.getenv("REMOTE", "")}
        self._container_stats = {}
        self._device_map = None
        self.node_info = self.info()

    def device_map(self):
        with self._DEVICE_MAP_LOCK:
            if self._device_map is not None:
                return self._device_map

            self._device_map = {}
            for device in os.listdir('/sys/block'):
                with open('/sys/block/%s/dev' % device) as handle:
                    self._device_map[handle.read().strip()] = device

        return self._device_map

    @staticmethod
    def iter_socket(sock, descriptor):
        '''iterator to recv data from container socket
        '''
        header = sock.recv(8)
        while header:
            actual_descriptor, length = struct.unpack('>BxxxL', header)
            while length:
                data = sock.recv(length)
                length -= len(data)
                LOGGER.debug("Received data: %r", data)
                if actual_descriptor == descriptor:
                    yield data
            header = sock.recv(8)

    def get_stdout(self, exec_return_val):
        '''read stdout from container process
        '''
        if isinstance(exec_return_val, tuple):
            # it's a tuple since version 3.0.0
            exit_code, sock = exec_return_val
            if exit_code not in (0, None):
                return ''
        else:
            sock = exec_return_val

        return ''.join(self.iter_socket(sock, 1))

    def run_agent(self, container):
        '''run checkmk agent in container'''
        result = container.exec_run(['check_mk_agent'],
                                    environment=self._env,
                                    socket=True)
        return self.get_stdout(result)

    def get_container_stats(self, container_key):
        '''return cached container stats'''
        try:
            return self._container_stats[container_key]
        except KeyError:
            pass

        container = self.all_containers[container_key]
        if not container.status == "running":
            return self._container_stats.setdefault(container_key, None)

        stats = container.stats(stream=False)
        return self._container_stats.setdefault(container_key, stats)
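The device_map method above lazily builds its cache while holding a class-level multiprocessing.Lock, so only one caller performs the /sys/block scan and later callers reuse the result. A minimal standalone sketch of that lazy-init-under-a-lock pattern (the class and the cached data here are illustrative, not part of the agent):

import multiprocessing


class CachedDeviceMap:
    # Class-level lock, shared by all instances (a stand-in for
    # MKDockerClient._DEVICE_MAP_LOCK above).
    _LOCK = multiprocessing.Lock()

    def __init__(self):
        self._cache = None

    def device_map(self):
        with self._LOCK:
            if self._cache is None:
                # The expensive work happens at most once.
                self._cache = {"8:0": "sda"}
        return self._cache


if __name__ == "__main__":
    print(CachedDeviceMap().device_map())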
Example #2
 def __init__(self, fd):
     self.reader = multiprocessing.connection.Connection(fd, writable=False)
     self.rlock = multiprocessing.Lock()
Example #3
from utils import *
from trainer import Trainer
from model import *
from sklearn.preprocessing import LabelBinarizer
import numpy as np
import multiprocessing as mp
import time
from sklearn.metrics import confusion_matrix

lock = mp.Lock()
counter = mp.Value('i', 0)


def main():
    # load parameters
    config = get_args()
    # load data
    X_train, y_train, X_val, y_val, X_test, y_test = load_data(config)
    # normalize X
    X_train, X_val, X_test = np.array(X_train) / 255, np.array(
        X_val) / 255, np.array(X_test) / 255
    # one-hot encode the string labels in y
    label_encoder = LabelBinarizer()
    y_train_onehot = label_encoder.fit_transform(y_train)
    y_val_onehot = label_encoder.transform(y_val)
    y_test_onehot = label_encoder.transform(y_test)
    # print some miscellaneous info
    print('X_train size:', len(X_train))
    max_x = 0
    for x in X_train:
        if max_x < len(x):
Example #4
 def __init__(self):
     super().__init__()
     self.note = Note("夜不语诡异档案")
     self.connection = database.open_connection()
     self.lock = mp.Lock()
Example #5
def worker_with(lock, f):
    with lock:
        fs = open(f, 'a+')
        n = 10
        while n > 1:
            fs.write("Lock acquired via with\n")
            n -= 1
        fs.close()


def worker_no_with(lock, f):
    lock.acquire()
    try:
        fs = open(f, 'a+')
        n = 10
        while n > 1:
            fs.write("Lock acquired directly\n")
            n -= 1
        fs.close()
    finally:
        lock.release()


if __name__ == '__main__':
    lock = multiprocessing.Lock()
    f = 'file.txt'
    w = multiprocessing.Process(target=worker_with, args=(lock, f))
    nw = multiprocessing.Process(target=worker_no_with, args=(lock, f))
    w.start()
    nw.start()
    print("end!")
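The two workers above do the same thing: the with lock: block acquires the lock on entry and releases it on exit, even if the body raises, which is exactly what the explicit acquire/try/finally/release spells out. A minimal runnable sketch of that equivalence (the file name and message are illustrative):

import multiprocessing


def append_line(lock, path, text):
    # 'with lock:' is shorthand for lock.acquire() ... finally: lock.release()
    with lock:
        with open(path, 'a') as handle:
            handle.write(text + '\n')


if __name__ == '__main__':
    lock = multiprocessing.Lock()
    workers = [multiprocessing.Process(target=append_line,
                                       args=(lock, 'file.txt', 'line %d' % i))
               for i in range(2)]
    for p in workers:
        p.start()
    for p in workers:
        p.join()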
Example #6
class BaseStore(MutableMapping):
    """The Base class for Daemon stores"""

    _kind = ''
    _status_model = StoreStatus
    _lock = multiprocessing.Lock()

    def __init__(self):
        self._logger = JinaLogger(self.__class__.__name__, **vars(jinad_args))
        self.status = self.__class__._status_model()

    def add(self, *args, **kwargs) -> DaemonID:
        """Add a new element to the store. This method needs to be overridden by the subclass


        .. #noqa: DAR101"""
        raise NotImplementedError

    def update(self, *args, **kwargs) -> DaemonID:
        """Updates an element in the store. This method needs to be overridden by the subclass


        .. #noqa: DAR101"""
        raise NotImplementedError

    def delete(self, *args, **kwargs) -> DaemonID:
        """Deletes an element from the store. This method needs to be overridden by the subclass


        .. #noqa: DAR101"""
        raise NotImplementedError

    def clear(self) -> None:
        """Clears all elements from the store. This method needs to be overridden by the subclass


        .. #noqa: DAR101"""
        raise NotImplementedError

    def __iter__(self):
        return iter(self.status.items)

    def __len__(self):
        return len(self.status.items)

    def __repr__(self) -> str:
        return str(self.status.dict())

    def keys(self) -> Sequence['DaemonID']:
        """Get keys in the store

        :return: Keys in the local store
        """

        return self.status.items.keys()

    def values(self) -> Sequence[Union['WorkspaceItem', 'ContainerItem']]:
        """Get values in the store

        :return: Values in the local store
        """

        return self.status.items.values()

    def items(
        self,
    ) -> Sequence[Tuple['DaemonID', Union['WorkspaceItem', 'ContainerItem']]]:
        """Get items in the store

        :return: Items in the local store
        """

        return self.status.items.items()

    def __getitem__(self,
                    key: DaemonID) -> Union['WorkspaceItem', 'ContainerItem']:
        """Fetch a Container/Workspace object from the store

        :param key: the key (DaemonID) of the object
        :return: the value of the object
        """
        return self.status.items[key]

    def __setitem__(self, key: DaemonID, value: StoreItem) -> None:
        """Add a Container/Workspace object to the store

        :param key: the key (DaemonID) of the object
        :param value: the value to be assigned
        """
        self.status.items[key] = value
        self.status.num_add += 1
        self.status.time_updated = datetime.now()

    def __delitem__(self, key: DaemonID) -> None:
        """Release a Container/Workspace object from the store

        :param key: the key (DaemonID) of the object


        .. #noqa: DAR201"""
        self.status.items.pop(key)
        self.status.num_del += 1
        self.status.time_updated = datetime.now()

    def __setstate__(self, state: Dict):
        self._logger = JinaLogger(self.__class__.__name__, **vars(jinad_args))
        now = datetime.now()
        self.status = self._status_model(**state)
        self.status.time_updated = now

    def __getstate__(self) -> Dict:
        return self.status.dict()

    @classmethod
    def dump(cls, func) -> Callable:
        """Dump store as a pickle to local workspace

        :param func: function to be wrapped
        :return: decorator for dump
        """
        def wrapper(self, *args, **kwargs):
            r = func(self, *args, **kwargs)
            filepath = os.path.join(__root_workspace__, f'{self._kind}.store')
            if Path(filepath).is_file():
                shutil.copyfile(filepath, f'{filepath}.backup')
            with open(filepath, 'wb') as f:
                pickle.dump(self, f)
            return r

        return wrapper

    @classmethod
    def load(cls) -> 'BaseStore':
        """Load store from a pickle in local workspace

        :return: Store from local or empty store
        """

        filepath = os.path.join(__root_workspace__, f'{cls._kind}.store')
        if Path(filepath).is_file() and os.path.getsize(filepath) > 0:
            with open(filepath, 'rb') as f:
                return pickle.load(f)
        else:
            return cls()

    def reset(self) -> None:
        """Call :meth:`clear` and reset all stats."""

        self.clear()
        self.status = self._status_model()
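The dump classmethod above is a decorator factory: subclasses presumably wrap their mutating methods with it so that every successful change is pickled back to the workspace. A minimal sketch of that pattern under the same assumption (TinyStore, MyStore, and the temp-file path are illustrative, not Jina's actual classes):

import os
import pickle
import tempfile


class TinyStore:
    _kind = 'tiny'

    def __init__(self):
        self.items = {}

    @classmethod
    def dump(cls, func):
        def wrapper(self, *args, **kwargs):
            result = func(self, *args, **kwargs)
            # Persist the whole store after every successful mutation.
            path = os.path.join(tempfile.gettempdir(), '%s.store' % self._kind)
            with open(path, 'wb') as handle:
                pickle.dump(self, handle)
            return result

        return wrapper


class MyStore(TinyStore):
    @TinyStore.dump
    def add(self, key, value):
        self.items[key] = value
        return key


if __name__ == '__main__':
    store = MyStore()
    store.add('workspace-1', {'ports': [8080]})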
Example #7
 def __init__(self):
     self.lock = multiprocessing.Lock()
Example #8
 def __init__(self):
     self.cpu = 0.
     self.wall = 0.
     self.lock = multiprocessing.Lock()
Example #9
def fill_samples(
    meta: Meta,
    dataset: Dataset,
    max_samples=SAMPLES_PER_LANGUAGE,
    n_jobs=-1,
    filter_lang: T.Optional[str] = None,
    query_extra: T.Optional[str] = None,
):
    if n_jobs == -1:
        n_jobs = multiprocessing.cpu_count()

    q_req = multiprocessing.Queue(maxsize=20 * 400)
    q_resp = multiprocessing.Queue(maxsize=20 * 400)
    lock = multiprocessing.Lock()
    workers = []
    for n in range(1, n_jobs + 1):
        p = multiprocessing.Process(
            name="harvest-worker-%d" % n,
            target=_worker,
            args=(meta, dataset, lock, q_req, q_resp),
        )
        p.start()
        workers.append(p)

    # Search repos
    needed_samples = count_missing_samples(meta, dataset, max_samples)
    if filter_lang:
        needed_samples = {filter_lang: needed_samples[filter_lang]}
    seen_repos = get_seen_repos()
    all_repos = {}
    for lang, n_need in needed_samples.items():
        if not n_need:
            continue
        all_repos[lang] = search_repos(lang=lang,
                                       n=n_need * 5,
                                       query_extra=query_extra,
                                       seen_repos=seen_repos)
        random.shuffle(all_repos[lang])

    while True:
        n_added = 0
        for lang, n_need in needed_samples.items():
            if not n_need:
                continue
            repos = [r for r in all_repos[lang] if r not in seen_repos]
            repos_next, repos_remaining = repos[:n_need], repos[n_need:]
            logger.info(f"trying to get {len(repos_next)} repos for {lang}")
            for repo in repos_next:
                seen_repos.add(repo)
                q_req.put((repo, lang))
                n_added += 1
        if n_added == 0:
            logger.info("no more jobs to add")
            break
        for _ in range(n_added):
            repo, lang, ok = q_resp.get()
            if ok:
                needed_samples[lang] -= 1
    q_req.close()
    q_resp.close()
    for p in workers:
        p.join()
Example #10
 def __init__(self):
     self._log = multiprocessing.get_logger()
     self._lock = multiprocessing.Lock()
Example #11
def compute_voxel_depth(xyz,
                        inner,
                        outer,
                        S2E_mat,
                        method='equivolume',
                        n_jobs=4,
                        dtype=None,
                        lock=None):
    '''
    Parameters
    ----------
    method : str
        "equivolume"
        "equidistance"

    Notes
    -----
    1. Unfortunately, dtype=np.float32 doesn't work for high density surface meshes
       (because the element area becomes zero in some locations, which is invalid). 
       Although it does work for ordinary meshes, it is unnecessary in that case.
    '''
    if isinstance(xyz, six.string_types):
        xyz = io.Mask(xyz, kind='full').xyz
    xyz = xyz.astype(dtype)
    if isinstance(S2E_mat, six.string_types):
        S2E_mat = afni.get_S2E_mat(S2E_mat, mat='S2B')
    S2E_mat = S2E_mat.astype(dtype)
    if method == 'equivolume':
        method = 'equivolume_inside'
    if lock is None:
        lock = multiprocessing.Lock()
    min_depth, max_depth = -0.2, 1.2
    n_depths = round((max_depth - min_depth) / 0.1) + 1
    alphas = np.linspace(min_depth, max_depth, n_depths, dtype=dtype)
    print('>> Compute intermediate meshes...')
    verts, faces = compute_intermediate_mesh(inner,
                                             outer,
                                             alphas,
                                             method=method,
                                             dtype=dtype)  # 234s
    n_faces = faces.shape[0]
    LPI2RAI = np.array([-1, -1, 1], dtype=dtype)
    verts = np.dot(S2E_mat[:, :3], (verts * LPI2RAI).transpose(
        0, 2, 1)).transpose(1, 2, 0) + S2E_mat[:, 3]
    face_xyz = (verts[:, faces[:, 0], :] + verts[:, faces[:, 1], :] +
                verts[:, faces[:, 2], :]).reshape(-1, 3) / 3
    print('>> Construct k-d tree...')
    # kdt = spatial.cKDTree(face_xyz.reshape(-1,3)) # 171s
    kdt = spatial.cKDTree(face_xyz)  # This is slightly more memory efficient
    depths = utils.SharedMemoryArray.zeros(
        xyz.shape[0], dtype=dtype, lock=False)  # np.zeros(xyz.shape[0])
    print('>> Compute cortical depth...')

    def compute_depth(ids, depths, xyz, kdt, verts, faces, alphas, n_faces,
                      n_depths, min_depth, max_depth):
        for k in ids:
            p = xyz[k]
            # idx = np.argmin(np.linalg.norm(p - face_xyz, axis=-1)) // faces.shape[0]
            idx = kdt.query(p)[1]  # This is like 4000x faster!
            fidx = idx % n_faces
            didx = idx // n_faces
            if didx == 0:
                depths[k] = min_depth
            elif didx == n_depths - 1:
                depths[k] = max_depth
            else:
                A, B, C = verts[didx - 1:didx + 2][:,
                                                   faces[fidx, :], :].swapaxes(
                                                       0, 1)
                N = np.cross(B - A, C - A)
                N = N / np.linalg.norm(N, axis=-1, keepdims=True)
                T = np.sum(A * N, axis=-1) - np.sum(p * N, axis=-1)
                W = np.abs(T)
                if T[0] * T[1] < 0:
                    w = W[1] / (W[0] + W[1])
                    depths[k] = w * alphas[didx - 1] + (1 - w) * alphas[didx]
                else:
                    w = W[1] / (W[2] + W[1])
                    depths[k] = w * alphas[didx + 1] + (1 - w) * alphas[didx]

    with lock:
        pc = utils.PooledCaller(pool_size=n_jobs)
        pc(
            pc.run(compute_depth, ids, depths, xyz, kdt, verts, faces, alphas,
                   n_faces, n_depths, min_depth, max_depth)
            for ids in pc.idss(len(depths)))
    return depths
Example #12
class GmailService(IssueService):
    APPLICATION_NAME = 'Bugwarrior Gmail Service'
    SCOPES = 'https://www.googleapis.com/auth/gmail.readonly'
    DEFAULT_CLIENT_SECRET_PATH = '~/.gmail_client_secret.json'

    ISSUE_CLASS = GmailIssue
    CONFIG_PREFIX = 'gmail'
    AUTHENTICATION_LOCK = multiprocessing.Lock()

    def __init__(self, *args, **kw):
        super(GmailService, self).__init__(*args, **kw)

        self.query = self.config.get('query', 'label:Starred')
        self.login_name = self.config.get('login_name', 'me')
        self.client_secret_path = self.get_config_path(
            'client_secret_path', self.DEFAULT_CLIENT_SECRET_PATH)

        credentials_name = clean_filename(
            self.login_name if self.login_name != 'me' else self.target)
        self.credentials_path = os.path.join(
            self.config.data.path,
            'gmail_credentials_%s.json' % (credentials_name, ))
        self.gmail_api = self.build_api()

    def get_config_path(self, varname, default_path=None):
        return os.path.expanduser(self.config.get(varname, default_path))

    def build_api(self):
        credentials = self.get_credentials()
        http = credentials.authorize(httplib2.Http())
        return googleapiclient.discovery.build('gmail', 'v1', http=http)

    def get_credentials(self):
        """Gets valid user credentials from storage.

        If nothing has been stored, or if the stored credentials are invalid,
        the OAuth2 flow is completed to obtain the new credentials.

        Returns:
            Credentials, the obtained credential.
        """
        with self.AUTHENTICATION_LOCK:
            log.info('Starting authentication for %s', self.target)
            store = oauth2client.file.Storage(self.credentials_path)
            credentials = store.get()
            if not credentials or credentials.invalid:
                log.info("No valid login. Starting OAUTH flow.")
                flow = oauth2client.client.flow_from_clientsecrets(
                    self.client_secret_path, self.SCOPES)
                flow.user_agent = self.APPLICATION_NAME
                flags = oauth2client.tools.argparser.parse_args([])
                credentials = oauth2client.tools.run_flow(flow, store, flags)
                log.info('Storing credentials to %r', self.credentials_path)
            return credentials

    def get_labels(self):
        result = self.gmail_api.users().labels().list(
            userId=self.login_name).execute()
        return {label['id']: label['name'] for label in result['labels']}

    def get_threads(self):
        thread_service = self.gmail_api.users().threads()

        result = thread_service.list(userId=self.login_name,
                                     q=self.query).execute()
        return [
            thread_service.get(userId='me', id=thread['id']).execute()
            for thread in result.get('threads', [])
        ]

    def annotations(self, issue):
        sender = issue.extra['last_sender_name']
        subj = issue.extra['subject']
        issue_url = issue.get_processed_url(issue.extra['url'])
        return self.build_annotations([(sender, subj)], issue_url)

    def issues(self):
        labels = self.get_labels()
        for thread in self.get_threads():
            issue = self.get_issue_for_record(thread,
                                              thread_extras(thread, labels))
            extra = {
                'annotations': self.annotations(issue),
            }
            issue.update_extra(extra)
            yield issue
Example #13
 def __init__(self):
     self.tlock = threading.Lock()
     self.mlock = multiprocessing.Lock()
Example #14
def commencementdujeu(nbjoueurs):
    decklist = []
    cartesonboard = []
    keys = []
    mqverifT = []
    lockverif = mp.Lock()
    deck = mp.Queue(20)
    # Create a shuffled deck
    for i in range(10):
        decklist.append(Cards(i + 1, "bleu"))
        decklist.append(Cards(i + 1, "rouge"))
    random.shuffle(decklist)
    # Then convert it into a queue so it can be shared with all the processes
    for card in decklist:
        deck.put(card)
    cartesonboard.append(deck.get())

    mqtouchejouee1 = sysv_ipc.MessageQueue
    mqtouchejouee2 = sysv_ipc.MessageQueue
    mqcards = sysv_ipc.MessageQueue
    mqhandsize = sysv_ipc.MessageQueue
    mqaffichage = sysv_ipc.MessageQueue

    try:
        mqtouchejouee1 = sysv_ipc.MessageQueue(201, sysv_ipc.IPC_CREX)
    except sysv_ipc.ExistentialError:
        sysv_ipc.MessageQueue(201).remove()
        mqtouchejouee1 = sysv_ipc.MessageQueue(201, sysv_ipc.IPC_CREX)
    try:
        mqtouchejouee2 = sysv_ipc.MessageQueue(202, sysv_ipc.IPC_CREX)
    except sysv_ipc.ExistentialError:
        sysv_ipc.MessageQueue(202).remove()
        mqtouchejouee2 = sysv_ipc.MessageQueue(202, sysv_ipc.IPC_CREX)
    try:
        mqcards = sysv_ipc.MessageQueue(100, sysv_ipc.IPC_CREX)
    except sysv_ipc.ExistentialError:
        sysv_ipc.MessageQueue(100).remove()
        mqcards = sysv_ipc.MessageQueue(100, sysv_ipc.IPC_CREX)
    try:
        mqhandsize = sysv_ipc.MessageQueue(300, sysv_ipc.IPC_CREX)
    except sysv_ipc.ExistentialError:
        sysv_ipc.MessageQueue(300).remove()
        mqhandsize = sysv_ipc.MessageQueue(300, sysv_ipc.IPC_CREX)
    try:
        mqaffichage = sysv_ipc.MessageQueue(400, sysv_ipc.IPC_CREX)
    except sysv_ipc.ExistentialError:
        sysv_ipc.MessageQueue(400).remove()
        mqaffichage = sysv_ipc.MessageQueue(400, sysv_ipc.IPC_CREX)

    # Initialize the different processes (the board and the players)
    board = Board(deck, cartesonboard, nbjoueurs)

    for i in range(nbjoueurs):
        keys.append(i + 1)
        try:
            mqverif = sysv_ipc.MessageQueue(keys[i], sysv_ipc.IPC_CREX)
        except sysv_ipc.ExistentialError:
            sysv_ipc.MessageQueue(keys[i]).remove()
            mqverif = sysv_ipc.MessageQueue(keys[i], sysv_ipc.IPC_CREX)
        mqverifT.append(mqverif)

    # Start the processes
    board.start()

    # print('Starting the non-blocking keyboard reader')
    touchesjoueurs = [['a', 'z', 'e', 'r', 't', 'y', 'u', 'i', 'o'],
                      ['1', '2', '3', '4', '5', '6', '7', '8', '9']]
    try:
        kb = KBHit()
        while not deck.empty():
            if kb.kbhit():
                c = kb.getch()
                if c in touchesjoueurs[0]:
                    mqtouchejouee1.send(
                        str((touchesjoueurs[0].index(c) + 1)).encode())
                elif c in touchesjoueurs[1]:
                    mqtouchejouee2.send(c.encode())
                else:
                    print("Please enter a valid key")
        mqcards.remove()
        mqtouchejouee1.remove()
        mqtouchejouee2.remove()
        mqhandsize.remove()
        mqaffichage.remove()
        for x in mqverifT:
            x.remove()
        os.killpg(os.getpgid(board.pid), signal.SIGTERM)

    except KeyboardInterrupt:
        print("The big cleanup")
        mqcards.remove()
        mqtouchejouee1.remove()
        mqtouchejouee2.remove()
        mqhandsize.remove()
        mqaffichage.remove()
        for x in mqverifT:
            x.remove()
        os.killpg(os.getpgid(board.pid), signal.SIGTERM)
Example #15
def parallel_try(func,
                 args=None,
                 kwargs=None,
                 nworkers=None,
                 progress=None,
                 progress_interval=0.1,
                 update_interval=0.05,
                 initializer=None,
                 join=True,
                 use_threads=False):
    """ Execute `func` in parallel until it returns a value.
        Return that value.
        
            `func`      the function to call
            `args`      the arguments to `func`
            `kwargs`    the keyword arguments to `func`
            `nworkers`  the number of worker processes (/threads) to use
            `progress`  a function to periodically call with the number of
                        tries already performed
            `progress_interval`     the approximate time interval to call
                        the `progress` function
            `update_interval`       the approximate interval in which the
                        worker threads report their status --- a higher
                        value gives better performance, but worse latency
                        (if `join` is True)
            `initializer`           called in each worker process when spawned,
                        with (args, kwargs) as arguments.  `initializer`
                        may change args, kwargs.
            `join`      specifies whether to wait for all threads to finish
            `use_threads`   specifies to use threads instead of processes. """
    def worker(c_func, c_args, c_kwargs, c_lock, c_done, c_output, c_counter,
               c_initializer):
        try:
            if c_initializer is not None:
                c_initializer(c_args, c_kwargs)
            last_update = time.time()
            iterations = 0
            while True:
                now = time.time()
                if now - last_update > update_interval:
                    last_update = now
                    with c_lock:
                        if c_done.is_set():
                            return
                        c_counter.value += iterations
                    iterations = 0
                ret = c_func(*c_args, **c_kwargs)
                iterations += 1
                if ret is None:
                    continue
                with c_lock:
                    if c_done.is_set():
                        return
                    c_output.put(ret)
                    c_done.set()
        except KeyboardInterrupt:
            pass

    if args is None:
        args = ()
    if kwargs is None:
        kwargs = {}
    if nworkers is None:
        nworkers = multiprocessing.cpu_count()
    p_done = multiprocessing.Event()
    p_lock = multiprocessing.Lock()
    p_counter = multiprocessing.RawValue('i', 0)
    p_input = multiprocessing.Queue()
    processes = []
    constr = threading.Thread if use_threads else multiprocessing.Process
    try:
        for i in xrange(nworkers):
            process = constr(target=worker,
                             args=(func, args, kwargs, p_lock, p_done, p_input,
                                   p_counter, initializer))
            processes.append(process)
            process.start()
        if progress is None:
            p_done.wait()
        else:
            while not p_done.is_set():
                p_done.wait(progress_interval)
                progress(p_counter.value)
        if join:
            for process in processes:
                process.join()
    except KeyboardInterrupt:
        for process in processes:
            process.terminate()
        raise
    return p_input.get()
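A hedged usage sketch for parallel_try, assuming the function above is importable from the current module; guess and the threshold value are illustrative. The callable returns None to keep trying and returns a value to stop every worker:

import random


def guess(threshold):
    # Return a value only when the random draw clears the threshold;
    # returning None makes parallel_try retry in every worker.
    x = random.random()
    return x if x > threshold else None


if __name__ == '__main__':
    # parallel_try is the function defined in the example above.
    print(parallel_try(guess, args=(0.9999,), nworkers=2))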
Example #16
class RemoteManager(object):
    NODES = {}
    LOCK = mp.Lock()
    PORT_ID = mp.Value('i', 8700)
    MASTER_IP = None
    __instance = None

    def __new__(cls):
        # Singleton
        if cls.__instance is None:
            cls.__instance = object.__new__(cls)
            cls.MASTER_IP = get_ip()
            cls.start_local_node()
        return cls.__instance

    @classmethod
    def get_master_node(cls):
        return cls.NODES[cls.MASTER_IP]

    @classmethod
    def start_local_node(cls):
        port = cls.get_port_id()
        with warning_filter():
            remote = Remote(cls.MASTER_IP, port, local=True)
        with cls.LOCK:
            cls.NODES[cls.MASTER_IP] = remote

    @classmethod
    def launch_each(cls, launch_fn, *args, **kwargs):
        for node in cls.NODES.values():
            node.submit(launch_fn, *args, **kwargs)

    @classmethod
    def get_remotes(cls):
        return list(cls.NODES.values())

    @classmethod
    def upload_files(cls, files, **kwargs):
        if isinstance(files, str):
            files = [files]
        for node in cls.NODES.values():
            node.upload_files(files, **kwargs)

    @classmethod
    def add_remote_nodes(cls, ip_addrs):
        ip_addrs = [ip_addrs] if isinstance(ip_addrs, str) else ip_addrs
        remotes = []
        for node_ip in ip_addrs:
            if node_ip in cls.NODES.keys():
                logger.warning('Already added remote {}'.format(node_ip))
                continue
            port = cls.get_port_id()
            remote = Remote(node_ip, port)
            with cls.LOCK:
                cls.NODES[node_ip] = remote
            remotes.append(remote)
        return remotes

    @classmethod
    def shutdown(cls):
        for node in cls.NODES.values():
            node.shutdown()
        cls.NODES = {}
        cls.__instance = None

    @classmethod
    def get_port_id(cls):
        with cls.LOCK:
            cls.PORT_ID.value += 1
            return cls.PORT_ID.value

    def __enter__(self):
        return self

    def __exit__(self, *args):
        for node in self.NODES.values():
            node.shutdown()

    def __repr__(self):
        reprstr = self.__class__.__name__ + '(\n'
        for node in self.NODES.values():
            reprstr += '{}, \n'.format(node)
        reprstr += ')\n'
        return reprstr
Example #17
#!/usr/bin/python2.7
#coding:utf-8

import multiprocessing

manager = multiprocessing.Manager()

#	targets
target_lock = multiprocessing.Lock()
# done_targets = []
# undone_targets =[]
done_targets = manager.list()
undone_targets = manager.list()
willdone_targets = manager.list()


#	main scan task

# scan_task_dict looks like
# {'pid': 3280,
# 'scanID': None,
# 'subtargets': {},
# 'target': 'http://www.leesec.com/'}

scan_task_dict = {}

scan_task_dict_lock = multiprocessing.Lock()


#	for each sub scan task
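The module-level Manager lists and target_lock above are presumably shared with scanner worker processes; a standalone sketch of how a worker might move a target from the undone list to the done list under such a lock (the helper function is illustrative):

import multiprocessing


def record_done(lock, done_targets, undone_targets, target):
    # Update both shared Manager lists atomically with respect to other workers.
    with lock:
        if target in undone_targets:
            undone_targets.remove(target)
        done_targets.append(target)


if __name__ == '__main__':
    manager = multiprocessing.Manager()
    done = manager.list()
    undone = manager.list(['http://www.leesec.com/'])
    lock = multiprocessing.Lock()
    worker = multiprocessing.Process(target=record_done,
                                     args=(lock, done, undone,
                                           'http://www.leesec.com/'))
    worker.start()
    worker.join()
    print(list(done), list(undone))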
Example #18
    def __init__(self, name, config, prepped_frame_queue, mqtt_client,
                 mqtt_config, ha_config):
        self.name = name
        self.config = config
        self.detected_objects = []
        self.recent_frames = {}
        self.rtsp_url = get_rtsp_url(self.config['rtsp'])
        self.regions = self.config.get('regions', {})
        self.frame_shape = get_frame_shape(self.rtsp_url)
        self.mqtt_client = mqtt_client

        # compute the flattened array length from the shape of the frame
        flat_array_length = self.frame_shape[0] * self.frame_shape[
            1] * self.frame_shape[2]
        # create shared array for storing the full frame image data
        self.shared_frame_array = mp.Array(ctypes.c_uint8, flat_array_length)
        # create shared value for storing the frame_time
        self.shared_frame_time = mp.Value('d', 0.0)
        # Lock to control access to the frame
        self.frame_lock = mp.Lock()
        # Condition for notifying that a new frame is ready
        self.frame_ready = mp.Condition()
        # Condition for notifying that objects were parsed
        self.objects_parsed = mp.Condition()

        # shape current frame so it can be treated as a numpy image
        self.shared_frame_np = tonumpyarray(self.shared_frame_array).reshape(
            self.frame_shape)

        self.capture_process = None

        # for each region, create a separate thread to resize the region and prep for detection
        self.detection_prep_threads = []
        for region in self.config.get('regions', []):
            # set a default threshold of 0.5 if not defined
            if 'threshold' not in region:
                region['threshold'] = 0.5
            if not isinstance(region['threshold'], float):
                print('Threshold is not a float. Setting to 0.5 default.')
                region['threshold'] = 0.5
            self.detection_prep_threads.append(
                FramePrepper(self.name, self.shared_frame_np,
                             self.shared_frame_time, self.frame_ready,
                             self.frame_lock, region['size'],
                             region['x_offset'], region['y_offset'],
                             region['threshold'], prepped_frame_queue))

        # start a thread to store recent motion frames for processing
        self.frame_tracker = FrameTracker(self.shared_frame_np,
                                          self.shared_frame_time,
                                          self.frame_ready, self.frame_lock,
                                          self.recent_frames)
        self.frame_tracker.start()

        # start a thread to store the highest scoring recent person frame
        self.best_person_frame = BestPersonFrame(self.objects_parsed,
                                                 self.recent_frames,
                                                 self.detected_objects)
        self.best_person_frame.start()

        # start a thread to expire objects from the detected objects list
        self.object_cleaner = ObjectCleaner(self.objects_parsed,
                                            self.detected_objects)
        self.object_cleaner.start()

        # start a thread to publish object scores (currently only person)
        if mqtt_client:
            self.mqtt_topic_prefix = '{}/{}'.format(
                mqtt_config.get(CONF_MQTT_TOPIC, ""), self.name)
            mqtt_publisher = MqttPublisher(self.name, self.mqtt_client,
                                           self.mqtt_topic_prefix,
                                           self.objects_parsed,
                                           self.detected_objects)
            mqtt_publisher.start()
        if ha_config:
            ha_publisher = HomeAssistantPublisher(
                self.name, ha_config[CONF_HOME_ASSISTANT_URL],
                ha_config[CONF_HOME_ASSISTANT_TOKEN], self.objects_parsed,
                self.detected_objects)
            ha_publisher.start()

        # create a watchdog thread for capture process
        self.watchdog = CameraWatchdog(self)

        # load in the mask for person detection
        if 'mask' in self.config:
            self.mask = cv2.imread("/config/{}".format(self.config['mask']),
                                   cv2.IMREAD_GRAYSCALE)
        else:
            self.mask = np.zeros((self.frame_shape[0], self.frame_shape[1], 1),
                                 np.uint8)
            self.mask[:] = 255
Example #19
    def plot(self):
        """
        This function plots analysis data as a function of the number of images. It uses multiprocessing to speed things up.
        :return: void
        """
        a = analysis_concurrent(self.y_start, self.y_stop, self.x_start,
                                self.x_stop, self.selection)
        trunc_list = []
        cpu_count = 10  #multiprocessing.cpu_count()
        temp_list = []
        for i in range(0, cpu_count):

            if i == cpu_count - 1:
                temp_list = self.tif_list[(
                    i * len(self.tif_list) //
                    cpu_count):(((1 + i) * len(self.tif_list) // cpu_count) +
                                (len(self.tif_list) % cpu_count))]
                temp_list.insert(0, i)

            else:
                temp_list = self.tif_list[(i * len(self.tif_list) //
                                           cpu_count):((1 + i) *
                                                       len(self.tif_list) //
                                                       cpu_count)]
                temp_list.insert(0, i)
            trunc_list.append(temp_list)

    # print(self.check_lists(self.tif_list, trunc_list, cpu_count))

        process_list = []
        x = range(0, len(self.tif_list))
        y = []
        q = multiprocessing.Queue()
        #a = multiprocessing.Array()
        l = multiprocessing.Lock()
        #p = multiprocessing.Process(a.x_and_y_vals, args=(l,))

        for i in range(0, cpu_count):
            process_list.append(
                multiprocessing.Process(target=a.x_and_y_vals,
                                        args=(l, q, trunc_list[i])))

        for process in process_list:
            process.start()

        for process in process_list:
            y.append(q.get())
            process.join()

        # for i in range(0,cpu_count):
        #     y.append(q.get())

        y = self.selectionSort(y)
        flattened_y = [val for sublist in y for val in sublist]

        assert len(flattened_y) == len(self.tif_list)
        plt.scatter(x, flattened_y)

        plt.xlabel("file num")
        plt.ylabel(self.selection)

        #  plt.xscale()

        plt.show()
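The plot method above splits tif_list into chunks by hand, launches one Process per chunk, and later sorts the queued results back into order. The same split-work-gather shape can be written with multiprocessing.Pool, which returns results in submission order; a minimal sketch with stand-in data (this is an alternative formulation, not the class's actual code):

import multiprocessing


def measure_chunk(chunk):
    # Stand-in for analysis_concurrent.x_and_y_vals: one value per file name.
    return [len(name) for name in chunk]


if __name__ == '__main__':
    tif_list = ['a.tif', 'bb.tif', 'ccc.tif', 'dddd.tif', 'eeeee.tif']
    cpu_count = 2
    chunk_size = (len(tif_list) + cpu_count - 1) // cpu_count
    chunks = [tif_list[i:i + chunk_size]
              for i in range(0, len(tif_list), chunk_size)]
    with multiprocessing.Pool(processes=cpu_count) as pool:
        results = pool.map(measure_chunk, chunks)  # ordered like chunks
    flattened = [value for sublist in results for value in sublist]
    print(flattened)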
Example #20
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 11 08:56:17 2018

@author: busby
"""

import multiprocessing as mp
import time


def job(v, num, l):
    l.acquire()
    for _ in range(10):
        time.sleep(0.1)
        v.value += num
        print(v.value)
    l.release()


if __name__ == '__main__':
    l = mp.Lock()
    v = mp.Value('i', 0)
    p1 = mp.Process(target=job, args=(v, 1, l))
    p2 = mp.Process(target=job, args=(v, 3, l))
    p1.start()
    p2.start()
    p1.join()
    p2.join()
Example #21
    def __init__(self,
                 logger_manager,
                 filename=None,
                 cam_ip=None,
                 width=1920,
                 height=1200,
                 auto_expose=True,
                 auto_balance=True):

        self.logger = logger_manager.get_logger("BQ_Cam")
        self.sched_run = None
        if filename is None:
            # Set the camera-mode flag.
            self.mode = 0
            # In camera mode the resolution is user-defined; default is 1920*1200
            (self.w, self.h) = (width, height)
            self.cam_ip = cam_ip
            self.auto_expose = auto_expose
            self.auto_balance = auto_balance
            self.logger.debug("Network camera mode, IP: {}, (w, h): ({}, {}).".format(
                cam_ip, self.w, self.h))
        else:
            # Set the file-mode flag.
            self.mode = 1
            # In file mode, read the resolution from the file
            (self.w, self.h) = gige.get_resolution(filename)
            self.logger.debug("File-read mode: {}, (w, h): ({}, {}).".format(
                filename, self.w, self.h))

        self.width = self.w
        self.height = self.h

        # Prepare the process lock and the shared memory between processes.
        self.process_lock = multiprocessing.Lock()
        self.array_temp = np.ones(shape=(self.h * self.w * 3), dtype=np.ubyte)
        self.shared_array = RawArray(ctypes.c_ubyte, self.array_temp)
        self.shared_value = RawValue(ctypes.c_uint, 0)
        self.logger.debug("Shared process memory is ready.")

        if self.mode == 0:
            self.sched_run = schedrun.SchedRun(
                func=gige.get_frame_from_camera,
                args=(
                    self.shared_array,
                    self.shared_value,
                    self.process_lock,
                    False,
                ),
                init_func=gige.init_camera,
                init_args=(self.cam_ip, self.w, self.h, self.auto_expose,
                           self.auto_balance),
                clean_func=gige.close_camera,
                clean_args={},
                interval=0.0,
                init_interval=0.0)
        else:
            self.sched_run = schedrun.SchedRun(func=gige.get_frame_from_file,
                                               args=(
                                                   self.shared_array,
                                                   self.shared_value,
                                                   self.process_lock,
                                               ),
                                               init_func=gige.init_file,
                                               init_args=(filename, ),
                                               clean_func=gige.close_file,
                                               clean_args={},
                                               interval=0.025,
                                               init_interval=0.0)

        self.logger.debug("Frame-reading process started.")

        #while self.shared_value != 0:

        for i in range(10):
            if self.shared_value.value == 0:
                self.logger.debug("Frame-reading process is not ready; waiting 100 ms.")
                # The camera process is not ready yet.
                time.sleep(0.1)

        if self.shared_value.value == 0:
            return None
Example #22
The solution: Have one thread (process via multiprocessing) poll the buttons at 60 Hz
              and generate events via multiprocessing's shared queues.
              We can also do rendering in this process (Note: this is starting to sound like an X11 server ...)
"""
import multiprocessing
import time
import logging
import lcd
import random

logging.basicConfig(level=logging.INFO)

LCD_WIDTH, LCD_HEIGHT = lcd.LCD_WIDTH, lcd.LCD_HEIGHT

framebuffer_lock = multiprocessing.Lock()
framebuffer = multiprocessing.Array('b', LCD_WIDTH * LCD_HEIGHT, lock=False)
framebuffer_needs_redraw = multiprocessing.Value('b')
eventqueue = multiprocessing.Queue()


def server_main():
    """
    Read input and render at a fixed frame rate of 60 Hz.
    """
    ALL_KEYS = [
        fakelcd.KEY_LEFT, fakelcd.KEY_RIGHT, fakelcd.KEY_UP, fakelcd.KEY_DOWN,
        fakelcd.KEY_CENTER
    ]
    logger = logging.getLogger('server')
    logger.info('starting up')
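The docstring fragment above describes the design: one dedicated process polls the buttons (and renders) at roughly 60 Hz and hands events to clients through a shared queue. A standalone sketch of that loop, with the button polling and LCD rendering reduced to a placeholder comment (names are illustrative):

import multiprocessing
import time


def poll_and_render(eventqueue, stop):
    frame_interval = 1.0 / 60
    while not stop.is_set():
        started = time.time()
        # ... poll buttons and redraw the framebuffer here ...
        eventqueue.put(('tick', started))
        elapsed = time.time() - started
        time.sleep(max(0.0, frame_interval - elapsed))


if __name__ == '__main__':
    events = multiprocessing.Queue()
    stop = multiprocessing.Event()
    server = multiprocessing.Process(target=poll_and_render, args=(events, stop))
    server.start()
    print(events.get())  # first event produced by the polling process
    stop.set()
    server.join()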
Example #23
def main(args):

	# variables
	fp = None
	sequence = ""
	cline = ""
	lreads = []
	rreads = []
	lheaders = []
	header = ""
	result = 0
	seq_temp = ""
	fout_stub = args.stub
	ltemp = []
	dmap = {}
	using_map = False
	min_seq_len = 0
	made_reads = 0
	num_keys = 0
	num_threads = 0
	num_tasks = 0
	i = 0
	j = 0
	
	result = []
	
	# create result queue for parallel threads
	# processes = []
	qtasks = mp.Queue()
	qresult = mp.Queue()
	lock = mp.Lock()	

	fai = args.infile + ".fai"
	dfai = {}

	if args.pseudo_map and args.prob_map:
		sys.stderr.write("Error: you can't specify both --probability-map and --pseudo-map\n")
		return 1

	# 
	# check input file
	#
	if not file_exists(args.infile):
		sys.stderr.write("Error: input file doesn't exist!\n")
		return(1)
	#}

	#
	# check for fasta index
	#
	if not file_exists(fai):
		sys.stderr.write("> Cannot find fasta index, rebuilding now...\n")
		p1 = Popen("samtools faidx {:s}".format(args.infile).split())
		p1.wait()
	#}

	# hash fasta index
	sys.stderr.write("> Hashing fasta index...")
	fin = open(fai, "r")
	for szl in fin:
		ll = szl.strip().split("\t")
		dfai[ll[0]] = list(map(int, ll[1:]))
	#}
	fin.close()
	sys.stderr.write("done\n")

	#
	# set minimum reference length based on read length or fragment length
	if args.make_paired:
		min_seq_len = args.frag_len * 3
	else:
		min_seq_len = args.read_length * 3
	#}

	#
	# check if there is a map file
	#
	if args.map is not None or args.make_random_map > 0:
		
		if args.map is not None:
			# hash map file
			using_map = True
			fin = open(args.map, "r")
			for szl in fin:
				ll = szl.strip().split("\t")
				if len(ll) > 1:
					dmap[ll[0]] = float(ll[1])
				else:
					dmap[ll[0]] = args.coverage
				#}
			#}
	
			fin.close()
		else:
			#
			# Generate a random set of N features where N = args.make_random_map.
			#
			
			# make a new dict of fasta ref ids that are longer than the minimum
			# sequence length			
			dfai_temp = {}
			for rid in dfai.keys():
				if dfai[rid][0] > min_seq_len:
					# keep it
					dfai_temp[rid] = dfai[rid][0]
				#}
			#}
			
			if len(dfai_temp.keys()) < args.make_random_map:
				# not enough references are long enough to make the requested number of random
				# refs
				sys.stderr.write("Warning: only {:d} references are long enough to sample\n".format(len(dfai_temp)))
				args.make_random_map = len(dfai_temp.keys())
			#}
			
			# make a random set
			rset = sample(dfai_temp.keys(), args.make_random_map)
			
			dcounts = []
			if args.pseudo_map:
				dcounts = make_multinom_counts(args.total_reads, args.make_random_map)
			
			temp = sorted(dcounts)
			temp.reverse()
			dcounts = temp
			
			#print dcounts
			
			dmap = {}
			i = 0
			for rid in rset:
				if args.prob_map:
					dmap[rid] = dfai[rid][0]
					
				elif args.pseudo_map:
					if args.make_paired:
						dmap[rid] = dcounts[i]*args.read_length*2.0/dfai[rid][0]
					else:
						dmap[rid] = dcounts[i]*args.read_length*1.0/dfai[rid][0]
					i += 1
						
				else:
					dmap[rid] = args.coverage
				#}
			#}
		#}
		
		# check list against the fasta index
		for rid in dmap.keys():
			if rid in dfai:
				# check length against minimum length
				if dfai[rid][0] < min_seq_len:
					# remove feature
					dmap.pop(rid, None)
					sys.stderr.write("Warning: removing {:s} from simulation because it's too short ({:d})\n".format(rid, dfai[rid][0]))
				#}
			#}
		#}

		if args.prob_map:
			# values dmap are intended to be proportions of reads. first we have to 
			# make them sum to one and convert them to total read counts
			n_sum = 0
			for rid in dmap.keys():
				n_sum += dmap[rid]
			n_ratio = 1.0/n_sum

			for rid in dmap.keys():
				dmap[rid] = dmap[rid]*n_ratio * args.total_reads
				if dmap[rid] < 1:
					dmap[rid] = 1

				# scale by read length (or read length x2 for paired)
				if args.make_paired:
					dmap[rid] *= args.read_length*2
				else:
					dmap[rid] *= args.read_length

				# now the total base count is converted to coverage based on the
				# feature's length (from the fasta index)
				dmap[rid] = dmap[rid]/dfai[rid][0]
			#}
		#}

	elif args.make_random_map == -1:
		# for this mode all transcripts that are long enough to be sampled will be used and a semi-realistic count
		# distribution will be randomly generated.
		sys.stderr.write("> generating multinomial count distribution...")
		# first figure out which references can generate reads. this will be the dmap
		dmap = {}
		count_prob = []
		for rid in sorted(dfai.keys()):
			if dfai[rid][0] > min_seq_len:
				# keep it
				dmap[rid] = dfai[rid][0]
				count_prob.append(10**r.rnorm(1, 2.5, 1)[0]-0.01)
				if count_prob[-1] < 0:
					count_prob[-1] = 0
		
		# generate random multinomial count distribution
		count_levels = r.rmultinom(1, args.total_reads, count_prob)
		# copy count_levels into the dmap and convert to coverages (they get converted back into counts
		# in the read generating functions)
		i =0
		for rid in sorted(dmap.keys()):
			if args.make_paired:
				dmap[rid] = count_levels[i]*args.read_length*2.0/dfai[rid][0]
			else:
				dmap[rid] = count_levels[i]*args.read_length*1.0/dfai[rid][0]
			i += 1 
		
		sys.stderr.write("done\n")

	else:
		# no map file so we'll take all features
		for rid in dfai.keys():
			dmap[rid] = args.coverage
		#}
	#}
	
	#
	# split off extention from input file
#	ltemp = os.path.basename(args.infile).split(".")
#	fout_stub = ".".join(ltemp[0:(len(ltemp)-1)])

	#
	# open output file(s)
	if args.make_paired:
		fp1 = open(fout_stub + ".1.fq","w")
		fp2 = open(fout_stub + ".2.fq","w")
	else:
		fp = open(fout_stub + ".fq","w")


	sys.stderr.write("> Generating reads...\n")

	#
	# open fasta file and get to work
	#
	fpin = open(args.infile,"r")

	klist = dmap.keys()
	num_keys = len(klist)
	i = 0
	num_tasks = 0
	processes = []

	# make workers
	for i in range(args.num_threads):
		mp.Process(target=worker, args=(qtasks, qresult, lock)).start()
		
	# load up the tasks in the queue so the workers have something to do
	for rid in dmap.keys():
		if dfai[rid][0] >= min_seq_len and dmap[rid] > 0:
#			print "sending %s to workers" % rid
			# pull sequence
			sequence = fetch_ref_seq(fpin, dfai, rid).upper()
			
			# push into queue
			if args.make_paired:
				qtasks.put((make_paired_reads, (rid, sequence, args.read_length, dmap[rid], args.frag_len, args.frag_sdev, args.error_rate)))
			else:
				qtasks.put((make_reads, (rid, sequence, args.read_length, dmap[rid], args.error_rate)))
			
			# count tasks
			num_tasks += 1
	
	# retrieve results and print out
	for i in range(num_tasks):
		result = qresult.get()
#		print "received %d" % i
		if args.make_paired:
			write_paired_reads(fp1, fp2, result)
		else:
			write_reads(fp, result)
	
	# tell the processes to stop
	for i in range(args.num_threads):
#		print "stopping..."
		qtasks.put('STOP')
				
	fpin.close()

	if args.make_paired:
		fp1.close()
		fp2.close()
	else:
		fp.close()

	return 0
Example #24
REQUIRED_CONFIG_KEYS = {
    'tap': ['bucket', 'start_date'],
    'target': [
        'account',
        'dbname',
        'user',
        'password',
        'warehouse',
        's3_bucket',
        'stage',
        'file_format',
    ],
}

LOCK = multiprocessing.Lock()


def tap_type_to_target_type(csv_type):
    """Data type mapping from S3 csv to Snowflake"""

    return {
        'integer': 'INTEGER',
        'number': 'FLOAT',
        'string': 'VARCHAR',
        'boolean':
        'VARCHAR',  # The guess sometimes can be wrong, we'll use varchar for now.
        'date':
        'VARCHAR',  # The guess sometimes can be wrong, we'll use varchar for now.
        'date_override':
        'TIMESTAMP_NTZ',  # Column type to use when date_override defined in YAML
Example #25
MANIPULATION = "ON"  #ON/OFF  Manipulation of preferences

#	Thresholds	#
To = 0.25  #Overlap time (hours)
Tl = 5  #Off-routing (percentage)
Tm = 1  #Meal size  (kilograms)
Ta = 20  #Extra payload (percentage)
Tpm = 20  #Default perishable food travel distance (kilometers)
Tpnm = 5  #Default perishable food travel distance (kilometers)
Tnp = 100  #Default non-perishable food travel distance (kilometers)
Td = 2  #Process advance start threshold for donors (hours)
Tr = 3  #Process advance start threshold for receivers (hours)
Tw = 10  #Match acceptance window (minutes)

#   Do not change   #
LOCK = multiprocessing.Lock()  #Multiprocessing lock
CPU_COUNT = multiprocessing.cpu_count()  #Logical CPUs
MEMORY = math.ceil(psutil.virtual_memory().total / (1024.**3))  #RAM capacity
DATA_LOAD_LOCATION = os.path.dirname(sys.argv[0]) + "/"  #Data load location
DATA_STORE_LOCATION = os.path.dirname(sys.argv[0]) + "/"  #Data store location

##  Function definitions    ##


#   Print with lock    #
def print_locked(*content, sep=" ", end="\n"):

    store = DATA_STORE_LOCATION

    with open(store + "_Log_File.txt", "a") as log_file:
Example #26
                pass
            turn_flag.value = 1
            gyro_arr = []
        if stop_key:
            break


#main
bus = smbus.SMBus(1)
address = 0x68  # via i2cdetect
bus.write_byte_data(address, power_mgmt_1, 0)

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#s.connect((HOST,PORT))

mutex = mp.Lock()

distance = mp.Value("d", 0)
turn = mp.Value("i", 0)

turn_flag = mp.Value("i", 0)
dis_flag = mp.Value("i", 0)

p = mp.Process(target=get_bes, args=(mutex, distance, dis_flag))
p1 = mp.Process(target=check_turning, args=(mutex, turn, turn_flag))
p.start()
p1.start()

try:
    while True:
        #print(mp.current_process())
Example #27
 def __init__(self, fd):
     self.writer = multiprocessing.connection.Connection(fd, readable=False)
     self.wlock = multiprocessing.Lock()
     # Why bb.event needs this I have no idea
     self.event = self
Example #28
def color_trace_multi(inputs,
                      outputs,
                      colors,
                      processcount,
                      tracemasksdir,
                      quantization='mc',
                      dither=None,
                      remap=None,
                      stack=False,
                      prescale=2,
                      despeckle=2,
                      smoothcorners=1.0,
                      optimizepaths=0.2):
    """color trace input images with specified options

    inputs: list of input paths, source png files
    outputs: list of output paths, dest svg files
    colors: number of colors to quantize to, 0 for no quantization
    processcount: number of processes to launch for image processing
    quantization: color quantization algorithm to use:
        - 'mc' = median-cut (default, for few colors, uses pngquant)
        - 'as' = adaptive spatial subdivision (uses imagemagick, may result in fewer colors)
        - 'nq' = neuquant (for lots of colors, uses pngnq)
    dither: dithering algorithm to use. (Remember, final output is affected by despeckle.)
        None: the default, performs no dithering
        'floydsteinberg': available with 'mc', 'as', and 'nq'
        'riemersma': only available with 'as'
    remap: source of custom palette image for color reduction (overrides
        colors and quantization)
    stack: whether to stack color traces (recommended for more accurate output)
    despeckle: suppress speckles of this many pixels
    smoothcorners: corner smoothing: 0 for no smoothing, 1.334 for max
    optimizepaths: Bezier curve optimization: 0 for least, 5 for most
"""
    tmp = tempfile.mkdtemp()

    # create two job queues
    # q1 = scaling + color reduction
    q1 = multiprocessing.JoinableQueue()
    # q2 = isolation + tracing
    q2 = multiprocessing.JoinableQueue()

    # create a manager to share the layers between processes
    manager = multiprocessing.Manager()
    layers = []
    for i in range(min(len(inputs), len(outputs))):
        layers.append(manager.list())
    # and make a lock for reading and modifying layers
    layers_lock = multiprocessing.Lock()

    # create a shared memory counter of completed and total tasks for measuring progress
    progress = multiprocessing.Value('i', 0)
    if colors is not None:
        # this is only an estimate because quantization can result in less colors
        # than in the "colors" variable. This value is corrected by q1 tasks to converge
        # on the real total.
        total = multiprocessing.Value('i', len(layers) * colors)
    elif remap is not None:
        # get the number of colors in the palette image
        palettesize = len(make_palette(remap))
        # this is only an estimate because remapping can result in less colors
        # than in the remap variable. This value is corrected by q1 tasks to converge
        # on the real total.
        total = multiprocessing.Value('i', len(layers) * palettesize)
    else:
        #argparse should have caught this
        raise Exception(
            "One of the arguments 'colors' or 'remap' must be specified")

    # create and start processes
    processes = []
    for i in range(processcount):
        p = multiprocessing.Process(target=process_worker,
                                    args=(q1, q2, progress, total, layers,
                                          layers_lock, locals()))
        p.name = "color_trace worker #" + str(i)
        p.start()
        processes.append(p)

    try:
        # so for each input and (dir-appended) output...
        for index, (i, o) in enumerate(zip(inputs, outputs)):
            verbose(i, ' -> ', o)

            # add a job to the first job queue
            q1.put({
                'input': i,
                'output': o,
                'findex': index,
                'tracemasksdir': tracemasksdir
            })

        # show progress until all jobs have been completed
        # while progress.value < total.value:
        #     sys.stdout.write("\r%.1f%%" % (progress.value / total.value * 100))
        #     sys.stdout.flush()
        #     time.sleep(0.25)

        sys.stdout.write("\rTracing complete!\n")

        # join the queues just in case progress is wrong
        q1.join()
        q2.join()
    except (Exception, KeyboardInterrupt) as e:
        # shut down subprocesses
        for p in processes:
            p.terminate()
        shutil.rmtree(tmp)
        raise e

    # close all processes
    for p in processes:
        p.terminate()
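A hedged example invocation of color_trace_multi, assuming the function above and the external tools its docstring mentions (pngquant, pngnq, ImageMagick) are available; the file names and option values are illustrative only:

# Trace two PNGs into SVGs using 8 quantized colors and 4 worker processes.
color_trace_multi(inputs=['page1.png', 'page2.png'],
                  outputs=['page1.svg', 'page2.svg'],
                  colors=8,
                  processcount=4,
                  tracemasksdir='trace_masks',
                  quantization='mc',
                  stack=True)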
Example #29
# -*- coding: utf-8 -*-
# Author:cnn
from time import sleep
import multiprocessing

g_num = 0
# Create a process lock
mutex = multiprocessing.Lock()


# Multitasking with processes, second implementation approach
class MutiProcess(object):
    def update(self, num):
        global g_num
        for i in range(0, num + 1):
            # acquire the lock
            mutex.acquire()
            g_num += i
            mutex.release()
        print(g_num)
        sleep(1)

    def main(self):
        m1 = multiprocessing.Process(target=self.update, args=(100, ))
        m2 = multiprocessing.Process(target=self.update, args=(200, ))
        m1.start()
        m2.start()


if __name__ == '__main__':
    mu = MutiProcess()
    mu.main()
Example #30
def main():

    options = get_args()

    label = options.label if options.label else options.config

    prefix = '.{}.{}'.format(options.scheduler, options.agent.lower())

    config_dir = os.path.abspath(options.config_dir)
    config_file = os.path.join(config_dir, options.config + '.sumocfg')
    additional_file = os.path.join(config_dir,
                                   label + prefix + '.additional.xml')

    log_dir = os.path.abspath(options.log_dir)
    log_file = os.path.join(log_dir, label + prefix + '.summary.log')
    trip_file = os.path.join(log_dir, label + prefix + '.trip.xml')
    tls_switch_file = os.path.join(log_dir, label + prefix + '.tlsSwitch.xml')

    if options.save_output:
        debug_file = os.path.join(log_dir, label + prefix + '.log')
        logging.basicConfig(filename=debug_file, level=logging.DEBUG)

    with open(options.info_file, 'r') as fp:
        info = json.load(fp)

    with open(additional_file, 'w') as fp:
        fp.write('<additional>')
        for tlsID in info:
            fp.write(
                '<timedEvent type="SaveTLSSwitchStates" source="{}" dest="{}"/>'
                .format(tlsID, tls_switch_file))
        fp.write('</additional>')

    sumo_server = multiprocessing.Process(target=start_sumo,
                                          args=(config_file, log_file,
                                                trip_file, additional_file,
                                                options.port))
    sumo_server.start()

    logger.debug("Loaded tlsinfo")

    sets = []
    for sample_set in options.samples:
        with open(sample_set, 'rb') as fp:
            samples = pickle.load(fp)
            sets.append(samples)

    processes = []

    manager = multiprocessing.Manager()
    shared_results = manager.dict()
    shared_lock = multiprocessing.Lock()

    registry = adasco.registry.Registry()

    agent_module = importlib.import_module('adasco.agents.{}.{}'.format(
        options.scheduler, options.agent.lower()))
    agent_class = getattr(agent_module, '{}Agent'.format(options.agent))

    for tls in info:

        shared_results[tls] = manager.dict()

        Y = {int(phase): value for phase, value in info[tls]['Y'].items()}
        Gmin = {
            int(phase): value
            for phase, value in info[tls]['Gmin'].items()
        }
        Gmax = {
            int(phase): value
            for phase, value in info[tls]['Gmax'].items()
        }

        request_queue = multiprocessing.JoinableQueue()
        response_queue = multiprocessing.JoinableQueue()

        process = agent_class(
            ID=info[tls]['id'],
            phases=info[tls]['phases'],
            Y=Y,
            Gmin=Gmin,
            Gmax=Gmax,
            incoming_edges=info[tls]['incoming_edges'],
            outgoing_edges=info[tls]['outgoing_edges'],
            edge_lengths=info[tls]['edge_lengths'],
            turn_proportions=info[tls]['turn_proportions'],
            upstream_agents=info[tls]['upstream_agents'],
            startup_lost_time=info[tls]['startup_lost_time'],
            free_flow_speed=info[tls]['free_flow_speed'],
            headway=info[tls]['headway'],
            saturation_flow_rate=info[tls]['saturation_flow_rate'],
            time_resolution=info[tls]['time_resolution'],
            sampling_interval=info[tls]['sampling_interval'],
            merging_threshold=info[tls]['merging_threshold'],
            horizon_extension=info[tls]['horizon_extension'],
            extension_threshold=info[tls]['extension_threshold'],
            minimum_extension=info[tls]['minimum_extension'],
            shared_results_directory=shared_results,
            shared_lock=shared_lock,
            publish_directory=shared_results[tls],
            coordinate=options.coordinate,
            request_queue=request_queue,
            response_queue=response_queue,
            sample_count=options.sample_count,
            timelimit=options.timelimit,
            samples=sets)

        processes.append(process)

        first_phase = info[tls]['phases'][0]
        entry = adasco.registry.Entry(process, request_queue, response_queue,
                                      None, 0, Gmin[first_phase])
        registry.append(entry)

    master = adasco.master.Master(options.port, registry)
    processes.append(master)

    for process in processes:
        process.start()

    for process in processes:
        process.join()