Example #1
def setUp(self):
    self.updater = None
    self.received_message = None
    self.message_count = 0
    self.lock = Lock()
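A fixture like this typically guards state that a background handler thread mutates while the test thread polls it. A minimal sketch of that usage, with a hypothetical handler and polling helper (names are illustrative, not from the original project):

import time
from threading import Lock

class HandlerProbe:
    """Collects state that a background handler thread mutates."""

    def __init__(self):
        self.received_message = None
        self.message_count = 0
        self.lock = Lock()

    def handler(self, message):
        # Called from the handler thread; mutate shared state under the lock.
        with self.lock:
            self.received_message = message
            self.message_count += 1

    def wait_for_messages(self, n, timeout=5.0):
        # Poll from the test thread until n messages arrive or we time out.
        deadline = time.time() + timeout
        while time.time() < deadline:
            with self.lock:
                if self.message_count >= n:
                    return True
            time.sleep(0.05)
        return False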
Example #2
    )
)

def get_client():
    return qbClient(host="localhost", port=8090)

trackers = check_output(
    "curl -Ns https://raw.githubusercontent.com/XIU2/TrackersListCollection/master/all.txt"
    " https://ngosang.github.io/trackerslist/trackers_all_http.txt"
    " https://newtrackon.com/api/all | awk '$0'",
    shell=True,
).decode('utf-8')
trackerslist = set(trackers.split("\n"))
trackerslist.remove("")
trackerslist = "\n\n".join(trackerslist)
get_client().application.set_preferences({"add_trackers": f"{trackerslist}"})

DOWNLOAD_DIR = None
BOT_TOKEN = None

download_dict_lock = Lock()
status_reply_dict_lock = Lock()
# Key: update.effective_chat.id
# Value: telegram.Message
status_reply_dict = {}
# Key: update.message.message_id
# Value: An object of Status
download_dict = {}
# key: rss_title
# value: [rss_feed, last_link, last_title, filter]
rss_dict = {}

AUTHORIZED_CHATS = set()
SUDO_USERS = set()
AS_DOC_USERS = set()
AS_MEDIA_USERS = set()
Example #3
LISTEN_HOST = "0.0.0.0"
LISTEN_PORT = 25555  # the port you will connect to with the Minecraft client

TARGET_HOST = "127.0.0.1"
TARGET_PORT = 25565  # the port specified in server.properties

DEBUG = False  # if True, additional information is printed

#---------------------do not modify---------------------------#

players = 0
datacountbytes = 0
server_status = "offline"
timelefttillup = MINECRAFT_SERVER_STARTUPTIME
lock = Lock()
stopinstances = 0


def stop_empty_minecraft_server():
    global server_status, STOP_MINECRAFT_SERVER, players, timelefttillup, stopinstances, lock
    with lock:
        stopinstances -= 1
        if stopinstances > 0 or players > 0 or server_status == "offline":
            return
    server_status = "offline"
    os.system(STOP_MINECRAFT_SERVER)
    print('MINECRAFT SERVER IS SHUTTING DOWN!')
    timelefttillup = MINECRAFT_SERVER_STARTUPTIME
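
stop_empty_minecraft_server above is the tail end of a debounce: each call first decrements stopinstances, and only the most recently scheduled call (counter at zero, no players online, server still up) actually stops the server. A minimal self-contained sketch of that debounced-shutdown idea, assuming the delayed scheduling happens elsewhere via threading.Timer (the scheduling side is not shown in the snippet):

from threading import Lock, Timer

lock = Lock()
pending = 0  # number of delayed shutdown checks still in flight

def do_shutdown():
    print("shutting down")

def shutdown_check(is_idle):
    global pending
    with lock:
        pending -= 1
        if pending > 0 or not is_idle():
            return  # a newer check is pending, or the server is busy again
    do_shutdown()

def schedule_shutdown(delay, is_idle):
    # Call this on every disconnect; only the last scheduled check acts.
    global pending
    with lock:
        pending += 1
    Timer(delay, shutdown_check, args=(is_idle,)).start()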

Example #4
def __init__(self):
    self._lock = Lock()
    self._data = dict()
Example #5
def __init__(self):
    self.lock = Lock()
    self.elements = []
Example #6
# Copyright (C) 2007-2018 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------

# Python modules
from __future__ import absolute_import
from threading import Lock

# Third-party modules
import creole

# NOC modules
from ..macros.loader import loader as macro_loader
from .base import BaseParser

mw_lock = Lock()


class MacroWrapper(object):
    pass


class CreoleParser(BaseParser):
    name = "Creole"
    macro_wrapper = None

    @classmethod
    def to_html(cls, kb_entry):
        def custom_link_emit(node):
            if node.children:
                text = html_emitter.emit_children(node)
Example #7
    (select distinct 'LEVEL2' as LEVEL,  name_2||','||name_1||','||name_0 as NAME from location_locationlevel l where name_2=UPPER(%s) limit 5)
    union (select distinct 'LEVEL1' as LEVEL,  name_1||','||name_0 as NAME from location_locationlevel l where name_1=UPPER(%s) limit 5)
    union (select distinct 'LEVEL0' as LEVEL,  name_0 as NAME from location_locationlevel l where name_0=UPPER(%s) limit 5) order by LEVEL;"""
    cursor.execute(
        sql,
        [lowest_level, lowest_level, lowest_level, lowest_level, lowest_level])
    rows = cursor.fetchall()
    location_hierarchy = []
    for level, location in rows:
        location_hierarchy.append(location.split(','))
    return location_hierarchy[0] if location_hierarchy else lowest_level.split(',')


_tree = None
_tree_lock = Lock()


def get_location_tree():
    global _tree
    with _tree_lock:
        if _tree is None:
            _tree = LocationTree()
    return _tree


class LocationTree(object):
    def get_location_hierarchy_for_geocode(self, lat, long):
        row = self._get_location_level_row_for_geo_code(lat, long)
        lowest_level = self._get_lowest_level(row)
        location = []
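get_location_tree above is a lock-guarded lazy singleton: every caller takes _tree_lock, and the first one constructs the shared LocationTree. A generic sketch of the same pattern (names are illustrative):

from threading import Lock

_instance = None
_instance_lock = Lock()

def get_instance(factory):
    """Return the shared instance, creating it on first use."""
    global _instance
    with _instance_lock:
        if _instance is None:
            _instance = factory()
    return _instance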
Example #8
    return total


def frame_probe(source: Path):
    """Get frame count."""
    cmd = [
        "ffmpeg", "-hide_banner", "-i",
        source.absolute(), "-map", "0:v:0", "-f", "null", "-"
    ]
    r = subprocess.run(cmd, stdout=PIPE, stderr=PIPE)
    matches = re.findall(r"frame=\s*([0-9]+)\s",
                         r.stderr.decode("utf-8") + r.stdout.decode("utf-8"))
    return int(matches[-1])


doneFileLock = Lock()


def frame_check(source: Path, encoded: Path, temp, check):
    """Checking is source and encoded video frame count match."""
    try:
        status_file = Path(temp / 'done.json')

        if check:
            s1 = frame_probe(source)
            doneFileLock.acquire()
            with status_file.open() as f:
                d = json.load(f)
            d['done'][source.name] = s1
            with status_file.open('w') as f:
                json.dump(d, f)
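Note that the snippet acquires doneFileLock manually; if json.load or json.dump raised, the matching release (not shown here) would be skipped and later frame_check calls would block forever. A hedged sketch of the same update written with a with block, which releases the lock even on error:

import json
from pathlib import Path
from threading import Lock

done_file_lock = Lock()

def record_done(status_file: Path, name: str, frames: int):
    # The with block releases the lock even if the file I/O raises.
    with done_file_lock:
        with status_file.open() as f:
            d = json.load(f)
        d['done'][name] = frames
        with status_file.open('w') as f:
            json.dump(d, f)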
Example #9
    copy_current_request_context
from flask_socketio import SocketIO, emit, join_room, leave_room, \
    close_room, rooms, disconnect
import sqlite3
from flask import request, jsonify

# Set this variable to "threading", "eventlet" or "gevent" to test the
# different async modes, or leave it set to None for the application to choose
# the best option based on installed packages.
async_mode = None

app = Flask(__name__)
app.config['SECRET_KEY'] = 'secret!'
socketio = SocketIO(app, async_mode=async_mode)
thread = None
thread_lock = Lock()
db_name = '/home/pi/flask/Flask-SocketIO/example/com4016.db'


def create_db():
    # Connect
    conn = sqlite3.connect(db_name)
    c = conn.cursor()

    # Create the table
    c.execute('''DROP TABLE IF EXISTS TB4016''')  # drop the old table if it exists (this is temporary data)
    c.execute('''CREATE TABLE TB4016 (id INTEGER PRIMARY KEY AUTOINCREMENT, port text,insert_time text,device text, step int, valuec float, absval float)''')

    # Close
    conn.close()
Example #10
def client(queue, port, server_address, args):
    if args.client_cpu_affinity >= 0:
        os.sched_setaffinity(0, [args.client_cpu_affinity])

    import numpy as np

    if args.object_type == "numpy":
        import numpy as xp
    elif args.object_type == "cupy":
        import cupy as xp

        xp.cuda.runtime.setDevice(args.client_dev)
    else:
        import cupy as xp

        import rmm

        rmm.reinitialize(
            pool_allocator=True,
            managed_memory=False,
            initial_pool_size=args.rmm_init_pool_size,
            devices=[args.client_dev],
        )
        xp.cuda.runtime.setDevice(args.client_dev)
        xp.cuda.set_allocator(rmm.rmm_cupy_allocator)

    ctx = ucx_api.UCXContext(
        feature_flags=(
            ucx_api.Feature.AM if args.enable_am is True else ucx_api.Feature.TAG,
        )
    )
    worker = ucx_api.UCXWorker(ctx)
    register_am_allocators(args, worker)
    ep = ucx_api.UCXEndpoint.create(
        worker,
        server_address,
        port,
        endpoint_error_handling=True,
    )

    send_msg = xp.arange(args.n_bytes, dtype="u1")
    if args.reuse_alloc:
        recv_msg = xp.zeros(args.n_bytes, dtype="u1")

    if args.enable_am:
        blocking_am_send(worker, ep, send_msg)
        blocking_am_recv(worker, ep)
    else:
        wireup_recv = bytearray(len(WireupMessage))
        blocking_send(worker, ep, WireupMessage)
        blocking_recv(worker, ep, wireup_recv)

    op_lock = Lock()
    finished = [0]
    outstanding = [0]

    def maybe_progress():
        while outstanding[0] >= args.max_outstanding:
            worker.progress()

    def op_started():
        with op_lock:
            outstanding[0] += 1

    def op_completed():
        with op_lock:
            outstanding[0] -= 1
            finished[0] += 1

    if args.cuda_profile:
        xp.cuda.profiler.start()

    times = []
    for i in range(args.n_iter + args.n_warmup_iter):
        start = clock()

        if args.enable_am:
            blocking_am_send(worker, ep, send_msg)
            blocking_am_recv(worker, ep)
        else:
            if not args.reuse_alloc:
                recv_msg = xp.zeros(args.n_bytes, dtype="u1")

            if args.delay_progress:
                maybe_progress()
                non_blocking_send(worker, ep, send_msg, op_started, op_completed)
                maybe_progress()
                non_blocking_recv(worker, ep, recv_msg, op_started, op_completed)
            else:
                blocking_send(worker, ep, send_msg)
                blocking_recv(worker, ep, recv_msg)

        stop = clock()
        if i >= args.n_warmup_iter:
            times.append(stop - start)

    if args.delay_progress:
        while finished[0] != 2 * (args.n_iter + args.n_warmup_iter):
            worker.progress()

    if args.cuda_profile:
        xp.cuda.profiler.stop()

    assert len(times) == args.n_iter
    bw_avg = format_bytes(2 * args.n_iter * args.n_bytes / sum(times))
    bw_med = format_bytes(2 * args.n_bytes / np.median(times))
    lat_avg = int(sum(times) * 1e9 / (2 * args.n_iter))
    lat_med = int(np.median(times) * 1e9 / 2)

    delay_progress_str = (
        f"True ({args.max_outstanding})" if args.delay_progress is True else "False"
    )

    print("Roundtrip benchmark")
    print_separator(separator="=")
    print_key_value(key="Iterations", value=f"{args.n_iter}")
    print_key_value(key="Bytes", value=f"{format_bytes(args.n_bytes)}")
    print_key_value(key="Object type", value=f"{args.object_type}")
    print_key_value(key="Reuse allocation", value=f"{args.reuse_alloc}")
    print_key_value(key="Transfer API", value=f"{'AM' if args.enable_am else 'TAG'}")
    print_key_value(key="Delay progress", value=f"{delay_progress_str}")
    print_key_value(key="UCX_TLS", value=f"{ucp.get_config()['TLS']}")
    print_key_value(key="UCX_NET_DEVICES", value=f"{ucp.get_config()['NET_DEVICES']}")
    print_separator(separator="=")
    if args.object_type == "numpy":
        print_key_value(key="Device(s)", value="CPU-only")
        s_aff = (
            args.server_cpu_affinity
            if args.server_cpu_affinity >= 0
            else "affinity not set"
        )
        c_aff = (
            args.client_cpu_affinity
            if args.client_cpu_affinity >= 0
            else "affinity not set"
        )
        print_key_value(key="Server CPU", value=f"{s_aff}")
        print_key_value(key="Client CPU", value=f"{c_aff}")
    else:
        print_key_value(key="Device(s)", value=f"{args.server_dev}, {args.client_dev}")
    print_separator(separator="=")
    print_key_value("Bandwidth (average)", value=f"{bw_avg}/s")
    print_key_value("Bandwidth (median)", value=f"{bw_med}/s")
    print_key_value("Latency (average)", value=f"{lat_avg} ns")
    print_key_value("Latency (median)", value=f"{lat_med} ns")
    if not args.no_detailed_report:
        print_separator(separator="=")
        print_key_value(key="Iterations", value="Bandwidth, Latency")
        print_separator(separator="-")
        for i, t in enumerate(times):
            ts = format_bytes(2 * args.n_bytes / t)
            lat = int(t * 1e9 / 2)
            print_key_value(key=i, value=f"{ts}/s, {lat}ns")
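In the client above, finished and outstanding are single-element lists so the nested op_started/op_completed closures can mutate them under op_lock. A minimal sketch of that idiom in isolation (the thread fan-out is an assumption, added for demonstration):

from threading import Lock, Thread

op_lock = Lock()
finished = [0]  # single-element list so closures can mutate it

def op_completed():
    with op_lock:
        finished[0] += 1

threads = [Thread(target=op_completed) for _ in range(8)]
for t in threads:
    t.start()
for t in threads:
    t.join()
assert finished[0] == 8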
Example #11
def server(queue, args):
    if args.server_cpu_affinity >= 0:
        os.sched_setaffinity(0, [args.server_cpu_affinity])

    if args.object_type == "numpy":
        import numpy as xp
    elif args.object_type == "cupy":
        import cupy as xp

        xp.cuda.runtime.setDevice(args.server_dev)
    else:
        import cupy as xp

        import rmm

        rmm.reinitialize(
            pool_allocator=True,
            managed_memory=False,
            initial_pool_size=args.rmm_init_pool_size,
            devices=[args.server_dev],
        )
        xp.cuda.runtime.setDevice(args.server_dev)
        xp.cuda.set_allocator(rmm.rmm_cupy_allocator)

    ctx = ucx_api.UCXContext(
        feature_flags=(
            ucx_api.Feature.AM if args.enable_am is True else ucx_api.Feature.TAG,
        )
    )
    worker = ucx_api.UCXWorker(ctx)

    register_am_allocators(args, worker)

    # A reference to listener's endpoint is stored to prevent it from going
    # out of scope too early.
    ep = None

    op_lock = Lock()
    finished = [0]
    outstanding = [0]

    def op_started():
        with op_lock:
            outstanding[0] += 1

    def op_completed():
        with op_lock:
            outstanding[0] -= 1
            finished[0] += 1

    def _send_handle(request, exception, msg):
        # Notice, we pass `msg` to the handler in order to make sure
        # it doesn't go out of scope prematurely.
        assert exception is None
        op_completed()

    def _tag_recv_handle(request, exception, ep, msg):
        assert exception is None
        req = ucx_api.tag_send_nb(
            ep, msg, msg.nbytes, tag=0, cb_func=_send_handle, cb_args=(msg,)
        )
        if req is None:
            op_completed()

    def _am_recv_handle(recv_obj, exception, ep):
        assert exception is None
        msg = Array(recv_obj)
        ucx_api.am_send_nbx(ep, msg, msg.nbytes, cb_func=_send_handle, cb_args=(msg,))

    def _listener_handler(conn_request, msg):
        nonlocal ep  # bind the enclosing server() scope's ep, not a module global
        ep = ucx_api.UCXEndpoint.create_from_conn_request(
            worker,
            conn_request,
            endpoint_error_handling=True,
        )

        # Wireup before starting to transfer data
        if args.enable_am is True:
            ucx_api.am_recv_nb(ep, cb_func=_am_recv_handle, cb_args=(ep,))
        else:
            wireup = Array(bytearray(len(WireupMessage)))
            op_started()
            ucx_api.tag_recv_nb(
                worker,
                wireup,
                wireup.nbytes,
                tag=0,
                cb_func=_tag_recv_handle,
                cb_args=(ep, wireup),
            )

        for i in range(args.n_iter + args.n_warmup_iter):
            if args.enable_am is True:
                ucx_api.am_recv_nb(ep, cb_func=_am_recv_handle, cb_args=(ep,))
            else:
                if not args.reuse_alloc:
                    msg = Array(xp.zeros(args.n_bytes, dtype="u1"))

                op_started()
                ucx_api.tag_recv_nb(
                    worker,
                    msg,
                    msg.nbytes,
                    tag=0,
                    cb_func=_tag_recv_handle,
                    cb_args=(ep, msg),
                )

    if not args.enable_am and args.reuse_alloc:
        msg = Array(xp.zeros(args.n_bytes, dtype="u1"))
    else:
        msg = None

    listener = ucx_api.UCXListener(
        worker=worker, port=args.port or 0, cb_func=_listener_handler, cb_args=(msg,)
    )
    queue.put(listener.port)

    while outstanding[0] == 0:
        worker.progress()

    # +1 to account for wireup message
    if args.delay_progress:
        while finished[0] < args.n_iter + args.n_warmup_iter + 1 and (
            outstanding[0] >= args.max_outstanding
            or finished[0] + args.max_outstanding
            >= args.n_iter + args.n_warmup_iter + 1
        ):
            worker.progress()
    else:
        while finished[0] != args.n_iter + args.n_warmup_iter + 1:
            worker.progress()
Example #12
def __init__(self, filename, unsafe=False):
    self.unsafe = unsafe
    self.filename = filename
    self._lock = Lock()
    self.ts_refreshed = None
    self.ts_refreshed_utc = None
Example #13
class MapManager:
    map_lock = Lock()
    robot_pose_lock = Lock()

    def __init__(self, robot_radius, robot_fov_radius, static_map_topic, robot_occ_grid_topic, push_poses_topic,
                 simulated_pose_topic, simulated_fov_pointcloud_topic):
        # Get parameters
        self.static_map_topic = static_map_topic
        self.robot_occ_grid_topic = robot_occ_grid_topic
        self.push_poses_topic = push_poses_topic
        self.simulated_pose_topic = simulated_pose_topic
        self.simulated_fov_pointcloud_topic = simulated_fov_pointcloud_topic

        # Declare common parameters
        self.static_map = None
        self.init_map = None
        self.multilayered_map = None
        self.has_free_space_been_created = False
        self.current_robot_pose = None

        # Create subscribers
        rospy.Subscriber(self.static_map_topic, OccupancyGrid, self._static_map_callback)
        rospy.Subscriber(self.simulated_fov_pointcloud_topic, PointCloud, self._simulated_fov_pointcloud_callback)
        rospy.Subscriber(self.simulated_pose_topic, PoseStamped, self._simulated_pose_callback)

        # Create publishers
        self.robot_occ_grid_pub = rospy.Publisher(self.robot_occ_grid_topic, OccupancyGrid, queue_size=1)
        self.push_poses_pub = rospy.Publisher(self.push_poses_topic, PoseArray, queue_size=1)

        # Initialize map
        while self.static_map is None:
            rospy.sleep(0.2)
        self.robot_metadata = RobotMetaData(robot_radius, robot_fov_radius, self.static_map.info.resolution)
        self.init_map = MultilayeredMap(self.static_map, self.robot_metadata)
        self.multilayered_map = copy.deepcopy(self.init_map)

    def _static_map_callback(self, new_map):
        # For the moment, we don't want to manage new static maps for the
        # node's life duration
        if self.static_map is None:
            self.static_map = new_map

    def _simulated_pose_callback(self, pose):
        MapManager.robot_pose_lock.acquire()
        self.current_robot_pose = pose
        MapManager.robot_pose_lock.release()

    def _simulated_fov_pointcloud_callback(self, pointcloud):
        MapManager.robot_pose_lock.acquire()
        robot_pose_copy = copy.deepcopy(self.current_robot_pose)
        MapManager.robot_pose_lock.release()

        if self.multilayered_map is not None and robot_pose_copy is not None:
            if robot_pose_copy.header.stamp.to_nsec() <= pointcloud.header.stamp.to_nsec():
                MapManager.map_lock.acquire()
                self.multilayered_map.update_from_point_cloud(pointcloud, robot_pose_copy)
                MapManager.map_lock.release()
                self.publish_ros_merged_occ_grid()

    def get_init_map(self):
        self.publish_ros_static_inflated_grid()
        return copy.deepcopy(self.init_map)  # we make a copy to be sure it's not changed

    def get_map_copy(self):
        MapManager.map_lock.acquire()
        map_copy = copy.deepcopy(self.multilayered_map)
        MapManager.map_lock.release()
        return map_copy

    # FIXME DEPRECATED
    def manually_add_obstacle(self, obstacle):
        self.multilayered_map.manually_add_obstacle(obstacle)

    def publish_ros_merged_occ_grid(self):
        ros_merged_occ_grid = Utils.convert_matrix_to_ros_occ_grid(self.multilayered_map.merged_occ_grid, self.static_map.header, self.static_map.info)
        Utils.publish_once(self.robot_occ_grid_pub, ros_merged_occ_grid)

    def publish_ros_static_inflated_grid(self):
        ros_merged_occ_grid = Utils.convert_matrix_to_ros_occ_grid(self.multilayered_map.inflated_static_occ_grid, self.static_map.header, self.static_map.info)
        Utils.publish_once(self.robot_occ_grid_pub, ros_merged_occ_grid)

    def publish_all_push_poses(self):
        all_push_poses = PoseArray()
        all_push_poses.header = self.static_map.header
        all_push_poses.header.stamp = rospy.Time.now()
        for obstacle_id, obstacle in self.multilayered_map.obstacles.items():
            for push_pose in obstacle.push_poses:
                all_push_poses.poses = all_push_poses.poses + [push_pose.pose]
        Utils.publish_once(self.push_poses_pub, all_push_poses)
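MapManager's map_lock and robot_pose_lock are class attributes, so all MapManager instances share the same two locks; a lock created in __init__ would instead be per-instance. A short sketch of the distinction (illustrative names):

from threading import Lock

class SharedRegistry:
    registry_lock = Lock()  # class attribute: one lock shared by all instances

    def __init__(self):
        self._local_lock = Lock()  # instance attribute: one lock per instance

a, b = SharedRegistry(), SharedRegistry()
assert a.registry_lock is b.registry_lock
assert a._local_lock is not b._local_lock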
Example #14
def __init__(self):
    self.__list = []
    self.__lock = Lock()
Example #15
    def __init__(self,
                 token=None,
                 base_url=None,
                 workers=4,
                 bot=None,
                 private_key=None,
                 private_key_password=None,
                 user_sig_handler=None,
                 request_kwargs=None,
                 persistence=None,
                 defaults=None,
                 use_context=False,
                 dispatcher=None,
                 base_file_url=None):

        if dispatcher is None:
            if (token is None) and (bot is None):
                raise ValueError('`token` or `bot` must be passed')
            if (token is not None) and (bot is not None):
                raise ValueError('`token` and `bot` are mutually exclusive')
            if (private_key is not None) and (bot is not None):
                raise ValueError('`bot` and `private_key` are mutually exclusive')
        else:
            if bot is not None:
                raise ValueError('`dispatcher` and `bot` are mutually exclusive')
            if persistence is not None:
                raise ValueError('`dispatcher` and `persistence` are mutually exclusive')
            if workers is not None:
                raise ValueError('`dispatcher` and `workers` are mutually exclusive')
            if use_context != dispatcher.use_context:
                raise ValueError('`dispatcher` and `use_context` are mutually exclusive')

        self.logger = logging.getLogger(__name__)

        if dispatcher is None:
            con_pool_size = workers + 4

            if bot is not None:
                self.bot = bot
                if bot.request.con_pool_size < con_pool_size:
                    self.logger.warning(
                        'Connection pool of Request object is smaller than optimal value (%s)',
                        con_pool_size)
            else:
                # we need a connection pool the size of:
                # * for each of the workers
                # * 1 for Dispatcher
                # * 1 for polling Updater (even if webhook is used, we can spare a connection)
                # * 1 for JobQueue
                # * 1 for main thread
                if request_kwargs is None:
                    request_kwargs = {}
                if 'con_pool_size' not in request_kwargs:
                    request_kwargs['con_pool_size'] = con_pool_size
                self._request = Request(**request_kwargs)
                self.bot = Bot(token,
                               base_url,
                               base_file_url=base_file_url,
                               request=self._request,
                               private_key=private_key,
                               private_key_password=private_key_password,
                               defaults=defaults)
            self.update_queue = Queue()
            self.job_queue = JobQueue()
            self.__exception_event = Event()
            self.persistence = persistence
            self.dispatcher = Dispatcher(self.bot,
                                         self.update_queue,
                                         job_queue=self.job_queue,
                                         workers=workers,
                                         exception_event=self.__exception_event,
                                         persistence=persistence,
                                         use_context=use_context)
            self.job_queue.set_dispatcher(self.dispatcher)
        else:
            con_pool_size = dispatcher.workers + 4

            self.bot = dispatcher.bot
            if self.bot.request.con_pool_size < con_pool_size:
                self.logger.warning(
                    'Connection pool of Request object is smaller than optimal value (%s)',
                    con_pool_size)
            self.update_queue = dispatcher.update_queue
            self.__exception_event = dispatcher.exception_event
            self.persistence = dispatcher.persistence
            self.job_queue = dispatcher.job_queue
            self.dispatcher = dispatcher

        self.user_sig_handler = user_sig_handler
        self.last_update_id = 0
        self.running = False
        self.is_idle = False
        self.httpd = None
        self.__lock = Lock()
        self.__threads = []

        # Just for passing to WebhookAppClass
        self._default_quote = defaults.quote if defaults else None
Example #16
import sys
import os
import re
import socket
import urllib2
import urlparse
import pickle
import random

from time import time, sleep
from threading import Thread, Lock, Semaphore

progs = {}
progsAnalizados = 0
totalProgramas = 0
lockCB = Lock()
lockProgs = Lock()
conexiones = Semaphore(10)
threads = []


def dl(url):
    #print url
    for i in range(2):  # n attempts
        conexiones.acquire()
        try:
            try:
                f = urllib2.urlopen(url)
                html = f.read()
                return html
            except IOError:
Example #17
def __init__(self):
    self._balance = 0
    self._lock = Lock()
Example #18
from redis.client import Script, StrictRedis
from redis.connection import ConnectionPool, Encoder
from redis.exceptions import ConnectionError, BusyLoadingError
from rediscluster import StrictRedisCluster

from sentry import options
from sentry.exceptions import InvalidConfiguration
from sentry.utils import warnings
from sentry.utils.warnings import DeprecatedSettingWarning
from sentry.utils.versioning import Version, check_versions
from sentry.utils.compat import map

logger = logging.getLogger(__name__)

_pool_cache = {}
_pool_lock = Lock()


def _shared_pool(**opts):
    if "host" in opts:
        key = "%s:%s/%s" % (opts["host"], opts["port"], opts["db"])
    else:
        key = "%s/%s" % (opts["path"], opts["db"])
    pool = _pool_cache.get(key)
    if pool is not None:
        return pool
    with _pool_lock:
        pool = _pool_cache.get(key)
        if pool is not None:
            return pool
        pool = ConnectionPool(**opts)
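_shared_pool is double-checked locking: a lock-free fast path for cache hits, then a second lookup under _pool_lock so two threads racing on a miss don't both build a ConnectionPool. A generic sketch of the idiom (names are illustrative):

from threading import Lock

_cache = {}
_cache_lock = Lock()

def get_or_create(key, factory):
    # Fast path: a hit on an existing entry needs no lock.
    obj = _cache.get(key)
    if obj is not None:
        return obj
    with _cache_lock:
        # Re-check under the lock: another thread may have won the race.
        obj = _cache.get(key)
        if obj is None:
            obj = _cache[key] = factory()
        return obj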
Example #19
    def __init__(self):
        mpv_config = conffile.get(APP_NAME,"mpv.conf", True)
        input_config = conffile.get(APP_NAME,"input.conf", True)
        extra_options = {}
        self._video = None
        self._lock = RLock()
        self._finished_lock = Lock()
        self.last_update = Timer()
        self.__part = 1
        self.timeline_trigger = None
        self.action_trigger = None
        self.external_subtitles = {}
        self.external_subtitles_rev = {}
        self.url = None
        self.evt_queue = Queue()
        self.is_in_intro = False
        self.intro_has_triggered = False

        if is_using_ext_mpv:
            extra_options = {
                "start_mpv": settings.mpv_ext_start,
                "ipc_socket": settings.mpv_ext_ipc,
                "mpv_location": settings.mpv_ext_path,
                "player-operation-mode": "cplayer"
            }
        # todo figure out how to put these in a file
        extra_options.update({
            'script-opts': 'osc-layout=slimbox,osc-deadzonesize=.9,osc-valign=1.05',
        })
        self._player = mpv.MPV(input_default_bindings=True, input_vo_keyboard=True,
                               input_media_keys=True, include=mpv_config, input_conf=input_config,
                               log_handler=mpv_log_handler, loglevel=settings.mpv_log_level,
                               **extra_options)
        self.menu = OSDMenu(self)
        self.auto_insert = False

        def on_new_sub(name, text):
            if not self.auto_insert:
                return
            if not text or not text.strip():
                return
            pyperclip.copy(text.replace('\n', ' '))

        self._player.observe_property('sub-text', on_new_sub)

        if hasattr(self._player, 'osc'):
            self._player.osc = settings.enable_osc
        else:
            log.warning("This mpv version doesn't support on-screen controller.")

        # Wrapper for on_key_press that ignores None.
        def keypress(key):
            def wrapper(func):
                if key is not None:
                    self._player.on_key_press(key)(func)
                return func
            return wrapper

        @self._player.on_key_press('CLOSE_WIN')
        @self._player.on_key_press('STOP')
        @keypress(settings.kb_stop)
        def handle_stop():
            self.stop()
            self.timeline_handle()

        @keypress(settings.kb_prev)
        def handle_prev():
            self.put_task(self.play_prev)

        @keypress(settings.kb_next)
        def handle_next():
            self.put_task(self.play_next)

        @self._player.on_key_press('PREV')
        @self._player.on_key_press('XF86_PREV')
        def handle_media_prev():
            if settings.media_key_seek:
                self._player.command("seek", -15)
            else:
                self.put_task(self.play_prev)

        @self._player.on_key_press('NEXT')
        @self._player.on_key_press('XF86_NEXT')
        def handle_media_next():
            if settings.media_key_seek:
                if self.is_in_intro:
                    self.skip_intro()
                else:
                    self._player.command("seek", 30)
            else:
                self.put_task(self.play_next)

        @keypress(settings.kb_watched)
        def handle_watched():
            self.put_task(self.watched_skip)

        @keypress(settings.kb_unwatched)
        def handle_unwatched():
            self.put_task(self.unwatched_quit)

        @keypress(settings.kb_menu)
        def menu_open():
            if not self.menu.is_menu_shown:
                self.menu.show_menu()
            else:
                self.menu.hide_menu()

        @keypress(settings.kb_menu_esc)
        def menu_back():
            if self.menu.is_menu_shown:
                self.menu.menu_action('back')
            else:
                self._player.command('set', 'fullscreen', 'no')

        @keypress(settings.kb_menu_ok)
        def menu_ok():
            self.menu.menu_action('ok')

        @keypress(settings.kb_menu_left)
        def menu_left():
            if self.menu.is_menu_shown:
                self.menu.menu_action('left')
            else:
                self._player.command("seek", settings.seek_left)

        @keypress(settings.kb_menu_right)
        def menu_right():
            if self.menu.is_menu_shown:
                self.menu.menu_action('right')
            else:
                if self.is_in_intro:
                    self.skip_intro()
                else:
                    self._player.command("seek", settings.seek_right)

        @keypress(settings.kb_menu_up)
        def menu_up():
            if self.menu.is_menu_shown:
                self.menu.menu_action('up')
            else:
                if self.is_in_intro:
                    self.skip_intro()
                else:
                    self._player.command("seek", settings.seek_up)

        @keypress(settings.kb_menu_down)
        def menu_down():
            if self.menu.is_menu_shown:
                self.menu.menu_action('down')
            else:
                self._player.command("seek", settings.seek_down)

        @keypress(settings.kb_pause)
        def handle_pause():
            if self.menu.is_menu_shown:
                self.menu.menu_action('ok')
            else:
                self.toggle_pause()

        # This gives you an interactive python debugger prompt.
        @keypress(settings.kb_debug)
        def handle_debug():
            import pdb
            pdb.set_trace()

        @self._player.on_key_press('ctrl+c')
        def copy_current_sub():
            try:
                sub = self._player.sub_text
                pyperclip.copy(sub)
            except AttributeError:
                pass  # no subtitle available.

        def copy_screenshot(subtitles=True):
            includes = 'subtitles' if subtitles else 'video'
            from io import BytesIO
            import win32clipboard
            image = self._player.screenshot_raw(includes=includes)
            output = BytesIO()
            image.convert("RGB").save(output, "BMP")
            data = output.getvalue()[14:]
            output.close()
            win32clipboard.OpenClipboard()
            win32clipboard.EmptyClipboard()
            win32clipboard.SetClipboardData(win32clipboard.CF_DIB, data)
            win32clipboard.CloseClipboard()

        @self._player.on_key_press('ctrl+s')
        def copy_current_image():
            copy_screenshot(subtitles=True)

        @self._player.on_key_press('ctrl+shift+s')
        def copy_current_image_no_subs():
            copy_screenshot(subtitles=False)

        @self._player.on_key_press('ctrl+v')
        def output_audio():
            import subprocess
            import string
            import unicodedata
            sub_delay = round(self._player.sub_delay, 4)  # round b/c of weird mpv precision
            sub_start = self._player.sub_start + sub_delay
            if sub_start:
                print("Outputting current subtitle...")
                valid_fn_chars = "-_.() %s%s" % (string.ascii_letters, string.digits)
                fn_dirty = "%s - %s" % (self._player.media_title, str(int(sub_start * 1000)))
                fn = unicodedata.normalize('NFKD', fn_dirty).encode('ASCII', 'ignore')
                fn = ''.join(chr(c) for c in fn if chr(c) in valid_fn_chars)
                aid = [x for x in self._player.track_list
                       if x.get("type") == "audio" and x.get("selected")][0].get("id")
                subprocess.Popen([
                    'mpv',
                    self.url,
                    '-o',
                    '%s.mp3' % fn,
                    '--no-video',
                    '--start=%s' % sub_start,
                    '--end=%s' % (self._player.sub_end + sub_delay),
                    '--aid=%s' % aid,
                ])
                self._player.screenshot_to_file("%s.png" % fn, includes='video')
                with open('%s.txt' % fn, 'w+', encoding='utf-8') as f:
                    f.write(self._player.sub_text)

        @self._player.on_key_press('ctrl+a')
        def toggle_auto_insert():
            self.auto_insert = not self.auto_insert
            self._player.show_text('Auto insert %s' % ("on" if self.auto_insert else "off"))

        # Fires between episodes.
        @self._player.property_observer('eof-reached')
        def handle_end(_name, reached_end):
            if self._video and reached_end:
                has_lock = self._finished_lock.acquire(False)
                self.put_task(self.finished_callback, has_lock)

        # Fires at the end.
        @self._player.event_callback('idle')
        def handle_end_idle(event):
            if self._video:
                has_lock = self._finished_lock.acquire(False)
                self.put_task(self.finished_callback, has_lock)
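handle_end and handle_end_idle can both fire near the end of playback; the non-blocking _finished_lock.acquire(False) hands the lock to exactly one of them, letting the finished callback distinguish the first notification from duplicates. A minimal sketch of that run-once guard (illustrative; the original releases the lock in the callback instead):

from threading import Lock

finished_lock = Lock()

def on_playback_end():
    # acquire(False) never blocks: it returns True for the first caller
    # and False for any duplicate notification while the lock is held.
    first = finished_lock.acquire(False)
    if first:
        try:
            print("running end-of-playback work once")
        finally:
            finished_lock.release()
    else:
        print("duplicate end notification ignored")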
Example #20
        default=9995,
        help='Port for communicating with the client; used only by the servers'
    )

    parser.add_argument("--trainDataset",
                        default='profilerDataADF.csv',
                        help='Path to the pretraining dataset')

    args = sys.argv[1:]
    args = parser.parse_args(args)

    df = loadTrainingDataset(args.trainDataset)
    global_mod = train(df)
    per_device_models = {}

    mutex = Lock()

    conn = openServerConn(port=args.clientPort)
    while True:
        with mutex:
            stats = getMessage(conn)
            # prediction request or learning stats?
            if (stats[1] == "0"):
                batchSize = makePrediction(global_mod, stats)
                print("new device prediction for " + stats[3] + " : ",
                      batchSize)
                sendMessage(conn, [batchSize])
            else:
                df = storeStats(df, stats)
                global_mod = train(df)
                print("update from " + stats[3] + ", " + stats[12])
Example #21
def __init__(self):
    self.__lock = Lock()
    self.__failList = dict()
    self.__maxRetry = 3
    self.__maxTime = 600
    self.__failTotal = 0
Example #22
    def __init__(self):

        # ---------------------------------------------------------------------------------------------
        # define the mutexes to avoid concurrency issues
        # ---------------------------------------------------------------------------------------------
        self.inputLock = threading.Lock()
        self.outputLock = threading.Lock()

        # ---------------------------------------------------------------------------------------------
        # message sequences
        # ---------------------------------------------------------------------------------------------
        # catheter control commands in speed mode
        self.catheterMoveInstructionSequence = []

        # guidewire control commands in speed mode
        self.guidewireProgressInstructionSequence = []
        self.guidewireRotateInstructionSequence = []

        # guidewire control commands in position mode
        self.guidewireMovingDistance = []

        # to be verified...
        self.contrastMediaPushInstructionSequence = []
        self.injectionCommandSequence = []
        self.retractInstructionSequence = []

        # force feedback
        self.forcefeedbackSequence = []

        # push catheter and guidewire together
        self.catheter_guidewire_push_sequence = []

        # system control
        self.closeSessionSequence = []

        self.sensingParameterSequence = []

        # ---------------------------------------------------------------------------------------------
        # system status variable
        # ---------------------------------------------------------------------------------------------
        self.systemStatus = True

        # ------------------------------------------------------------------------------------------------------------
        # control variables:
        #
        # guidewireControlState
        #  where
        #      0: uncontrolled,
        #      1: valid,
        #      2: nonvalid_prepare_for_push,
        #      3: nonvalid_prepare_for_drawn,
        #      4: exception
        #
        # catheterControlState
        #   where
        #      0: uncontrolled,
        #      1: valid
        #      2: nonvalid_beyond_guidewire
        #      3: exception
        # contrastMediaControlState
        #   where
        #      0: uncontrolled,
        #      1: valid
        #      2: exception

        self.guidewireControlState = 0
        self.catheterControlState = 0
        self.contrastMediaControlState = 0
        self.globalContrastMediaVolumn = 0

        self.globalForceFeedback = 0.0
        self.globalTorqueFeedback = 0.0
        self.globalDistanceFromChuckToCatheter = 0.0
        self.globalTelescopicRodLength = 0.0
        self.globalDistanceFromCatheterToGuidewire = 0.0
        self.globalGuidewireAngle = 0.0
        self.globalTranslationVelocity = 0.0
        self.globalRotationVelocity = 0.0
        self.globalDecisionMade = 1

        informationAnalysisTask = threading.Thread(
            None, self.coreInformationAnalysis)
        informationAnalysisTask.start()

        decisionMaking_task = threading.Thread(None, self.decisionMaking)
        decisionMaking_task.start()

        self.storingDataLock = Lock()
        storingDataTask = threading.Thread(None, self.storingData)
        storingDataTask.start()
Example #23
def __init__(self, tensorboard_dir):
    self.tensorboard_writter = SummaryWriter(tensorboard_dir,
                                             flush_secs=10,
                                             max_queue=1)
    self._mutex_add = Lock()
Example #24
def __init__(self, cap=10):  # cap: queue capacity, defaults to 10
    self.queue = []  # backing list
    self.qLock = Lock()  # mutual exclusion for the list
    self.full = Semaphore(0)  # counts occupied slots; starts at 0
    self.empty = Semaphore(cap)  # counts free slots; starts at cap
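The two semaphores above implement a classic bounded buffer: empty counts free slots, full counts occupied slots, and qLock protects the list itself. The snippet ends at __init__; here is a sketch of how put and get would plausibly use them (an assumption, since those methods are not shown):

from threading import Lock, Semaphore

class BoundedQueue:
    def __init__(self, cap=10):
        self.queue = []
        self.qLock = Lock()
        self.full = Semaphore(0)  # occupied slots
        self.empty = Semaphore(cap)  # free slots

    def put(self, item):
        self.empty.acquire()  # block while the queue is full
        with self.qLock:
            self.queue.append(item)
        self.full.release()  # one more occupied slot

    def get(self):
        self.full.acquire()  # block while the queue is empty
        with self.qLock:
            item = self.queue.pop(0)
        self.empty.release()  # one more free slot
        return item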
Example #25
    def __init__(self, network,
                 settings_cache,
                 current_chain_head_func,
                 current_root_func,
                 endpoint=None,
                 peering_mode='static',
                 initial_seed_endpoints=None,
                 initial_peer_endpoints=None,
                 minimum_peer_connectivity=3,
                 maximum_peer_connectivity=10,
                 topology_check_frequency=1
                 ):
        """Constructor for the Gossip object. Gossip defines the
        overlay network above the lower level networking classes.

        Args:
            network (networking.Interconnect): Provides inbound and
                outbound network connections.
            settings_cache (state.SettingsCache): A cache for on chain
                settings.
            current_chain_head_func (function): returns the current chain head.
            current_root_func (function): returns the current state root hash
                for the current chain root.
            endpoint (str): The publicly accessible zmq-style uri
                endpoint for this validator.
            peering_mode (str): The type of peering approach. Either 'static'
                or 'dynamic'. In 'static' mode, no attempted topology
                buildout occurs -- the validator only attempts to initiate
                peering connections with endpoints specified in the
                peer_list. In 'dynamic' mode, the validator will first
                attempt to initiate peering connections with endpoints
                specified in the peer_list and then attempt to do a
                topology buildout starting with peer lists obtained from
                endpoints in the seeds_list. In either mode, the validator
                will accept incoming peer requests up to max_peers.
            initial_seed_endpoints ([str]): A list of initial endpoints
                to attempt to connect and gather initial topology buildout
                information from. These are specified as zmq-compatible
                URIs (e.g. tcp://hostname:port).
            initial_peer_endpoints ([str]): A list of initial peer endpoints
                to attempt to connect and peer with. These are specified
                as zmq-compatible URIs (e.g. tcp://hostname:port).
            minimum_peer_connectivity (int): If the number of connected
                peers is below this threshold, the topology builder will
                continue to attempt to identify new candidate peers to
                connect with.
            maximum_peer_connectivity (int): The validator will reject
                new peer requests if the number of connected peers
                reaches this threshold.
            topology_check_frequency (int): The time in seconds between
                topology update checks.
        """
        self._peering_mode = peering_mode
        self._lock = Lock()
        self._network = network
        self._endpoint = endpoint
        self._initial_seed_endpoints = initial_seed_endpoints \
            if initial_seed_endpoints else []
        self._initial_peer_endpoints = initial_peer_endpoints \
            if initial_peer_endpoints else []
        self._minimum_peer_connectivity = minimum_peer_connectivity
        self._maximum_peer_connectivity = maximum_peer_connectivity
        self._topology_check_frequency = topology_check_frequency
        self._settings_cache = settings_cache

        self._current_chain_head_func = current_chain_head_func
        self._current_root_func = current_root_func

        self._topology = None
        self._peers = {}
Example #26
        f"Element has been put back into the queue after {delay} seconds",
        extra={"area": "main"},
    )
    queue.put(items)


####################################
# Global variables
####################################

# These variables keep a snapshot of the current state of everything
# on the platform
locationsStatus = defaultdict(location_status.LocationStatus)
devices = defaultdict(Device)
values = defaultdict(Value)
deviceslock = Lock()
locationsStatuslock = Lock()

modules = {
    "switch": switch_logic.Switch,
    "thermostat": thermostat_logic.Thermostat,
    "Toogle": toogle_logic.Toogle,
}

# Keep track of all the subscriptions. In case of reconnection they
# will be necessary to restore the subscriptions.
# I do not trust clean_session=False because it does not guarantee
# that all the subscriptions will be restored; this wrong behaviour
# has been observed by stopping the broker and restarting it.
subscriptionsList = []
Example #27
def __init__(self):
    self.lock = Lock()
    self.data = {}  # {'session_id': [{time, content, score}]}
    self.last_update = {}  # {'session_id': datetime}
Example #28
from mindspore._checkparam import check_input_data, Validator
from mindspore.compression.export import quant_export
from mindspore.parallel._tensor import _load_tensor
from mindspore.parallel._utils import _infer_rank_list, _remove_repeated_slices


tensor_to_ms_type = {"Int8": mstype.int8, "Uint8": mstype.uint8, "Int16": mstype.int16, "Uint16": mstype.uint16,
                     "Int32": mstype.int32, "Uint32": mstype.uint32, "Int64": mstype.int64, "Uint64": mstype.uint64,
                     "Float16": mstype.float16, "Float32": mstype.float32, "Float64": mstype.float64,
                     "Bool": mstype.bool_}

tensor_to_np_type = {"Int8": np.int8, "Uint8": np.uint8, "Int16": np.int16, "Uint16": np.uint16,
                     "Int32": np.int32, "Uint32": np.uint32, "Int64": np.int64, "Uint64": np.uint64,
                     "Float16": np.float16, "Float32": np.float32, "Float64": np.float64, "Bool": np.bool_}

_ckpt_mutex = Lock()
SLICE_SIZE = 512 * 1024 * 1024


def _special_process_par(par, new_par):
    """
    Processes the special condition.

    Like (12,2048,1,1)->(12,2048), this case is caused by GE 4 dimensions tensor.
    """
    par_shape_len = len(par.data.shape)
    new_par_shape_len = len(new_par.data.shape)
    delta_len = new_par_shape_len - par_shape_len
    delta_i = 0
    for delta_i in range(delta_len):
        if new_par.data.shape[par_shape_len + delta_i] != 1:
Example #29
def __init__(self):
    self.lock = Lock()
    self.tokens = set()
Example #30
class ExecutingMountebankServer(MountebankServer):
    """A Mountebank mock server, running one or more imposters, one for each domain being mocked.

    A test will look like::

        def test_an_imposter(mock_server):
            mb = ExecutingMountebankServer()
            imposter = Imposter(Stub(Predicate(path='/test'),
                                     Response(body='sausages')),
                                record_requests=True)

            with mb(imposter) as s:
                r = requests.get('{0}/test'.format(imposter.url))

                assert_that(r, is_response().with_status_code(200).and_body("sausages"))
                assert_that(s, had_request(path='/test', method="GET"))

            mb.close()

    The mountebank server will be started when this class is instantiated, and needs to be closed if it's not to be
    left running. Consider using the :meth:`mock_server` pytest fixture, which will take care of this for you.

    :param executable: Optional, alternate location for the Mountebank executable.
    :param port: Server port.
    :param timeout: How long to wait for the Mountebank server to start.
    :param debug: Start the server in debug mode, which records all requests. This needs to be `True` for the
        :py:func:`mbtest.matchers.had_request` matcher to work.
    :param allow_injection: Allow JavaScript injection. If `True`, `local_only` should also be `True`, as per
        `Mountebank security <http://www.mbtest.org/docs/security>`_.
    :param local_only: Accept request only from localhost.
    :param data_dir: Persist all operations to disk, in this directory.
    """

    running = set()  # type: Set[int]
    start_lock = Lock()

    def __init__(
        self,
        executable: Union[str, Path] = DEFAULT_MB_EXECUTABLE,
        port: int = 2525,
        timeout: int = 5,
        debug: bool = True,
        allow_injection: bool = True,
        local_only: bool = True,
        data_dir: Union[str, None] = ".mbdb",
    ) -> None:
        super(ExecutingMountebankServer, self).__init__(port)
        with self.start_lock:
            if self.server_port in self.running:
                raise MountebankPortInUseException(
                    "Already running on port {0}.".format(self.server_port))
            try:
                options = self._build_options(port, debug, allow_injection,
                                              local_only, data_dir)
                self.mb_process = subprocess.Popen([executable] +
                                                   options)  # nosec
                self._await_start(timeout)
                self.running.add(port)
                logger.info("Spawned mb process %s on port %s.",
                            self.mb_process.pid, self.server_port)
            except OSError:
                logger.error(
                    "Failed to spawn mb process with executable at %s. Have you installed Mountebank?",
                    executable,
                )
                raise

    @staticmethod
    def _build_options(port: int, debug: bool, allow_injection: bool,
                       local_only: bool, data_dir: Union[str, None]):
        options = [
            "start",
            "--port",
            str(port),
        ]  # type: List[str]
        if debug:
            options.append("--debug")
        if allow_injection:
            options.append("--allowInjection")
        if local_only:
            options.append("--localOnly")
        if data_dir:
            options += [
                "--datadir",
                data_dir,
            ]
        return options

    def _await_start(self, timeout: int) -> None:
        start_time = time.time()
        started = False

        while time.time() - start_time < timeout:
            try:
                requests.get(self.server_url, timeout=1).raise_for_status()
                started = True
                break
            except RequestException:
                time.sleep(0.1)

        if not started:
            raise MountebankTimeoutError(
                "Mountebank failed to start within {0} seconds.".format(
                    timeout))

        logger.debug("Server started at %s.", self.server_url)

    def close(self) -> None:
        self.mb_process.terminate()
        self.mb_process.wait()
        self.running.remove(self.server_port)
        logger.info(
            "Terminated mb process %s on port %s status %s.",
            self.mb_process.pid,
            self.server_port,
            self.mb_process.returncode,
        )