Example #1
0
# mapping from a command name to its (OGF, OCF) pair
commands_op_code = {
    "reset": (0x3, 0x3),
    "set event mask": (0x3, 0x1),
    "le set event mask": (0x8, 0x1),
    "set adv params": (0x8, 0x6),
    "read adv channel tx power": (0x8, 0x7),
    "set adv data": (0x8, 0x8),
    "set scan rsp data": (0x8, 0x9),
    "set adv enable": (0x8, 0xa),
}

event_handlers = {}
le_meta_event_handlers = {}

cmd_done_condition = threading.Condition()
done_events = {}
l2cap_handles = {}


def event_thread(dev):

    global hci_device

    while True:
        #time.sleep(1)

        #hci_device.ctrl_transfer(0x20, 0x00, 0x00, 0x00, msg_empy)
        #hci_device.ctrl_transfer(0x20, 0x00, 0x00, 0x00, msg_empy)
        try:
            evt_packet = hci_device.read(0x81, 256 + 2, timeout=0)
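
The event loop above is truncated before any completion handling. A minimal sketch of how the sending side could block on cmd_done_condition until the event thread records a Command Complete for a given opcode in done_events (wait_command_done and on_command_complete are hypothetical helpers, not part of the original code):

def on_command_complete(opcode, status):
    # Called from the event thread once a Command Complete event is parsed.
    with cmd_done_condition:
        done_events[opcode] = status
        cmd_done_condition.notify_all()


def wait_command_done(opcode, timeout=1.0):
    # Block the caller until the event thread reports completion, or time out.
    with cmd_done_condition:
        if cmd_done_condition.wait_for(lambda: opcode in done_events, timeout):
            return done_events.pop(opcode)
        return None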
Example #2
0
    for k in idToScore:
        total = total + idToScore[k]
    for k in idToScore:
        if total != 0:
            ans[str(k)] = idToScore[k] / total
        else:
            ans[str(k)] = idToScore[k]

    return ans


app = Flask(__name__)
app.lock = threading.Lock()
app.model_lock = threading.Lock()
app.is_training = False
app.using_old_model = threading.Condition()
app.using_model_num = 0
app.exist_new_model = False
app.model = model
app.matrix = getItemCFMatrix()

@app.route('/test', methods=['GET','POST'])
def index():
    if request.method == 'GET':
        # Check whether a new model is currently waiting to be applied
        app.model_lock.acquire()
        num = app.using_model_num
        status = app.exist_new_model
        app.model_lock.release()
        if status:
            return jsonify({"code":3,"status":"new model waiting to refresh","num":num})
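
A plausible counterpart for app.using_old_model is a refresh routine that waits until no request is still using the current model before swapping it in. This is a sketch only; it assumes request handlers adjust app.using_model_num under the same condition, which the excerpt does not show:

def refresh_model(new_model):
    # Assumption: handlers increment/decrement app.using_model_num while
    # holding app.using_old_model and notify when the count drops.
    with app.using_old_model:
        app.using_old_model.wait_for(lambda: app.using_model_num == 0)
        app.model = new_model
        app.exist_new_model = False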
Example #3
0
import threading





condition = threading.Condition()
condition.acquire()
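
A bare acquire() like the one above is normally balanced by wait()/notify() and a release(); the idiomatic form uses the condition as a context manager and re-checks a predicate to guard against spurious wakeups (illustrative sketch, not part of the original snippet):

ready = False


def consumer():
    with condition:             # acquire() and release() handled by `with`
        while not ready:        # re-check the predicate after every wakeup
            condition.wait()
        print("resource is ready")


def producer():
    global ready
    with condition:
        ready = True
        condition.notify_all()  # wake all waiting consumers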
Example #4
0
# alarm listener gets all of the messages.
c = Consumer({
    'bootstrap.servers': '{}'.format(server),
    'group.id' : 'Alarm-' + str(uuid.uuid4()),
    'default.topic.config' : {'auto.offset.reset':'largest'} # Start reading from the end of the topic.
    })

c.subscribe(['{}Talk'.format(config)])
print("Connected to topic {}Talk".format(config))

# Message queue and accompanying lock.
annunciationQueue = Queue.PriorityQueue()
queueLock = threading.Lock()

# Condition variable signifying that there are messages to annunciate.
annunciateCondition = threading.Condition()

# Should the annunciator run?
run = True

# Annunciator, runs in its own thread, acts as consumer to annunciationQueue.
class annunciatorThread(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)
        self.name = "Annunciator: Message Queue Consumer"
        # The thread should die when the script is killed.
        self.daemon = True

    def run(self):
        while (run):
            queueLock.acquire()
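
The run() method is cut off right after the lock acquisition. One plausible shape for the rest of the loop, shown as a complete run() for clarity, is to sleep on annunciateCondition until a producer enqueues something and then drain the priority queue; annunciate() and the (priority, message) tuple layout are assumptions:

    def run(self):
        while run:
            with annunciateCondition:
                # Sleep until a producer notifies us; wake periodically so a
                # change of the global `run` flag is noticed.
                annunciateCondition.wait(timeout=1.0)
            while not annunciationQueue.empty():
                with queueLock:
                    # Assumes producers enqueue (priority, message) tuples.
                    priority, message = annunciationQueue.get()
                annunciate(message)  # hypothetical text-to-speech / display call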
Example #5
0
from io import StringIO
import requests
from queue import Queue
import play_scraper as pl
from selenium import webdriver
import regex as re
import csv
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import threading as td
import time

list = ["https://play.google.com/store/apps"]
visited_links = set()
c = td.Condition()
WAIT_CYCLE = 3000


class crawler():

    def __init__(self,csv_name):
        ###variable###
        self.CSV_APP_PRIVACY_NAME = csv_name
        self.TO_CSV_NUMBER = 10
        self.QUEUE_MAX_SIZE = 10000
        global visited_links
        global WAIT_CYCLE
        self.apps_privacy_dataset = []
        self.driver = webdriver.Firefox()
        self.restart_links_number = 0
Example #6
0
 def __init__(self):
     self._id = uuid.uuid4()
     self._result = []  # a result variable queue
     self._result_cv = threading.Condition()
     self._status = LauncherStatus.INITIAL
     self._diagnostics = []
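
Typical companions to _result and _result_cv are a producer that appends and notifies, and a consumer that blocks until something is available; both methods below are hypothetical, not shown in the original class:

    def add_result(self, item):
        with self._result_cv:
            self._result.append(item)
            self._result_cv.notify_all()

    def next_result(self, timeout=None):
        with self._result_cv:
            # wait_for re-checks the predicate after every notify and on timeout.
            if self._result_cv.wait_for(lambda: self._result, timeout):
                return self._result.pop(0)
            return None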
Example #7
0
 def __init__(self, size):
     self.size = size
     self._quence = []
     self._mutex = threading.RLock()
     self._full = threading.Condition(self._mutex)
     self._empty = threading.Condition(self._mutex)
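
With _full and _empty sharing the same RLock, put() and get() can each wait on one condition and notify the other; a minimal sketch (method names assumed, the rest of the class is not shown):

    def put(self, item):
        with self._full:                       # acquires the shared mutex
            while len(self._quence) >= self.size:
                self._full.wait()              # block while the buffer is full
            self._quence.append(item)
            self._empty.notify()               # a consumer can proceed now

    def get(self):
        with self._empty:                      # same underlying mutex
            while not self._quence:
                self._empty.wait()             # block while the buffer is empty
            item = self._quence.pop(0)
            self._full.notify()                # a producer can proceed now
            return item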
Example #8
0
    def __init__(self,
                 host_and_ports=[('localhost', 61613)],
                 user=None,
                 passcode=None,
                 prefer_localhost=True,
                 try_loopback_connect=True,
                 reconnect_sleep_initial=0.1,
                 reconnect_sleep_increase=0.5,
                 reconnect_sleep_jitter=0.1,
                 reconnect_sleep_max=60.0):
        """
        Initialize and start this connection.

        \param host_and_ports
                 a list of (host, port) tuples.

        \param prefer_localhost
                 if True and the local host is mentioned in the (host,
                 port) tuples, try to connect to this first

        \param try_loopback_connect
                 if True and the local host is found in the host
                 tuples, try connecting to it using loopback interface
                 (127.0.0.1)

        \param reconnect_sleep_initial

                 initial delay in seconds to wait before reattempting
                 to establish a connection if connection to any of the
                 hosts fails.

        \param reconnect_sleep_increase

                 factor by which the sleep delay is increased after
                 each connection attempt. For example, 0.5 means
                 to wait 50% longer than before the previous attempt,
                 1.0 means wait twice as long, and 0.0 means keep
                 the delay constant.

        \param reconnect_sleep_max

                 maximum delay between connection attempts, regardless
                 of the reconnect_sleep_increase.

        \param reconnect_sleep_jitter

                 random additional time to wait (as a percentage of
                 the time determined using the previous parameters)
                 between connection attempts in order to avoid
                 stampeding. For example, a value of 0.1 means to wait
                 an extra 0%-10% (randomly determined) of the delay
                 calculated using the previous three parameters.
        """

        sorted_host_and_ports = []
        sorted_host_and_ports.extend(host_and_ports)

        def is_local_host(host):
            # Needed both for the sorting below and for the loopback handling
            # further down, so define it regardless of prefer_localhost.
            return host in Connection.__localhost_names

        # If localhost is preferred, make sure all (host, port) tuples
        # that refer to the local host come first in the list
        if prefer_localhost:
            sorted_host_and_ports.sort(lambda x, y: (int(is_local_host(y[0])) -
                                                     int(is_local_host(x[0]))))

        # If the user wishes to attempt connecting to local ports
        # using the loopback interface, for each (host, port) tuple
        # referring to a local host, add an entry with the host name
        # replaced by 127.0.0.1 if it doesn't exist already
        loopback_host_and_ports = []
        if try_loopback_connect:
            for host_and_port in sorted_host_and_ports:
                if is_local_host(host_and_port[0]):
                    port = host_and_port[1]
                    if (not ("127.0.0.1", port) in sorted_host_and_ports and
                            not ("localhost", port) in sorted_host_and_ports):
                        loopback_host_and_ports.append(("127.0.0.1", port))

        # Assemble the final, possibly sorted list of (host, port) tuples
        self.__host_and_ports = []
        self.__host_and_ports.extend(loopback_host_and_ports)
        self.__host_and_ports.extend(sorted_host_and_ports)

        self.__recvbuf = ''

        self.__listeners = []

        self.__reconnect_sleep_initial = reconnect_sleep_initial
        self.__reconnect_sleep_increase = reconnect_sleep_increase
        self.__reconnect_sleep_jitter = reconnect_sleep_jitter
        self.__reconnect_sleep_max = reconnect_sleep_max

        self.__connect_headers = {}
        if user is not None and passcode is not None:
            self.__connect_headers['login'] = user
            self.__connect_headers['passcode'] = passcode

        self.__socket = None
        self.__current_host_and_port = None

        self.__receiver_thread_exit_condition = threading.Condition()
        self.__receiver_thread_exited = False
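
Elsewhere in this class the exit condition is typically used to let the disconnect path block until the receiver loop has finished; a rough sketch of that handshake (both method bodies are assumptions, not the library's actual code):

    def __receiver_loop_finished(self):
        # Called by the receiver thread just before it returns.
        self.__receiver_thread_exit_condition.acquire()
        self.__receiver_thread_exited = True
        self.__receiver_thread_exit_condition.notifyAll()
        self.__receiver_thread_exit_condition.release()

    def __wait_for_receiver_exit(self):
        # Called by the thread that initiated the disconnect.
        self.__receiver_thread_exit_condition.acquire()
        while not self.__receiver_thread_exited:
            self.__receiver_thread_exit_condition.wait()
        self.__receiver_thread_exit_condition.release()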
Example #9
0
 def __init__(self):
     self._lock = threading.Lock()
     self._cv = threading.Condition(self._lock)
     self._event = None
     self._buffer = array.array('B')
     self._closed = False
Example #10
0
def test() -> None:
    cv = threading.Condition()
    query_result = QueryResult({}, {"stats": {}, "sql": ""})
    mock_query_runner = Mock(return_value=query_result)

    def callback_func(primary: Optional[Tuple[str, QueryResult]],
                      other: List[Tuple[str, QueryResult]]) -> None:
        with cv:
            cv.notify()

    mock_callback = Mock(side_effect=callback_func)

    query_body = {
        "selected_columns": ["type", "project_id"],
        "conditions": [
            ["project_id", "=", 1],
            ["timestamp", ">", "2020-01-01 12:00:00"],
        ],
    }

    events = get_dataset("events")
    query = parse_query(query_body, events)

    errors_pipeline = SimplePipelineBuilder(
        query_plan_builder=SingleStorageQueryPlanBuilder(
            storage=get_storage(StorageKey.ERRORS)), )

    errors_ro_pipeline = SimplePipelineBuilder(
        query_plan_builder=SingleStorageQueryPlanBuilder(
            storage=get_storage(StorageKey.ERRORS_RO)), )

    delegator = PipelineDelegator(
        query_pipeline_builders={
            "errors": errors_pipeline,
            "errors_ro": errors_ro_pipeline,
        },
        selector_func=lambda query, referrer: ("errors", ["errors_ro"]),
        callback_func=mock_callback,
    )

    with cv:
        request_settings = HTTPRequestSettings()
        delegator.build_execution_pipeline(
            Request(
                "",
                query_body,
                query,
                request_settings,
                "ref",
            ),
            mock_query_runner,
        ).execute()
        cv.wait(timeout=5)

    assert mock_query_runner.call_count == 2

    assert mock_callback.call_args == call(
        query,
        request_settings,
        "ref",
        Result("errors", query_result, ANY),
        [Result("errors_ro", query_result, ANY)],
    )
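
If the callback could ever run before the main thread reaches cv.wait() (for example, synchronously inside execute()), the notify would be lost and the test would stall for the full timeout. A slightly more defensive variant records a flag and waits on it with Condition.wait_for. These are adjusted fragments of the test above, not the test as written; `request` stands for the Request constructed inline above and everything not shown stays the same:

    callback_called = False

    def callback_func(primary: Optional[Tuple[str, QueryResult]],
                      other: List[Tuple[str, QueryResult]]) -> None:
        nonlocal callback_called
        with cv:
            callback_called = True
            cv.notify()

    # ... same setup as above, then:
    with cv:
        delegator.build_execution_pipeline(request, mock_query_runner).execute()
        # Returns immediately if the flag was already set before wait_for.
        assert cv.wait_for(lambda: callback_called, timeout=5)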
Example #11
0
            s.shutdown(socket.SHUT_WR)

        while True:
            data = s.recv(4096)
            if not data:
                break
            result.append(data)
    finally:
        s.close()

    return "".join(result)


q_stats = Queue.Queue()

p_wakeup = threading.Condition()


def wakeup_poller():
    p_wakeup.acquire()
    p_wakeup.notifyAll()
    p_wakeup.release()


def reset_server_stats(server):
    host, port = server.split(':')
    send_cmd(host, port, "srst\n")


server_id = 0
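
wakeup_poller() only makes sense next to a poller that sleeps on the same condition; a sketch of that side, which wakes either on notify or after a regular polling interval (poll_once and POLL_INTERVAL are hypothetical):

POLL_INTERVAL = 30  # seconds between regular polls


def poller_loop():
    while True:
        poll_once()              # hypothetical: query each server, feed q_stats
        p_wakeup.acquire()
        # Sleep until the next interval, or earlier if wakeup_poller() fires.
        p_wakeup.wait(POLL_INTERVAL)
        p_wakeup.release()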
Example #12
0
 def __init__(self):
     self.__condition = threading.Condition()
     self.__value = None
     self.__exception = None
     self.__complete = False
Example #13
0
 def __init__(self):
     self._queue = []
     self._count = 0
     self._cv = threading.Condition()
Example #14
0
 def __init__(self):
     self._condition = threading.Condition()
     self._value = None
     self._called = False
Example #15
0
 def __init__(self):
     super(SendLayer, self).__init__()
     self.ackQueue = []
     self.lock = threading.Condition()
Example #16
0
    SocketServer.TCPServer.allow_reuse_address = True
    server = SocketServer.TCPServer((HOST, PORT), handleConnection)
    srvThrd = threading.Thread(target=server.serve_forever)
    srvThrd.daemon = True
    srvThrd.start()

    global command  # The command APDU
    global response  # The response APDU
    global condCommand  # Condition to wait until a new APDU command arrives
    global condResponse  # Condition to wait until a response is available
    global newCommand  # Flag for the handler that there is a new command
    global processing  # Flag for the run function that the processing has finished
    global err  # Flag for the run function that an error happened

    condCommand = threading.Condition()
    condResponse = threading.Condition()

    select = -1
    while True:
        newCommand = 0
        processing = 0
        command = ""
        response = ""
        err = 0
        while True:  # Check for terminated status
            if getARD():
                break

        getLoginData()
        getURL()
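
The two conditions set up above usually implement a hand-off between the socket handler (which receives APDU commands) and the main loop (which produces responses). A rough sketch of both sides using the same globals; the function names are assumptions:

def submit_command(apdu):
    # Socket handler side: publish a new command and wait for its response.
    global command, newCommand
    with condCommand:
        command = apdu
        newCommand = 1
        condCommand.notify()
    with condResponse:
        while response == "":
            condResponse.wait()
        return response


def publish_response(apdu_response):
    # Main loop side: hand the processed response back to the handler.
    global response
    with condResponse:
        response = apdu_response
        condResponse.notify()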
Example #17
0
    def _distribute_command(self, command_seq, index=None):
        """
        parses command type and issues command(s) to the proper browser
        <index> specifies the type of command this is:
        = None  -> first come, first serve
        =  #    -> index of browser to send command to
        = *     -> sends command to all browsers
        = **    -> sends command to all browsers (synchronized)
        """

        # Block if the aggregator queue is too large
        agg_queue_size = self.data_aggregator.get_most_recent_status()
        if agg_queue_size >= AGGREGATOR_QUEUE_LIMIT:
            while agg_queue_size >= AGGREGATOR_QUEUE_LIMIT:
                self.logger.info(
                    "Blocking command submission until the DataAggregator "
                    "is below the max queue size of %d. Current queue "
                    "length %d. " % (AGGREGATOR_QUEUE_LIMIT, agg_queue_size))
                agg_queue_size = self.data_aggregator.get_status()

        # Distribute command
        if index is None:
            # send to first browser available
            command_executed = False
            while True:
                for browser in self.browsers:
                    if browser.ready():
                        browser.current_timeout = command_seq.total_timeout
                        thread = self._start_thread(browser, command_seq)
                        command_executed = True
                        break
                if command_executed:
                    break
                time.sleep(SLEEP_CONS)

        elif index == '*':
            # send the command to all browsers
            command_executed = [False] * len(self.browsers)
            while False in command_executed:
                for i in range(len(self.browsers)):
                    if self.browsers[i].ready() and not command_executed[i]:
                        self.browsers[
                            i].current_timeout = command_seq.total_timeout
                        thread = self._start_thread(self.browsers[i],
                                                    command_seq)
                        command_executed[i] = True
                time.sleep(SLEEP_CONS)
        elif index == '**':
            # send the command to all browsers and sync it
            condition = threading.Condition()  # block threads until ready
            command_executed = [False] * len(self.browsers)
            while False in command_executed:
                for i in range(len(self.browsers)):
                    if self.browsers[i].ready() and not command_executed[i]:
                        self.browsers[
                            i].current_timeout = command_seq.total_timeout
                        thread = self._start_thread(self.browsers[i],
                                                    command_seq, condition)
                        command_executed[i] = True
                time.sleep(SLEEP_CONS)
            with condition:
                condition.notifyAll()  # All browsers loaded, start
        elif 0 <= index < len(self.browsers):
            # send the command to this specific browser
            while True:
                if self.browsers[index].ready():
                    self.browsers[
                        index].current_timeout = command_seq.total_timeout
                    thread = self._start_thread(self.browsers[index],
                                                command_seq)
                    break
                time.sleep(SLEEP_CONS)
        else:
            self.logger.info(
                "Command index type is not supported or out of range")
            return

        if command_seq.blocking:
            thread.join()
            self._check_failure_status()
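
For the synchronized '**' case, each per-browser thread is expected to block on the shared condition until every browser has its command thread started, then proceed together. A minimal sketch of that waiting side (the real _start_thread target is not shown here, and execute_command_sequence is a hypothetical call):

def _synchronized_command(browser, command_seq, condition):
    # Runs inside the per-browser thread created by _start_thread().
    with condition:
        condition.wait()        # released by condition.notifyAll() above
    browser.execute_command_sequence(command_seq)  # hypothetical call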
Example #18
0
class MiGTCPServer(Whitelist, SocketServer.ThreadingMixIn,
    SocketServer.TCPServer):
    """An extension of TcpServer adding:
  
    * Threading (mix-in)
    * Whitelisting (mix-in + server_bind)
    * FQDN/Hostname extraction (mix-in + verify_request)
    * Contains lists of proxy agents and application sockets
    * TLS, optional TLS socket wrapping:
      - enabling: tls_conf = {key='path', cert='path'}
      - disabling: tls_conf = None
    """

    count = 0

    connections = {}
    connectionLock = threading.Lock()
    connectionCondition = threading.Condition(connectionLock)

    proxy_agents = {}
    proxyLock = threading.Lock()
    proxyCondition = threading.Condition(proxyLock)

    allow_reuse_address = 1  # Mostly for testing purposes

    def __init__(
        self,
        server_address,
        RequestHandlerClass,
        tls_conf=None,
        ):
        """Constructor overwritten to initialize TLS"""

        SocketServer.BaseServer.__init__(self, server_address,
                RequestHandlerClass)

        self.tls_conf = tls_conf
        configuration = get_configuration_object()
        if configuration.user_vmproxy_key:
            keyfile = certfile = configuration.user_vmproxy_key
            dhparamsfile = configuration.user_shared_dhparams
            ssl_ctx = hardened_openssl_context(configuration, OpenSSL, keyfile,
                                               certfile,
                                               dhparamsfile=dhparamsfile)
            self.socket = OpenSSL.SSL.Connection(ssl_ctx,
                    socket.socket(self.address_family,
                    self.socket_type))
        else:
            self.socket = socket.socket(self.address_family,
                    self.socket_type)

        self.server_bind()
        self.server_activate()

    def verify_request(self, request, client_address):
        """verify_request,
        Extended to provide whitelisting features.
        """

        # return self.peerAllowed(client_address)

        return True

    def server_bind(self):
        """server_bind,
      
        Extended for hostname extraction, hostname is used in http servers,
        vnc servers and many others, so it is conveniently added in this generic class.
        """

        SocketServer.TCPServer.server_bind(self)
        (host, port) = self.socket.getsockname()[:2]

        self.server_host = host
        self.server_name = socket.getfqdn(host)
        self.server_port = port
        logging.debug('%s Listening on %d, handling %s' % (self,
                      self.server_port, self.RequestHandlerClass))
Example #19
0
 def __init__(self) -> None:
     self.__log: logging.Logger = logging.getLogger("root.threadly")
     self.__run: List[SimpleJob] = list()
     self.lock: threading.Condition = threading.Condition()
     self.in_queue: bool = False
Example #20
0
    def __init__(self,
                 role,
                 listen_port,
                 remote_address,
                 app_id=None,
                 rank=0,
                 streaming_mode=True,
                 compression=grpc.Compression.NoCompression):
        self._role = role
        self._listen_port = listen_port
        self._remote_address = remote_address
        if app_id is None:
            app_id = 'test_trainer'
        self._app_id = app_id
        self._rank = rank
        self._streaming_mode = streaming_mode
        self._compression = compression

        self._prefetch_handlers = []
        self._data_block_handler_fn = None

        # Connection related
        self._connected = False
        self._connected_at = 0
        self._terminated = False
        self._terminated_at = 0
        self._peer_terminated = False
        self._identifier = '%s-%s-%d-%d' % (app_id, role, rank, int(
            time.time()))  # Ensure unique per run
        self._peer_identifier = ''

        # data transmit
        self._condition = threading.Condition()
        self._current_iter_id = None
        self._next_iter_id = 0
        self._peer_next_iter_id = 0
        self._received_data = {}

        # grpc client
        self._transmit_send_lock = threading.Lock()
        self._client_lock = threading.Lock()
        self._grpc_options = [('grpc.max_send_message_length', 2**31 - 1),
                              ('grpc.max_receive_message_length', 2**31 - 1)]
        self._channel = make_insecure_channel(remote_address,
                                              ChannelType.REMOTE,
                                              options=self._grpc_options,
                                              compression=self._compression)
        self._client = tws_grpc.TrainerWorkerServiceStub(self._channel)
        self._next_send_seq_num = 0
        self._transmit_queue = _MessageQueue()
        self._client_daemon = None
        self._client_daemon_shutdown_fn = None

        # server
        self._transmit_receive_lock = threading.Lock()
        self._next_receive_seq_num = 0
        self._server = grpc.server(futures.ThreadPoolExecutor(max_workers=10),
                                   options=self._grpc_options,
                                   compression=self._compression)
        tws_grpc.add_TrainerWorkerServiceServicer_to_server(
            Bridge.TrainerWorkerServicer(self), self._server)
        self._server.add_insecure_port('[::]:%d' % listen_port)
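
The _condition/_received_data pair is typically consumed by a receive path that blocks until the peer has transmitted data for the current iteration; a compact sketch of that pattern (method names and message layout are assumptions):

    def _wait_for_data(self, iter_id, name, timeout=None):
        with self._condition:
            ok = self._condition.wait_for(
                lambda: name in self._received_data.get(iter_id, {}),
                timeout)
            return self._received_data[iter_id][name] if ok else None

    def _on_data_received(self, iter_id, name, payload):
        # Called from the gRPC servicer thread for each incoming message.
        with self._condition:
            self._received_data.setdefault(iter_id, {})[name] = payload
            self._condition.notify_all()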
Example #21
0
 def __init__(self):
     self._condition = threading.Condition()
     self._values = []
     self._open = True
Example #22
0
 def __init__(self, window_size=100):
     super(_MessageQueue, self).__init__()
     self._window_size = window_size
     self._condition = threading.Condition()
     self._queue = collections.deque()
     self._next = 0
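
A window-limited queue like this one usually lets the sender block once window_size unacknowledged messages are outstanding and advances _next as acknowledgements arrive; a sketch of the two operations (names and acknowledgement semantics are assumptions):

    def put(self, msg):
        with self._condition:
            # Block while the unacknowledged window is full.
            self._condition.wait_for(
                lambda: len(self._queue) < self._window_size)
            self._queue.append(msg)
            self._condition.notify_all()

    def ack(self, seq_num):
        with self._condition:
            # Drop everything up to and including seq_num and move the window.
            while self._queue and self._next <= seq_num:
                self._queue.popleft()
                self._next += 1
            self._condition.notify_all()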
Example #23
0
 def __init__(self):
     self.m = threading.Condition()
     self.count = 0
Example #24
0
 def __init__(self, balance=0):
     self.balance = balance
     lock = threading.Lock()
     self.condition = threading.Condition(lock)
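
The classic use of a Condition built on an explicit Lock is a withdraw that waits until a deposit makes the balance sufficient; deposit/withdraw below are illustrative methods, not shown in the excerpt:

    def deposit(self, amount):
        with self.condition:
            self.balance += amount
            self.condition.notify_all()   # wake withdrawers waiting for funds

    def withdraw(self, amount, timeout=None):
        with self.condition:
            if not self.condition.wait_for(lambda: self.balance >= amount,
                                           timeout):
                return False              # timed out before funds arrived
            self.balance -= amount
            return True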
Example #25
0
 def __init__(self):
     self._condition = threading.Condition()
     self._result = None
     self._done = False
     self._done_callbacks = []
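
A future like this one is completed by one thread and awaited by others; a compact sketch of the two halves, including running any callbacks registered before completion (set_result/result are assumed names):

    def set_result(self, value):
        with self._condition:
            self._result = value
            self._done = True
            callbacks, self._done_callbacks = self._done_callbacks, []
            self._condition.notify_all()
        for cb in callbacks:              # run callbacks outside the lock
            cb(value)

    def result(self, timeout=None):
        with self._condition:
            self._condition.wait_for(lambda: self._done, timeout)
            return self._result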
Example #26
0
    def __init__(self,
                 cmd=Cmd(),
                 dataSocket=None,
                 ctrlSocket=None,
                 nSamples=16384,
                 tms1mmReg=TMS1mmX19Config.TMS1mmReg(),
                 sigproc=None):

        self.cmd = cmd
        self.dataSocket = dataSocket
        self.ctrlSocket = ctrlSocket
        self.dataFName = ["adc.dat", "sdm.dat"]
        # number of chips
        self.nCh = 19
        self.nAdcCh = 20
        self.adcSdmCycRatio = 5
        self.nSamples = nSamples
        self.nWords = 512 / 32 * self.nSamples
        # signal processor
        if not sigproc:
            self.sigproc = SigProc(self.nSamples, self.nAdcCh, self.nCh,
                                   self.adcSdmCycRatio)
        else:
            self.sigproc = sigproc
        # adc sampling interval in us
        self.adcDt = 0.2
        #         self.adcData0 = [[i*0.0001 for i in xrange(self.nSamples)] for j in xrange(self.nAdcCh)]
        self.adcData0 = self.sigproc.generate_adcDataBuf()
        self.adcData = self.sigproc.generate_adcDataBuf(
        )  # ((ctypes.c_float * self.nSamples) * self.nAdcCh)()
        self.sdmData = self.sigproc.generate_sdmDataBuf(
        )  # ((ctypes.c_byte * (self.nSamples*self.adcSdmCycRatio)) * (self.nCh*2))()
        # size equals FPGA internal data fifo size
        self.sampleBuf = bytearray(4 * self.nWords)
        # number of voltages in a sensor to control
        self.nVolts = 6
        # update time interval (second)
        self.tI = 0.5
        #
        self.x2gain = 2
        self.bufferTest = 0
        self.sdmMode = 0  # 0 : disabled, 1 : normal operation, 2 : test with signal injection
        self.aoutBuf = 0  # 0 : AOUT1, 1 : AOUT2, >1 : disable both
        #
        self.voltsNames = [
            'VBIASN', 'VBIASP', 'VCASN', 'VCASP', 'VDIS', 'VREF'
        ]
        self.cv = threading.Condition()  # condition variable
        ########################################< cv protected <
        self.quit = False
        self.vUpdated = False
        #
        self.inputVs = [1.379, 1.546, 1.626, 1.169, 1.357, 2.458]
        self.inputVcodes = [tms1mmReg.dac_volt2code(v) for v in self.inputVs]
        # measured and returned values, not used but displayed
        self.voltsOutput = [0.0 for i in xrange(self.nVolts)]
        self.inputIs = [0.0 for i in xrange(self.nVolts)]
        #
        self.currentSensor = 0
        self.sensorVcodes = [[v for v in self.inputVcodes]
                             for i in xrange(self.nCh)]
        ########################################> cv protected >
        self.tms1mmReg = tms1mmReg

        self.isGood = [None] * self.nCh
        self.getCurrentBest()
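
The fields marked "cv protected" above are usually consumed by a control loop that sleeps on self.cv until the GUI either updates the voltages or asks the loop to quit; a short sketch of that loop (apply_voltages is a hypothetical step, not part of the original class):

    def control_loop(self):
        while True:
            with self.cv:
                # Wake on every update request, or at least every self.tI seconds.
                self.cv.wait(self.tI)
                if self.quit:
                    break
                updated = self.vUpdated
                self.vUpdated = False
            if updated:
                self.apply_voltages(self.sensorVcodes[self.currentSensor])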
Example #27
0
 def __init__(self):
     self._cond = threading.Condition()
     self._busy = False
Example #28
0
 def __init__(self, count=1):
     self.count = count
     self.lock = threading.Condition()
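
A count plus a Condition is the usual ingredient list for a countdown latch: threads calling wait_until_zero() block until count_down() has been called `count` times (both methods are assumptions about the rest of the class):

    def count_down(self):
        with self.lock:
            if self.count > 0:
                self.count -= 1
            if self.count == 0:
                self.lock.notify_all()    # release every waiter

    def wait_until_zero(self, timeout=None):
        with self.lock:
            return self.lock.wait_for(lambda: self.count == 0, timeout)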
Example #29
0
 def __init__(self, eye):
     State.__init__(self, eye)
     self.condition = threading.Condition()
     thread.start_new_thread(self.start_server, ())
     self.shadow = False
Example #30
0
 def __init__(self, f, delay=1):
     self.f = f
     self.delay = delay
     self.aborted = False
     self.cond = threading.Condition()
     super(Delay, self).__init__()
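
Given these fields, the thread body presumably sleeps on the condition so abort() can cut the delay short instead of letting it run out; a sketch of run() and abort() consistent with that reading (neither method is shown in the excerpt):

    def run(self):
        with self.cond:
            if not self.aborted:
                self.cond.wait(self.delay)   # abort() can end the wait early
            fire = not self.aborted
        if fire:
            self.f()                         # invoke the delayed callable

    def abort(self):
        with self.cond:
            self.aborted = True
            self.cond.notify_all()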