Example #1
def interval_host(host, time, f, *args, **kwargs):
    '''
    Creates an Event attached to the *host* that will execute the *f*
    function every *time* seconds.

    See example in :ref:`sample_inter`

    :param Proxy host: proxy of the host. Can be obtained from inside a
        class with ``self.host``.
    :param int time: seconds for the intervals.
    :param func f: function to be called every *time* seconds.
    :param list args: positional arguments for *f*.
    :param dict kwargs: keyword arguments for *f*.
    :return: :class:`Event` instance of the interval.
    '''
    def wrap(stop_event, *args, **kwargs):
        # use this thread's own name rather than the closure variable
        # `thread_id`, which is only assigned after start()
        thread_name = currentThread().getName()
        while not stop_event.is_set():
            f(*args, **kwargs)
            stop_event.wait(time)
        host.detach_interval(thread_name)
    t2_stop = Event()
    t = Thread(target=wrap, args=(t2_stop,) + args, kwargs=kwargs)
    t.start()
    thread_id = t.getName()
    host.attach_interval(thread_id, t2_stop)
    return t2_stop
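The same stop-event pattern works without an actor host. A minimal, self-contained sketch (the `interval` helper below is an illustration, not part of the API above):

from threading import Event, Thread

def interval(seconds, f, *args, **kwargs):
    stop = Event()
    def loop():
        while not stop.is_set():
            f(*args, **kwargs)
            stop.wait(seconds)
    Thread(target=loop, daemon=True).start()
    return stop

ticker = interval(1.0, print, 'tick')  # prints 'tick' roughly once a second
# setting the event later ends the interval: ticker.set()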
Example #2
def main():
    """
    Main function of the proxy scanner.
    """
    global pl, output, q

    parser = ArgumentParser(description='Scans a list of proxies to determine which work for HTTPS.')
    parser.add_argument('--output', default='output/proxies.txt', type=str,
        help='The file in which to store the found proxies.')
    parser.add_argument('--threads', default=10, type=int,
        help='Number of threads to use.')

    args = parser.parse_args()
    output = args.output

    threads = args.threads
    q = Queue(threads * 3)

    print 'Starting threads.'
    for x in xrange(threads):
        t = Thread(target=check_proxies)
        t.daemon = True
        t.start()

    print 'Queueing proxies.'
    for proxy in proxies.proxies:
        q.put(proxy)
    q.join()

    save_proxies()
Example #3
    def __init__(self):
        self.title = self.empty_title

        # we are listening to i3 events in a separate thread
        t = Thread(target=self._loop)
        t.daemon = True
        t.start()
Example #4
def do_main_program():
	
	do_scheduler()

	# read config and init sensors
	
	global sensors
	sensors = config.readConfig()
	
	logger.debug(sensors.keys())

	threadHTTP = Thread(target=inetServer.threadHTTP)
	threadHTTP.setDaemon(True)
	threadHTTP.start()

	
	while 1:
		try:
			time.sleep(0.1)
		except KeyboardInterrupt:
			print >> sys.stderr, '\nExiting by user request.\n'
			sys.exit(0)
Example #5
	def loop(self):
		if self.authorization_failed: return
		super(GetworkSource, self).loop()

		thread = Thread(target=self.long_poll_thread)
		thread.daemon = True
		thread.start()

		while True:
			if self.should_stop: return

			if self.check_failback():
				return True

			try:
				with self.switch.lock:
					miner = self.switch.updatable_miner()
					while miner:
						work = self.getwork()
						self.queue_work(work, miner)
						miner = self.switch.updatable_miner()

				self.process_result_queue()
				sleep(1)
			except Exception:
				say_exception("Unexpected error:")
				break
Example #6
 def do_mount(self, args):
     if self.dbg:
         print("DEBUG: Mount called")
     if self.fuse_enabled is False:
         print("ERROR: Only able to mount on systems with fuse installed")
         return
     from .File_System import memfuse
     self.fuseFS = memfuse.MemFS(self.fs)
     if self.dbg:
         print("DEBUG: MemFS has been created")
     newDir = False
     if not os.path.exists(self.mp):
         os.makedirs(self.mp)
         newDir = True
     if self.dbg:
         print("DEBUG: Mountpoint has been prepared")
     window = Thread(target=self.open_window)
     monitor = Thread(target=self.background_upload)
     window.start()
     if self.dbg:
         print("DEBUG: Window opened, about to mount")
     self.mounted = True
     monitor.start()
     memfuse.mount(self.fuseFS, self.mp)
     self.mounted = False
     if newDir:
         os.rmdir(self.mp)
     if not self.cmdloopused:
         print("STATUS: Uploading all files to online")
         self.do_uploadfs(self)
Example #7
def runWorker(w, args, checklist, quithooks, queue=None):
    global killed
    global workers
    
    thrdRun = Thread(target=run, args=(w, args))
    thrdRun.daemon = True
    
    thrdRun.start()
    time.sleep(1)
    
    while (w.hasLines() or not w.isDone()) and not killed:
        line = w.nextLine()
        
        if line != None:
            message("Line", "Out", line.strip(), False)
            logLine(line)
            
            if maintainChecklist(w, checklist, line):
                if isChecklistDone(checklist):
                    message(w.fullName(), "Run complete", "Moving into background")
                    break
            else:
                if checkQuitHooks(w, quithooks, line):
                    globalKill()
        
        if queue:
            try:
                queued_command = queue.get_nowait()
                
                if queued_command == "quit":
                    w.kill()
                    queue.put("ok")
            except Empty:
                pass
Example #8
def run(data, d=None):
	global timer
	print(data)

	if data == "ring":
		if int(d) == 1:
			if timer:
				timer.cancel()

			timer = Timer(60, genugdavon)
			timer.start()

			check = Thread(target=check_locks)
			check.start()

			putfile('/sys/class/gpio/gpio11/value', '0')

			playsound("/root/ring-bb.wav")
		else:
			playsound("/root/ring-fis.wav")
	

	if data == "open":
#		if not locked_oben:
		playsound("/root/open.wav")
	
	if data == "summ":
		if timer:
			timer.cancel()

#		if not locked_oben:
		putfile('/sys/class/gpio/gpio11/value', '1')
		playsound("/root/summ.wav")
Example #9
class AntiFlapping(object):
    """
    AntiFlapping class to process event in a timely maneer
    """
    def __init__(self, window):
        self.window = window
        self.tasks = Queue(maxsize=1)
        self._window_ended = True
        self._thread = Thread(name="AntiFlapping", target=self._run)
        self._thread.start()

    def newEvent(self, func, kwargs={}):
        """
        newEvent Triggered.
        """
        if not self.tasks.full() and self._window_ended:
            self.tasks.put({'func': func, 'args':kwargs})

    def _run(self):
        """
        internal runloop that will fire tasks in order.
        """
        while True:
            task = self.tasks.get()
            self._window_ended = False
            sleep(self.window)
            self._window_ended = True
            if task['args']:
                task['func'](**task['args'])
            else:
                task['func']()
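A usage sketch for the class above, assuming it is in scope with Queue, Thread and sleep imported; the restart task is a hypothetical stand-in:

def restart_service(name):
    # stand-in task; any callable works
    print('restarting', name)

anti_flap = AntiFlapping(window=5)
anti_flap.newEvent(restart_service, {'name': 'nginx'})
# events fired while the 5-second window is open are silently dropped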
Example #10
 def listen_threaded(cls, port, callback):
     """ Starts a new thread listening to the given port """
     thread = Thread(target=cls.__listen_forever, args=(port, callback),
                     name="NC-ListenForever")
     thread.setDaemon(True)
     thread.start()
     return thread
Example #11
    def __init__(self, n_threads):

        self.n_threads = n_threads

        self._running = True

        self.input_queues = []
        self.output_queues = []
        self.exception_queues = []
        self.threads = []
        for i in range(n_threads):
            input_queue = queue.Queue()
            output_queue = queue.Queue()
            exception_queue = queue.Queue()
            thread = Thread(
                    target=self._thread_func,
                    args=(input_queue, output_queue, exception_queue))
            thread.daemon = True

            self.input_queues.append(input_queue)
            self.output_queues.append(output_queue)
            self.exception_queues.append(exception_queue)
            self.threads.append(thread)

            thread.start()
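The worker body _thread_func is not shown above; a minimal sketch, assuming each worker consumes items until a None sentinel and reports results or errors through its own queues (self._process is a hypothetical per-item handler):

    def _thread_func(self, input_queue, output_queue, exception_queue):
        while self._running:
            item = input_queue.get()
            if item is None:          # sentinel: shut this worker down
                break
            try:
                output_queue.put(self._process(item))
            except Exception as exc:
                exception_queue.put(exc)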
Example #12
    def getSaveSymbols(self):
        ''' get and save data '''
        counter = 0
        rounds = 0

        while counter < len(self.symbols):
            size = len(self.symbols) - counter
            if BATCH_SIZE < size:
                size = BATCH_SIZE
            symbols = self.symbols[counter: counter + size]

            threads = []
            for symbol in symbols:
                thread = Thread(name = symbol, target = self.__getSaveOneSymbol, args = [symbol])
                thread.daemon = True
                thread.start()

                threads.append(thread)

            for thread in threads:
                thread.join(THREAD_TIMEOUT)  # bounded wait; the thread should finish on its own

            # can't commit from another thread: a SQLite connection can only be used by the thread that created it
            if 0 == rounds % 3:
                self.outputDAM.commit()

            counter += size
            rounds += 1
Example #13
 def send_async(cls, port, message):
     """ Starts a new thread which sends a given message to a port """
     thread = Thread(target=cls.__send_message_async, args=(port, message),
                     name="NC-SendAsync")
     thread.setDaemon(True)
     thread.start()
     return thread
Example #14
def rpc_server(handler, address, authkey):
    sock = Listener(address, authkey=authkey)
    while True:
        client = sock.accept()
        t = Thread(target=handler.handle_connection, args=(client,))
        t.daemon = True
        t.start()
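For reference, a matching client sketch using the standard library's multiprocessing.connection.Client; the address and authkey are placeholders, and what comes back depends on handler.handle_connection:

from multiprocessing.connection import Client

conn = Client(('localhost', 17000), authkey=b'peekaboo')
conn.send('ping')
print(conn.recv())
conn.close()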
Example #15
def Watch( Module, Callback, * Args, ** Kwds ):
  if Module.__file__ in WatchList:
    return

  T = Thread(target = WatchThread, args=(Module,Callback,Args,Kwds))
  T.setDaemon(True)
  T.start()
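WatchThread and WatchList are referenced but not shown; a plausible sketch that polls the module file's mtime once a second and fires the callback on change (every name and detail here is an assumption):

import os
import time

WatchList = set()  # assumed module-level registry of watched files

def WatchThread(Module, Callback, Args, Kwds):
    path = Module.__file__
    WatchList.add(path)
    last = os.path.getmtime(path)
    while True:
        time.sleep(1)
        mtime = os.path.getmtime(path)
        if mtime != last:
            last = mtime
            Callback(Module, *Args, **Kwds)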
Example #16
    def get_stock(self):
        self.sku = self.sku_input.get().strip()
        self.client_id = self.client_id_input.get().strip()
        self.cycle = 1

        region = self.region_input.get()

        if self.refresh_var.get():
            validity = self.refresh_is_valid()
            if not validity[0]:
                self.update_status(validity[1])
                return
            else:
                amt = int(self.refresh_amt_input.get())
                dly = float(self.refresh_dly_input.get())

                if amt <= 0:
                    self.update_status('Please enter a non-zero/negative amount.')
                    return
                if dly < 0:
                    self.update_status('Please enter a non-negative delay.')
                    return
        else:
            amt = 1
            dly = 1

        thread = Thread(name=self.sku+'_'+region, target=self.check_stock, args=[region, amt, dly])
        thread.daemon = True
        thread.start()

        self.disable_submit()
Example #17
class NonBlockingStreamReader:
    def __init__(self, stream):
        '''
        stream: the stream to read from.
                Usually a process' stdout or stderr.
        '''

        self._s = stream
        self._q = Queue()

        def _populateQueue(stream, queue):
            '''
            Collect lines from 'stream' and put them in 'queue'.
            '''

            while True:
                line = stream.readline()
                if line:
                    queue.put(line)
                else:
                    break

        self._t = Thread(target = _populateQueue,
                         args = (self._s, self._q))
        self._t.daemon = True
        self._t.start() #start collecting lines from the stream

    def readline(self, timeout = None):
        try:
            return self._q.get(block = timeout is not None,
                               timeout = timeout)
        except Empty:
            return None
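A usage sketch, assuming the reader class above is in scope (with Queue and Empty imported from queue, Thread from threading):

import subprocess

proc = subprocess.Popen(['ping', '-c', '3', 'localhost'],
                        stdout=subprocess.PIPE, text=True)
reader = NonBlockingStreamReader(proc.stdout)
while proc.poll() is None:
    line = reader.readline(timeout=0.1)  # None means "nothing yet"
    if line:
        print('got:', line.rstrip())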
Example #18
	def __init__(self, settings, on_loaded = lambda: None):
		self.__loaded = False
		self.__on_loaded = on_loaded
		self.__sp = self.__open(settings)
		t = Thread(target=self.__consume_beginning)
		t.daemon = True
		t.start()
Example #19
    def looped(window, *args, **kwargs):
        if hasattr(linux_native_dialog, 'native_failed'):
            import importlib
            m = importlib.import_module('calibre.gui2.qt_file_dialogs')
            qfunc = getattr(m, 'choose_' + name)
            return qfunc(window, *args, **kwargs)
        try:
            if window is None:
                return func(window, *args, **kwargs)
            ret = [None, None]
            loop = QEventLoop(window)

            def r():
                try:
                    ret[0] = func(window, *args, **kwargs)
                except:
                    ret[1] = sys.exc_info()
                    sys.exc_clear()
                while not loop.isRunning():
                    time.sleep(0.001)  # yield so that loop starts
                loop.quit()
            t = Thread(name='FileDialogHelper', target=r)
            t.daemon = True
            t.start()
            loop.exec_(QEventLoop.ExcludeUserInputEvents)
            if ret[1] is not None:
                raise ret[1][0], ret[1][1], ret[1][2]
            return ret[0]
        except Exception:
            linux_native_dialog.native_failed = True
            import traceback
            traceback.print_exc()
            return looped(window, *args, **kwargs)
Example #20
class TestStatsdLoggingDelegation(unittest.TestCase):
    def setUp(self):
        self.port = 9177
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.sock.bind(('localhost', self.port))
        self.queue = Queue()
        self.reader_thread = Thread(target=self.statsd_reader)
        self.reader_thread.setDaemon(1)
        self.reader_thread.start()

    def tearDown(self):
        # The "no-op when disabled" test doesn't set up a real logger, so
        # create one here so we can tell the reader thread to stop.
        if not getattr(self, 'logger', None):
            self.logger = utils.get_logger({
                'log_statsd_host': 'localhost',
                'log_statsd_port': str(self.port),
            }, 'some-name')
        self.logger.increment('STOP')
        self.reader_thread.join(timeout=4)
        self.sock.close()
        del self.logger
        time.sleep(0.15)  # avoid occasional "Address already in use"?

    def statsd_reader(self):
        while True:
            try:
                payload = self.sock.recv(4096)
                if payload and 'STOP' in payload:
                    return 42
                self.queue.put(payload)
            except Exception, e:
                sys.stderr.write('statsd_reader thread: %r' % (e,))
                break
Example #21
 def start(self, *args):
     """ Creates a subprocess of nping and invokes it
         with the given arguments."""
     cline = [BotConfig.NPING_PATH]
     cline = cline + args[0]
     t = Thread(target=self.__run, args=[cline])
     t.start()
Example #22
def tasks_view():
    # delete task
    if request.args.get('delete'):
        task_id = request.args.get('delete')
        connectiondb(tasks_db).delete_one({'_id': ObjectId(task_id)})
        connectiondb(vul_db).update({'task_id': ObjectId(task_id)}, {"$set": {"tag": "delete"}}, multi=True)
        return "success"
    # rescan
    elif request.args.get('rescan'):
        task_id = request.args.get('rescan')
        connectiondb(tasks_db).update_one({'_id': ObjectId(task_id)}, {'$set': {'task_status': 'Preparation'}})
        if connectiondb(vul_db).find_one({"task_id": ObjectId(task_id)}):
            connectiondb(vul_db).update({'task_id': ObjectId(task_id)}, {"$set": {"tag": "delete"}}, multi=True)
        try:
            scanner = PocsuiteScanner(ObjectId(task_id))
            t1 = Thread(target=scanner.set_scanner, args=())
            t1.start()
            return "success"
        except Exception as e:
            raise e

    # get task info for edit (get)
    elif request.args.get('edit'):
        task_id = request.args.get('edit')
        task_edit_data = connectiondb(tasks_db).find_one({'_id': ObjectId(task_id)})
        task_edit_data_json = {
            'task_name': task_edit_data['task_name'],
            'scan_target': '\n'.join(task_edit_data['scan_target']),
        }
        return jsonify(task_edit_data_json)

    # default task view
    task_data = connectiondb(tasks_db).find().sort('end_date', -1)
    return render_template('task-management.html', task_data=task_data)
Example #23
    def eject_windows(self):
        from calibre.constants import plugins
        from threading import Thread
        winutil, winutil_err = plugins['winutil']
        drives = []
        for x in ('_main_prefix', '_card_a_prefix', '_card_b_prefix'):
            x = getattr(self, x, None)
            if x is not None:
                drives.append(x[0].upper())

        def do_it(drives):
            for d in drives:
                try:
                    winutil.eject_drive(bytes(d)[0])
                except Exception as e:
                    try:
                        prints("Eject failed:", as_unicode(e))
                    except:
                        pass

        def do_it2(drives):
            import win32process
            subprocess.Popen([eject_exe()] + drives, creationflags=win32process.CREATE_NO_WINDOW).wait()

        t = Thread(target=do_it2, args=[drives])
        t.daemon = True
        t.start()
        self.__save_win_eject_thread = t
Example #24
def footpedal_handler(data):
    global instruments
    global selected_instrument_index

    left_pedal_pressed = data[1] & 1
    middle_pedal_pressed = data[1] & 2
    right_pedal_pressed = data[1] & 4

    if (left_pedal_pressed):
        # Release the currently selected instrument.
        # This will cause the run_instrument background thread for that instrument
        # to exit because the instrument handle will be invalid.
        current_instrument = instruments[selected_instrument_index]
        current_instrument.release()

        # Cycle to the next instrument
        selected_instrument_index = (selected_instrument_index + 1) % len(instruments)

        # Once we have our new instrument, acquire a handle and start using it.
        current_instrument = instruments[selected_instrument_index]
        current_instrument.acquire()
        thread = Thread(target=background_worker, args=(current_instrument,))
        thread.start()
    if (middle_pedal_pressed or right_pedal_pressed) :
        text_to_speech_async("pedal functionality not defined")
Example #25
class Client(QObject):

    new_message = pyqtSignal(str)

    def __init__(self, username, server="localhost", send_port="5557", recv_port="5556"):
        QObject.__init__(self)
        self.username = username
        self.context = zmq.Context()

        #Sender socket using PULL-PUSH pattern
        self.sender_socket = self.context.socket(zmq.PUSH)
        self.sender_socket.connect("tcp://{0}:{1}".format(server, send_port))

        #Receiver socket using SUB_PUB pattern
        self.receiver_socket = self.context.socket(zmq.SUB)
        self.receiver_socket.connect("tcp://{0}:{1}".format(server, recv_port))
        self.receiver_socket.setsockopt(zmq.SUBSCRIBE, b"")

        #Start receiver thread
        self.recv_thread = Thread(target=self.receive, daemon=True)
        self.recv_thread.start()

    def receive(self):
        while True:
            #Get the publisher's message
            message = self.receiver_socket.recv()
            self.new_message.emit(str(message))

    def send(self, message):
        message = "{0} says: {1}".format(self.username, message)
        self.sender_socket.send(message.encode())
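The matching broker for this client is not shown; a minimal sketch that pulls messages in from senders and republishes them to every subscriber (the ports mirror the client's defaults):

import zmq

context = zmq.Context()
pull = context.socket(zmq.PULL)
pull.bind("tcp://*:5557")   # clients PUSH here
pub = context.socket(zmq.PUB)
pub.bind("tcp://*:5556")    # clients SUB here
while True:
    pub.send(pull.recv())   # forward every message to all subscribers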
Example #26
def run(**kwargs):

    thread_queue = []
    for provider in list_providers("openstack"):
        mgmt_sys = cfme_data['management_systems'][provider]
        rhos_credentials = credentials[mgmt_sys['credentials']]
        default_host_creds = credentials['host_default']

        username = rhos_credentials['username']
        password = rhos_credentials['password']
        auth_url = mgmt_sys['auth_url']
        rhosip = mgmt_sys['ipaddress']
        sshname = default_host_creds['username']
        sshpass = default_host_creds['password']
        if not net.is_pingable(rhosip):
            continue
        if not net.net_check(ports.SSH, rhosip):
            print("SSH connection to {}:{} failed, port unavailable".format(
                provider, ports.SSH))
            continue
        thread = Thread(target=upload_template,
                        args=(rhosip, sshname, sshpass, username, password, auth_url, provider,
                              kwargs.get('image_url'), kwargs.get('template_name')))
        thread.daemon = True
        thread_queue.append(thread)
        thread.start()

    for thread in thread_queue:
        thread.join()
Example #27
    def _move(self, pos, velocity, acceleration, deceleration, relative, block, units):
        if velocity is None:
            velocity = self.initial_velocity

        if acceleration is None:
            acceleration = self.acceleration
        if deceleration is None:
            deceleration = self.deceleration

        pos = self._get_steps(pos, units)
        self.debug('converted steps={}'.format(pos))

        def func():
            self.set_initial_velocity(velocity)
            self.set_acceleration(acceleration)
            self.set_deceleration(deceleration)

            cmd = 'MR' if relative else 'MA'
            self.tell('{} {}'.format(cmd, pos))
            self._block()

        if block:
            func()
            return True
        else:
            t = Thread(target=func)
            t.setDaemon(True)
            t.start()
            return True
Example #28
def checkTimeOutPut(args):
    global currCommandProcess
    global stde
    global stdo
    stde = None
    stdo = None
    def executeCommand():
        global currCommandProcess
        global stdo
        global stde
        try:
            stdo, stde = currCommandProcess.communicate()
            printLog('stdout:\n'+str(stdo))
            printLog('stderr:\n'+str(stde))
        except:
            printLog("ERROR: UNKNOWN Exception - +checkWinTimeOutPut()::executeCommand()")

    currCommandProcess = subprocess.Popen(args,stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    thread = Thread(target=executeCommand)
    thread.start()
    thread.join(TIMOUT_VAL) #wait for the thread to complete 
    if thread.is_alive():
        printLog('ERROR: Killing the process - terminating thread because it is taking too much of time to execute')
        currCommandProcess.kill()
        printLog('ERROR: Timed out exception')
        raise errorHandler.ApplicationException(__file__, errorHandler.TIME_OUT)
    if stdo == "" or stdo==None:
        errCode = currCommandProcess.poll()
        printLog('ERROR: @@@@@Raising Called processor exception')
        raise subprocess.CalledProcessError(errCode, args, output=stde)
    return stdo
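On Python 3, the join-then-kill dance above can be replaced by subprocess's built-in timeout; a sketch of the equivalent behavior (the function name is illustrative):

import subprocess

def check_timeout_output(args, timeout):
    proc = subprocess.Popen(args, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    try:
        # communicate() raises TimeoutExpired if the deadline passes
        stdo, stde = proc.communicate(timeout=timeout)
    except subprocess.TimeoutExpired:
        proc.kill()
        proc.communicate()  # reap the killed process
        raise
    if not stdo:
        raise subprocess.CalledProcessError(proc.poll(), args, output=stde)
    return stdo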
Example #29
class Pulse:
    def __init__(self, consumer_key, consumer_secret, access_token, access_token_secret, geotags):
        self.consumer_key = consumer_key
        self.consumer_secret = consumer_secret
        self.access_token = access_token
        self.access_token_secret = access_token_secret
        self.geotags = geotags
    def start(self):
        self.queue = PriorityQueue()
        self.consumer = Thread(target = consume, args = (self.queue, ))
        self.consumer.start()
        self.operater = Thread(target = operate, args = (self.queue, ))
        self.operater.start()
        self.canary = canary.Canary(self.consumer_key, self.consumer_secret,\
                                    self.access_token, self.access_token_secret)
        self.canary.queue = self.queue
        def onData(canary, data):
            try:
                timestamp = time.time()
                twit = json.loads(data)
                if 'coordinates' not in twit or not twit['coordinates']:
                    return
                try:
                    tweet = {}
                    tweet['timestamp'] = timestamp
                    tweet['text'] = twit['text']
                    tweet['id'] = str(twit['id'])#does this actually work? must test!
                    tweet['latlong'] = twit['coordinates']['coordinates'][::-1]#twitter returns these flipped
                    self.canary.queue.put((0-timestamp, tweet))
                except Exception,e:
                    traceback.print_exc()
            except Exception,e:
                traceback.print_exc()
        self.canary.onData = onData
        self.canary.startStream(self.geotags)#may not work???
Example #30
    def run_windows_service(config):
        """ Initialize the web application and run it in the background, while
            displaying a small system tray icon to interact with the it.
        """
        import webbrowser
        from winservice import SysTrayIcon

        app = WebApplication(config)
        server_thread = Thread(target=app.run_server)
        server_thread.start()

        def on_quit(systray):
            """ Callback that stops the application. """
            IOLoop.instance().stop()
            server_thread.join()

        listening_port = config['web']['port'].get(int)
        open_browser = (
            lambda x: webbrowser.open_new_tab("http://127.0.0.1:{0}"
                                              .format(listening_port)))
        menu_options = (('Open in browser', None, open_browser),)

        SysTrayIcon(
            icon=os.path.join(os.path.dirname(sys.argv[0]), 'spreads.ico'),
            hover_text="Spreads Web Service",
            menu_options=menu_options,
            on_quit=on_quit,
            default_menu_index=1,
            on_click=open_browser)
Example #31
class TreeSplitter():
    '''
    Runs recursive turtle functions with signatures like
    (turtle [, *args [, **kwargs]]) across multiple threads.
    Instances act as function decorators.
    param turtle_gen : factory function that produces a turtle
    max_turtle : maximum number of turtles running at the same time
    stack_like : whether the thread pool is LIFO; defaults to False
    helper_func : companion function run alongside the main loop thread
    '''

    ### decorator initialisation
    def __init__(self,
                 turtle_gen,
                 max_turtle=1,
                 stack_like=False,
                 helper_func=None):
        '''
        Initialise the decorator object.
        '''
        # thread pool
        self._sprouts = dq()
        self._sprout_stack = stack_like

        # track how many turtles are currently running
        self._running_turtles = 0
        self._turtle_count = max_turtle
        self._qlock = Lock()

        # queue of turtles available for calls
        self._turtles = dq()
        for i in range(max_turtle):
            self._turtles.append(turtle_gen())

        # whether the main loop thread is already running
        self._looping = False

        # companion thread that runs alongside the main loop
        self._helper_func = helper_func

    ### turtle queue resource allocation
    def _acquire_turtle(self):
        '''
        Take a turtle from the queue and increment the work counter.
        '''
        with self._qlock:
            self._running_turtles += 1
            turtle_branch = self._turtles.pop()
        return turtle_branch

    def _return_turtle(self, turtle_branch):
        '''
        Return a turtle to the queue and decrement the work counter.
        turtle_branch : the turtle being returned
        '''
        with self._qlock:
            self._turtles.append(turtle_branch)
            self._running_turtles -= 1

    ### function decoration
    def _wrapper_inner(self, func, *args, **kwargs):
        '''
        Inner wrapper around the original function: takes a turtle from
        the queue and updates the running-thread count.
        '''
        # take a turtle from the queue
        turtle_branch = self._acquire_turtle()

        # run the decorated function with the original arguments
        func(turtle_branch, *args, **kwargs)

        # return the turtle to the queue
        self._return_turtle(turtle_branch)

    def __call__(self, func):
        '''
        Decorator entry point: returns the decorated version of the
        target function.
        func : the original function
        '''
        def wrapper_outer(dummy, *args, **kwargs):
            '''
            Outer wrapper.
            dummy : the original turtle argument is ignored; an internally
                allocated turtle is used instead
            *args, **kwargs : remaining arguments
            '''
            # add the inner wrapper to the thread pool, parameterised with
            # the target function and the original arguments
            sprout = Thread(target=self._wrapper_inner,
                            args=(func, *args),
                            kwargs=kwargs)
            self._sprouts.append(sprout)

            # start the main loop thread if it is not already running
            if not self._looping:
                self._looping = True
                self._loopthr = Thread(target=self._mainloop)
                self._loopthr.setDaemon(True)
                self._loopthr.start()

                # start the companion thread
                if self._helper_func:
                    self._helper_thr = Thread(target=self._helper_func,
                                              args=(self._loopthr, ))
                    self._helper_thr.setDaemon(True)
                    self._helper_thr.start()

        return wrapper_outer

    ### loop thread
    def _mainloop(self):
        '''
        Repeatedly drains the thread pool, keeping up to the maximum
        number of turtles running; hides all turtles once the pool is empty.
        '''
        # enter the main loop
        print('%d parallel thread(s) loaded' % self._turtle_count)
        while self._running_turtles > 0 or len(self._sprouts) > 0:
            # spin-wait while every turtle is busy or no work is queued
            if self._running_turtles >= self._turtle_count or len(
                    self._sprouts) == 0:
                sleep(0.005)
                continue

            # drain the pool as a stack (LIFO) or as a queue (FIFO)
            if self._sprout_stack == 'random':
                if rnd() < 0.5:
                    thr = self._sprouts.pop()
                else:
                    thr = self._sprouts.popleft()
            elif self._sprout_stack:
                thr = self._sprouts.pop()
            else:
                thr = self._sprouts.popleft()

            # start the thread
            thr.start()

        # hide all turtles and show the result
        print('Done.')
        for t in self._turtles:
            t.hideturtle()
        t.screen.update()
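A hedged usage sketch of the decorator above; the fractal-branch function and its parameters are hypothetical (real turtle/tkinter drawing is not thread-safe, so this only mirrors the class's intended shape):

import turtle

splitter = TreeSplitter(turtle_gen=turtle.Turtle, max_turtle=4)

@splitter
def branch(t, length):
    # `t` is supplied by the pool; callers pass a dummy in its place
    if length < 10:
        return
    t.forward(length)
    branch(None, length * 0.7)  # recursive calls re-enter the wrapper
    t.backward(length)

branch(None, 80)  # dummy first argument; the pool provides the real turtle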
Example #32
class JobQueue(object):
    """This class allows you to periodically perform tasks with the bot.

    Attributes:
        queue (PriorityQueue):
        bot (Bot):

    Args:
        bot (Bot): The bot instance that should be passed to the jobs

    Deprecated: 5.2
        prevent_autostart (Optional[bool]): Thread does not start during initialisation.
        Use `start` method instead.
    """
    def __init__(self, bot, prevent_autostart=None):
        if prevent_autostart is not None:
            warnings.warn(
                "prevent_autostart is being deprecated, use `start` method instead."
            )

        self.queue = PriorityQueue()
        self.bot = bot
        self.logger = logging.getLogger(self.__class__.__name__)
        self.__start_lock = Lock()
        self.__next_peek_lock = Lock(
        )  # to protect self._next_peek & self.__tick
        self.__tick = Event()
        self.__thread = None
        """:type: Thread"""
        self._next_peek = None
        """:type: float"""
        self._running = False

    def put(self, job, next_t=None):
        """Queue a new job.

        Args:
            job (Job): The ``Job`` instance representing the new job
            next_t (Optional[float]): Time in seconds in which the job should be executed first.
                Defaults to ``job.interval``

        """
        job.job_queue = self

        if next_t is None:
            next_t = job.interval

        now = time.time()
        next_t += now

        self.logger.debug('Putting job %s with t=%f', job.name, next_t)
        self.queue.put((next_t, job))

        # Wake up the loop if this job should be executed next
        self._set_next_peek(next_t)

    def _set_next_peek(self, t):
        """
        Set next peek if not defined or `t` is before next peek.
        In case the next peek was set, also trigger the `self.__tick` event.

        """
        with self.__next_peek_lock:
            if not self._next_peek or self._next_peek > t:
                self._next_peek = t
                self.__tick.set()

    def tick(self):
        """
        Run all jobs that are due and re-enqueue them with their interval.

        """
        now = time.time()

        self.logger.debug('Ticking jobs with t=%f', now)

        while True:
            try:
                t, job = self.queue.get(False)
            except Empty:
                break

            self.logger.debug('Peeked at %s with t=%f', job.name, t)

            if t > now:
                # we can get here in two conditions:
                # 1. At the second or later pass of the while loop, after we've already processed
                #    the job(s) we were supposed to at this time.
                # 2. At the first iteration of the loop only if `self.put()` had triggered
                #    `self.__tick` because `self._next_peek` wasn't set
                self.logger.debug("Next task isn't due yet. Finished!")
                self.queue.put((t, job))
                self._set_next_peek(t)
                break

            if job._remove.is_set():
                self.logger.debug('Removing job %s', job.name)
                continue

            if job.enabled:
                self.logger.debug('Running job %s', job.name)

                try:
                    job.run(self.bot)

                except:
                    self.logger.exception(
                        'An uncaught error was raised while executing job %s',
                        job.name)

            else:
                self.logger.debug('Skipping disabled job %s', job.name)

            if job.repeat:
                self.put(job)

    def start(self):
        """
        Starts the job_queue thread.

        """
        self.__start_lock.acquire()

        if not self._running:
            self._running = True
            self.__start_lock.release()
            self.__thread = Thread(target=self._main_loop, name="job_queue")
            self.__thread.start()
            self.logger.debug('%s thread started', self.__class__.__name__)

        else:
            self.__start_lock.release()

    def _main_loop(self):
        """
        Thread target of thread ``job_queue``. Runs in background and performs ticks on the job
        queue.

        """
        while self._running:
            # self._next_peek may be (re)scheduled during self.tick() or self.put()
            with self.__next_peek_lock:
                tmout = self._next_peek and self._next_peek - time.time()
                self._next_peek = None
                self.__tick.clear()

            self.__tick.wait(tmout)

            # If we were woken up by self.stop(), just bail out
            if not self._running:
                break

            self.tick()

        self.logger.debug('%s thread stopped', self.__class__.__name__)

    def stop(self):
        """
        Stops the thread
        """
        with self.__start_lock:
            self._running = False

        self.__tick.set()
        if self.__thread is not None:
            self.__thread.join()

    def jobs(self):
        """Returns a tuple of all jobs that are currently in the ``JobQueue``"""
        return tuple(job[1] for job in self.queue.queue if job)
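A hedged usage sketch against this deprecated API; the Job constructor arguments are assumptions inferred from the docstrings above, and `bot` is assumed to exist:

def alarm(bot):
    print('beep')  # any per-tick work; the callback receives the bot

job_queue = JobQueue(bot)
job_queue.put(Job(alarm, 60.0))  # assumed Job(callback, interval) signature
job_queue.start()
# ... later
job_queue.stop()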
Example #33
                    verification[season][player] = sequence_log
                    data.append(sequence_log_data)
                sequence_log = []
                sequence_log_data = []
            prev_game_number = result['GAME_NUMBER']
        if len(sequence_log_data) > 10:
            verification[season][player] = sequence_log
            data.append(sequence_log_data)

        q.task_done()

q = Queue.Queue()
for i in range(num_worker_threads):
    t = Thread(target=worker)
    t.daemon = True
    t.start()

for item in unique_entries:
    q.put(item)

q.join()       # block until all tasks are done

for season, players in verification.iteritems():
    for player, games in players.iteritems():
        for entry in games:
            print("player: %s, date: %s" % (entry['PLAYER_NAME'], entry['GAME_DATE']))

dataX = []
dataY = []
look_back = 5
for player_season in data:
Example #34
 def start_auto_detect_worker(self):
     self._auto_detect_event.clear()
     t = Thread(target=self.auto_detect_worker)
     t.daemon = True
     t.start()
     self._auto_detect_worker = t
Example #35
 def _start_cmd_sequence_worker(self):
     t = Thread(target=self.cmd_sequence_worker)
     t.daemon = True
     t.start()
     self._cmd_sequence_thread = t
Example #36
 def _start_message_rx_worker(self):
     self._running.set()
     t = Thread(target=self.msg_rx_worker)
     t.daemon = True
     t.start()
     self._msg_rx_thread = t
Example #37
def index(request):
    #initializing the server's object dictionary
    #this will be passed as the JSON response to the AJAX data collection request
    servers_object = {}

    context = RequestContext(request)
    
    #check if request method is POST
    if request.method == 'POST':
        #check if request is ajax and return json response
        #this will be used to return MySQL counter's data to the template
        if request.is_ajax():
            #fires an event when the server checkbox is checked
            if request.POST.get("single_selected_server"):
                server_name = request.POST.get("single_selected_server")
                #get the details of the current server from sqlite db
                current_server_obj = add_server.objects.filter(mysql_server_name=server_name)[0]
                #initialing a thread safe queue for storing the server details
                single_server_queue = Queue.Queue()
                #spawning a thread with the current mysql credentials from the sqlite database
                #this thread creates a new connection and get the mysql details
                connection_thread = Thread(target=get_mysql_data, args=(current_server_obj, single_server_queue))
                connection_thread.start()
                #wait for the thread to return
                connection_thread.join()
                #getting the dictionary of the global variables, status and slave status 
                current_server_data = single_server_queue.get()
                #structure of the dictionary is:
                # { 'Server_name': {
                #                   'global_var_dict': {},
                #                   'global_status_dict': {},
                #                   'slave_status_dict': {},
                #                   }
                # }
                
                #building the counters value dict from the mysql data
                server_dict = build_server_details_dict(
                                            current_server_data[current_server_data.keys()[0]]['global_var_dict'],
                                            current_server_data[current_server_data.keys()[0]]['global_status_dict'],
                                            current_server_data[current_server_data.keys()[0]]['slave_status_dict']
                                        )
                
                #mapping server in the server object dict with its counter's dict 
                servers_object[current_server_data.keys()[0]] = server_dict
                return HttpResponse(json.dumps(servers_object))
            
            #enters else every other data collection interval and resolves all the servers in sqlite database
            else:
                server_list = add_server.objects.all()
                server_info_queue = Queue.Queue()
                thread_list = []
                start_time = time.time()
                for current_server_obj in server_list:
                    #start_time = time.time()
                    connection_thread = Thread(target=get_mysql_data, args=(current_server_obj, server_info_queue))
                    connection_thread.start()
                    thread_list.append(connection_thread)
                
                for thread in thread_list:
                    thread.join()
                print "Time taken: " + str(time.time() - start_time)
                for _ in xrange(len(server_list)):
                    current_server_data = server_info_queue.get()
                    #print str(current_server_obj.mysql_server_name) + "  " + str(current_server_obj.mysql_host)
                    #global_var_dict, global_status_dict, slave_status_dict = get_mysql_data(current_server_obj)
                    #t1 += time.time() - start_time
                    #if global_var_dict == {}:
                        #print current_server_obj.mysql_host
                    server_dict = build_server_details_dict(
                                            current_server_data[current_server_data.keys()[0]]['global_var_dict'],
                                            current_server_data[current_server_data.keys()[0]]['global_status_dict'],
                                            current_server_data[current_server_data.keys()[0]]['slave_status_dict']
                                        )
                    #print server_dict['general_info']['available']
                    servers_object[current_server_data.keys()[0]] = server_dict
                    
                #print t1
                return HttpResponse(json.dumps(servers_object))

        
        if request.POST['delete_server'] == 'Delete':
            print "inside delete"
            #the delete_server_list returns the result as a list from the checkbox
            #containing the name of the mysql_server to be deleted from the sqlite database
            #s1 = request.POST.get("server_list_from_jquery")
            #delete_server_list = json.loads(s1)
            delete_server_list = request.POST.getlist('selected_server_checkbox')
            #print "inside delete"
            print delete_server_list
            #calling delete_server method from sqlite_operations module
            delete_servers(delete_server_list)
            context_dict = {
                    'server_list': add_server.objects.all(),
                    }
            return render_to_response('mypy_app/index.html', context_dict, context) 
        
     
    #building the updated context from the database
    context_dict = {
            'server_list': add_server.objects.all(), 
            }
    
    return render_to_response('mypy_app/index.html', context_dict, context)
Example #38
def myThread1(num):
    name = currentThread().getName()
    print(name, num)

def myThread2(num, id):
    name = currentThread().getName()
    print(name, num, id)

def myThread3(list, tuple, dict):
    name = currentThread().getName()
    print(name, list, tuple, dict)

th1 = Thread(target=myThread1, args=(10,))
th2 = Thread(target=myThread2, args=(10, "max"))

list = [2, 4, 6, 8]
tuple = (1, 3, 7, 9)
dict = {"bob" : 12, "sue" : 8}
th3 = Thread(target=myThread3, args=(list, tuple, dict))

th1.start(); th2.start(); th3.start()

##################################################
#
#     $ threadargs.py
#     Thread-1 10
#     Thread-2 10 max
#     Thread-3 [2, 4, 6, 8] (1, 3, 7, 9) {'bob': 12, 'sue': 8}
#
Example #39
def read_traffic_light_status():
    for arduino in arduinos:
        thread = Thread(target=read_arduino_traffic_light_status, args=(arduino, None))
        thread.start()
Example #40
    for k in range(len(news_list)):
        src = "http://jjxy.ccu.edu.cn/" + src_list[k]
        p_content = get_newsContent(src)
        print("经济学院", k, news_list[k], time_list[k], src,
              p_content)  # print the scraped information
        write_data = news_list[k] + '' + time_list[k] + '' + p_content + "\n"
        write_txt(write_data, "经济学院")  # write it to the txt file
        try:
            mysql.addnews("经济学院", k, news_list[k], time_list[k], src,
                          p_content)
        except:
            print("Failed to save to MySQL!")


#jingji()

if __name__ == '__main__':
    # pass the functions themselves as targets; the original wrote
    # target=jingji(), which runs the scraper immediately and hands Thread
    # a None target. Distinct thread names avoid rebinding the functions,
    # and the unused args tuples are dropped since the scrapers take no
    # parameters.
    jingji_thread = Thread(target=jingji)
    jixie_thread = Thread(target=jixie)
    waiguoyu_thread = Thread(target=waiguoyu)
    tejiao_thread = Thread(target=tejiao)
    guanlixueyuan_thread = Thread(target=guanlixueyuan)
    dianzixinxi_thread = Thread(target=dianzixinxi)
    jisuanji_thread = Thread(target=jisuanji)
    jisuanji_thread.start()
    dianzixinxi_thread.start()
    guanlixueyuan_thread.start()
    tejiao_thread.start()
    waiguoyu_thread.start()
    jixie_thread.start()
    jingji_thread.start()
Example #41
 def process_request(self, request, client_address):
     thread = Thread(target=self.__new_request,
                     args=(self.RequestHandlerClass, request,
                           client_address, self))
     thread.start()
    """This function is to be called when the window is closed."""
    my_msg.set("quit")
    send()
    top.destroy()
    if connected:
        NANO.close()
    exit()

top = tkinter.Tk()
top.title("Chatter")
messages_frame = tkinter.Frame(top)
my_msg = tkinter.StringVar()  # For the messages to be sent.
my_msg.set("Type your messages here.")
scrollbar = tkinter.Scrollbar(messages_frame)  # To navigate through past messages.
# Following will contain the messages.
msg_list = tkinter.Listbox(messages_frame, height=15, width=50, yscrollcommand=scrollbar.set)
scrollbar.pack(side=tkinter.RIGHT, fill=tkinter.Y)
msg_list.pack(side=tkinter.LEFT, fill=tkinter.BOTH)
msg_list.pack()
messages_frame.pack()
entry_field = tkinter.Entry(top, textvariable=my_msg)
entry_field.bind("<Return>", send)
entry_field.pack()
send_button = tkinter.Button(top, text="Send", command=send)
send_button.pack()
top.protocol("WM_DELETE_WINDOW", on_closing)
receive_thread = Thread(target=receive)
receive_thread.daemon = True
receive_thread.start()
tkinter.mainloop()  # Starts GUI execution.
Example #43
class ExecutorDriver():
    def __init__(self, executor, handlers={}, loop=None):
        self.loop = loop or IOLoop()

        self.master = env.get('MESOS_AGENT_ENDPOINT')
        self.framework_id = dict(value=env.get('MESOS_FRAMEWORK_ID'))
        self.executor_id = dict(value=env.get('MESOS_EXECUTOR_ID'))
        self.framework = {
            'id': self.framework_id,
            'framework_id': self.framework_id,
            'executor_id': self.executor_id
        }

        grace_shutdown_period = env.get('MESOS_EXECUTOR_SHUTDOWN_GRACE_PERIOD')
        if grace_shutdown_period:  # pragma: no cover
            self.grace_shutdown_period = parse_duration(grace_shutdown_period)
        else:
            self.grace_shutdown_period = 0.0

        self.checkpoint = bool(env.get('MESOS_CHECKPOINT'))
        self.local = bool(env.get('MESOS_LOCAL', True))

        self.executor = executor
        self.framework_info = None
        self.executor_info = None

        defaults = {
            Event.SUBSCRIBED: self.on_subscribed,
            Event.CLOSE: self.on_close,
            Event.MESSAGE: self.on_message,
            Event.ERROR: self.on_error,
            Event.ACKNOWLEDGED: self.on_acknowledged,
            Event.KILL: self.on_kill,
            Event.LAUNCH_GROUP: self.on_launch_group,
            Event.LAUNCH: self.on_launch,
            Event.SHUTDOWN: self.on_shutdown,
            Event.OUTBOUND_SUCCESS: self.on_outbound_success,
            Event.OUTBOUND_ERROR: self.on_outbound_error
        }
        self.handlers = merge(defaults, handlers)

        self.subscription = Subscription(self.framework,
                                         self.master,
                                         '/api/v1/executor',
                                         self.handlers,
                                         loop=self.loop)
        self.subscription.tasks = {}
        self.subscription.updates = {}

    def start(self, block=False, **kwargs):
        '''Start executor running in separate thread'''
        if not self.loop._running:
            self._loop_thread = Thread(target=self.loop.start)
            self._loop_thread.daemon = True
            self._loop_thread.start()
            while not self.loop._running:  # pragma: no cover
                sleep(0.001)

        self.loop.add_callback(self.subscription.start)
        if block:  # pragma: no cover
            self._loop_thread.join()

    def stop(self):
        log.debug('Terminating Scheduler Driver')
        self.subscription.close()
        self.loop.add_callback(self.loop.stop)
        while self.loop._running:  # pragma: no cover
            sleep(0.1)

    def update(self, status):
        if 'timestamp' not in status:
            status['timestamp'] = int(time.time())

        if 'uuid' not in status:
            status['uuid'] = encode_data(uuid.uuid4().bytes)

        if 'source' not in status:
            status['source'] = 'SOURCE_EXECUTOR'

        payload = {
            'type': 'UPDATE',
            'framework_id': self.framework_id,
            'executor_id': self.executor_id,
            'update': {
                'status': status
            }
        }
        self.loop.add_callback(self.subscription.send, payload)
        logging.debug('Executor sends status update {} for task {}'.format(
            status['state'], status['task_id']))

    def message(self, message):
        payload = {
            'type': 'MESSAGE',
            'framework_id': self.framework_id,
            'executor_id': self.executor_id,
            'message': {
                'data': encode_data(message)
            }
        }
        self.loop.add_callback(self.subscription.send, payload)
        logging.debug('Driver sends framework message {}'.format(message))

    def on_subscribed(self, info):
        executor_info = info['executor_info']
        framework_info = info['framework_info']
        agent_info = info['agent_info']
        if executor_info['executor_id'] != self.executor_id:  # pragma: no cover
            raise ExecutorException('Mismatched executor_id\'s')

        if framework_info['id'] != self.framework_id:  # pragma: no cover
            raise ExecutorException('Mismatched framework_ids')

        if self.executor_info is None or self.framework_info is None:
            self.executor_info = executor_info
            self.framework_info = framework_info
            self.executor.on_registered(self, executor_info,
                                        self.framework_info, agent_info)
        else:  # pragma: no cover
            self.executor.on_reregistered(self, agent_info)

        log.debug('Subscribed with info {}'.format(info))

    def on_close(self):
        if not self.checkpoint:
            if not self.local:  # pragma: no cover
                self._delay_kill()
            self.executor.on_shutdown(self)

        log.debug('Got close command')

    def on_launch_group(self, event):
        task_info = event['task']
        task_id = task_info['task_id']['value']
        if task_id in self.subscription.tasks:  # pragma: no cover
            raise ExecutorException('Task Exists')
        self.subscription.tasks[task_id] = task_info
        self.executor.on_launch(self, task_info)
        log.debug('Got launch group command {}'.format(event))

    def on_launch(self, event):
        task_info = event['task']
        task_id = task_info['task_id']['value']
        if task_id in self.subscription.tasks:
            raise ExecutorException('Task Exists')
        self.subscription.tasks[task_id] = task_info

        log.debug('Launching {}'.format(event))
        self.executor.on_launch(self, task_info)
        log.debug('Got launch command {}'.format(event))

    def on_kill(self, event):
        task_id = event['task_id']
        self.executor.on_kill(self, task_id)
        log.debug('Got kill command {}'.format(event))

    def on_acknowledged(self, event):
        task_id = event['task_id']['value']
        uuid_ = uuid.UUID(bytes=decode_data(event['uuid']))
        self.subscription.updates.pop(uuid_, None)
        self.subscription.tasks.pop(task_id, None)
        self.executor.on_acknowledged(self, task_id, uuid_)
        log.debug('Got acknowledge {}'.format(event))

    def on_message(self, event):
        data = event['data']
        self.executor.on_message(self, data)

    def on_error(self, event):
        message = event['message']
        self.executor.on_error(self, message)
        log.debug('Got error {}'.format(event))

    def on_shutdown(self):
        if not self.local:  # pragma: no cover
            self._delay_kill()
        self.executor.on_shutdown(self)
        log.debug('Got Shutdown command')
        self.stop()

    def on_outbound_success(self, event):
        self.executor.on_outbound_success(self, event['request'])
        log.debug('Got success on outbound {}'.format(event))

    def on_outbound_error(self, event):
        self.executor.on_outbound_error(self, event['request'],
                                        event['endpoint'], event['error'])
        log.debug('Got error on outbound {}'.format(event))

    def _delay_kill(self):  # pragma: no cover
        def _():
            try:
                time.sleep(self.grace_shutdown_period)
                os._exit(os.EX_OK)
            except Exception:
                log.exception('Failed to force kill executor')

        t = Thread(target=_)
        t.daemon = True
        t.start()

    def __str__(self):
        return '<{}: executor={}:{}:{}>'.format(
            self.__class__.__name__, self.master,
            self.subscription.master_info.info, self.framework)

    __repr__ = __str__
Example #44
def online_hosts():
    os.system('clear')
    print "\n\n"
    print "\n            \033[36m\033[1mNETWORK-SCANNER\033[0;0m\n"
    file.write('Online Hosts (0): ' + str(time.ctime()) + '\n')
    try:
        sys.stdout.write(
            "            [*]\033[94m Internet Connection Status                                      \033[0;0m:"
        )
        sys.stdout.flush()
        if socket.gethostbyname('www.google.com'):
            file.write('Connected: ' + str(time.ctime()) + '\n')
            sys.stdout.write("\033[92m     CONNECTED\033[0;0m\n")

    except Exception:
        sys.stdout.write("\033[91m         NOT CONNECTED\033[0;0m\n")
        file.write('Connection Lost: ' + str(time.ctime()) + '\n')
        sys.stdout.write(
            "            [-]\033[91mPlease Check Your Internet Connection!\033[0;0m\n\n"
        )
        time.sleep(2)
        sys.exit()

    try:

        def get_hosts(q):

            while True:
                try:
                    ip = q.get()
                    comm = [
                        'ping -c 1 -W 2 ' + ip +
                        " | grep '64 bytes from' | awk '{print $4}'"
                    ]
                    add = subprocess.Popen(comm, shell=True, stdout=PIPE)
                    address1 = add.stdout.read()
                    address = str(address1).split("\n")[0].split(":")[0]
                    try:
                        responses, unanswered = srp(
                            Ether(dst="ff:ff:ff:ff:ff:ff") / ARP(pdst=address),
                            verbose=False,
                            timeout=2,
                            retry=5)
                        if responses:
                            for s, r in responses:
                                mac = r[Ether].src
                                sys.stdout.write(
                                    "            {:20.16}        {:20.18}".
                                    format(address, mac) + "\n")
                                sys.stdout.flush()
                                data.write(
                                    str(address) + '	:	' + str(mac) + "\n")
                                break
                        q.task_done()
                    except Exception:
                        print "            [-] \033[91mError\033[0;0m Retrieving MAC Addresses, Try Again!!"
                        time.sleep(1)
                        sys.exit()

                except Exception:
                    q.task_done()
                    pass

        q = Queue(maxsize=0)
        threads = 80

        for ip_s in range(1, 255):
            com = ["route -n | grep 'UG' | awk '{print $2}'"]
            ga = subprocess.Popen(com, stdout=PIPE, shell=True)
            gate_ip = ga.stdout.read()

            ipaddr = ".".join(str(gate_ip).split(".")[0:3]) + '.' + str(ip_s)
            q.put(ipaddr)

        print "\n            [*] Getting Information..."
        a = time.time()
        sys.stdout.write(
            "            [*] \033[94mStarting Network Scanner...                                     \033[0;0m:     "
            + str(time.ctime()))
        sys.stdout.flush()
        print "\n\n            \033[1m________________________________________\033[0;0m"
        sys.stdout.write(
            "            \033[1m\033[4mIP ADDRESS                  MAC ADDRESS \033[0;0m\n\n"
        )
        sys.stdout.flush()

        data = open(
            'bin/data/' + str(date) + '/ntwrk_sc/Ntwrk_sc_' + str(c_time) +
            '.log', 'a')
        data.write('Network Scanner: ' + str(time.ctime()))

        for i in range(threads):
            thread = Thread(target=get_hosts, args=(q, ))
            thread.setDaemon(True)
            thread.start()

        q.join()
        file.write('Online Hosts (1): ' + str(time.ctime()) + '\n')

    except Exception:
        print "\n            [-] \033[91mError\033[0;0m Scanning Network"
        Options()

    sys.stdout.write(
        "\n            [+] \033[92mSuccess: Network Scan Done!                                     \033[0;0m:     "
        + str(time.ctime()))
    sys.stdout.flush()
    d = str(time.time() - a)
    c = d[0:5]
    sys.stdout.write(
        "\n            [+] Time Elapsed!                                                   :     "
        + str(c) + " seconds" + "\n\n")
    sys.stdout.flush()
    file.write('Exit: ' + str(time.ctime()) + '\n\n')
    file.close()
    data.write('\n------------END------------\n')
    data.close()
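The scanner above is a standard worker-pool: fill a Queue with target IPs, start a fixed number of daemon threads that loop over q.get()/q.task_done(), and block on q.join() until every address is processed. A minimal Python 3 sketch of the same pattern (probe is a hypothetical stand-in for the ping/ARP work):

from queue import Queue
from threading import Thread

def probe(ip):
    # placeholder for the real per-host work (ping, ARP lookup, ...)
    print("probing", ip)

def worker(q):
    while True:
        ip = q.get()
        try:
            probe(ip)
        finally:
            q.task_done()  # always mark the item done, even on error

q = Queue()
for i in range(1, 255):
    q.put("192.168.1.%d" % i)

for _ in range(80):
    t = Thread(target=worker, args=(q,))
    t.daemon = True  # daemon workers die with the main thread
    t.start()

q.join()  # returns once every queued IP has been processed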
Exemple #45
0
class PartovServer:
    def __init__(self, client_manager):
        self.config = config['partov_server']
        self.server_port = self.config['port']
        self.client_manager = client_manager
        self.client_states = {}

        self.packets = []

    def start(self):
        def sniff_loop():
            conf.iface=str(self.config['iface'])
            sniff(iface=str(self.config['iface']), filter=("tcp and port %d" % self.server_port),
                  prn=lambda x: self.handle_frame(x))

        self.is_alive = True
        self.sniff_thread = Thread(target=sniff_loop)
        self.sniff_thread.daemon = True
        self.sniff_thread.start()

    def stop(self):
        self.is_alive = False

    def disconnect_client(self, client):
        if client in self.client_states:
            del self.client_states[client]

    def get_client(self, port):
        client = self.client_manager.find_client_by_port(port)
        if client in self.client_states:
            client_state = self.client_states[client]
        else:
            client_state = ClientState()
            self.client_states[client] = client_state
        return (client, client_state)

    def handle_frame(self, frame):
        if not self.is_alive: return

        tcp = frame.payload.payload
        if type(tcp.payload) == NoPayload:
            return
        if tcp.sport == self.server_port:
            client_port = tcp.dport
        elif tcp.dport == self.server_port:
            client_port = tcp.sport
        else:
            return

        client, client_state = self.get_client(client_port)

        if client is None or not client.started:
            return
        if tcp.sport == self.server_port:
            self.handle_receive_data(client, client_state, tcp.seq, list(tcp.load))
        elif tcp.dport == self.server_port:
            self.handle_send_data(client, client_state, tcp.seq, list(tcp.load))

    def handle_receive_data(self, client, state, seq = -1, packet_data = None):
        if packet_data != None:
            if state.expected_recv_seq == -1: # first packet determines the initial sequence number
                state.expected_recv_seq = seq
            state.expected_recv_seq, state.recv_out_of_order, state.recv_buff =\
                    add_new_tcp_data(seq, packet_data,
                            state.expected_recv_seq, state.recv_out_of_order, state.recv_buff)

        if len(state.recv_buff) >= state.recv_pending:
            packet = ''.join(state.recv_buff[:state.recv_pending])
            state.recv_buff = state.recv_buff[state.recv_pending:]
            state.recv_pending = 0

            if state.recv_state == Const.StateInRecv:
                interface = struct.unpack("!I", packet[0:4])[0]
                frame = Ether(packet[4:])
                client.put_recv_frame(frame, interface)
                # self.packets.append(frame)
                state.recv_state = Const.Nothing
                state.recv_pending = Const.RecvCommandLength
            else:
                command, size = struct.unpack("!IH", packet[0:6])
                if command == Const.RawFrameReceivedNotificationType:
                    state.recv_state = Const.StateInRecv
                    state.recv_pending = size
                else:
                    state.recv_pending = Const.RecvCommandLength
            self.handle_receive_data(client, state)

    def handle_send_data(self, client, state, seq = -1, packet_data = None):
        if packet_data != None:
            if state.expected_send_seq == -1: # first packet determines the initial sequence number
                state.expected_send_seq = seq
            state.expected_send_seq, state.send_out_of_order, state.send_buff =\
                    add_new_tcp_data(seq, packet_data,
                            state.expected_send_seq, state.send_out_of_order, state.send_buff)

        if len(state.send_buff) >= state.send_pending:
            packet = ''.join(state.send_buff[:state.send_pending])
            state.send_buff = state.send_buff[state.send_pending:]
            state.send_pending = 0

            if state.send_state == Const.StateInSendFrame:
                frame = Ether(packet)
                client.put_send_frame(frame, state.send_iface)
                self.packets.append(frame)
                state.send_state = Const.Nothing
                state.send_pending = Const.SendCommandLength
            elif state.send_state == Const.StateInSendData:
                length, iface = struct.unpack("!HI", packet)
                length -= 4
                state.send_state = Const.StateInSendFrame
                state.send_iface = iface
                state.send_pending = length
            else:
                command = struct.unpack("!I", packet[0:4])[0]
                if command == Const.SendFrameCommand:
                    state.started = True
                    state.send_state = Const.StateInSendData
                    state.send_pending = 6
                else:
                    state.send_pending = Const.SendCommandLength

            self.handle_send_data(client, state)

    def get_client_run_command(self, cwd, client_number=0, client_type='my'):
        # node = self.config['node_prefix'] + str(client_number)
        node = "node" + str(client_number)
        target = ""

        if client_type == 'my':
            target = os.path.join(cwd, config['my_router_target'])
        else:
            target = os.path.join(cwd, config['router_target'])
        command = "%s --ip %s --port %d --map %s --node %s --user %s --pass %s --id %s" % \
                (target, self.config['ip'],
                 self.config['port'], self.config['map'], node,
                 self.config['user'], self.config['pass'], self.config['user'])
        #print(command)
        return command
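The stream-reassembly helper add_new_tcp_data is not shown in this example. Below is a plausible sketch that matches the call signature used above, assuming out-of-order segments are stashed in a dict keyed by sequence number and drained as the gaps close:

def add_new_tcp_data(seq, data, expected_seq, out_of_order, buff):
    # Sketch of a reassembly helper, not the original implementation.
    # seq          -- sequence number of the arriving segment
    # data         -- payload of the segment (list of bytes/chars)
    # expected_seq -- next sequence number we are waiting for
    # out_of_order -- dict mapping seq -> payload of segments seen early
    # buff         -- in-order payload collected so far
    if seq == expected_seq:
        buff.extend(data)
        expected_seq += len(data)
        # drain stashed segments that have become contiguous
        while expected_seq in out_of_order:
            segment = out_of_order.pop(expected_seq)
            buff.extend(segment)
            expected_seq += len(segment)
    elif seq > expected_seq:
        out_of_order[seq] = data  # arrived early: stash until the gap closes
    # seq < expected_seq is a retransmission and can be ignored
    return expected_seq, out_of_order, buff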
Exemple #46
0
def open_connection():
    try:
        file = open(os.path.join(os.path.dirname(__file__), "../cef/os.txt"))
        operating_system = file.readlines()[0].strip()
        file.close()

        global update_thread
        update_thread = Thread(target=update.check_update)
        update_thread.daemon = True
        update_thread.start()

        global pynode_process
        if operating_system == "win64":
            pynode_process = subprocess.Popen(
                [os.path.join(APP_DIR, "cef/win64/pynode.exe")],
                shell=False,
                cwd=APP_DIR,
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE)
        elif operating_system == "win32":
            pynode_process = subprocess.Popen(
                [os.path.join(APP_DIR, "cef/win32/pynode.exe")],
                shell=False,
                cwd=APP_DIR,
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE)
        elif operating_system == "macosx":
            pynode_process = subprocess.Popen([
                os.path.join(APP_DIR,
                             "cef/macosx/pynode.app/Contents/MacOS/pynode")
            ],
                                              shell=False,
                                              cwd=APP_DIR,
                                              stdin=subprocess.PIPE,
                                              stdout=subprocess.PIPE,
                                              stderr=subprocess.PIPE)
            # Bring window to foreground
            try:
                DEVNULL = open(os.devnull, 'w')
                subprocess.call([
                    "/usr/bin/osascript -e 'activate application \"PyNode\"'"
                ],
                                shell=True,
                                stdout=DEVNULL,
                                stderr=DEVNULL,
                                close_fds=True)
            except:
                pass
        elif operating_system == "linux":
            pynode_process = subprocess.Popen(
                [os.path.join(APP_DIR, "cef/linux/pynode")],
                shell=False,
                cwd=APP_DIR,
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE)
        time.sleep(1)

        global monitor_thread
        monitor_thread = Thread(target=monitor_data)
        monitor_thread.start()
    except Exception as e:
        traceback.print_exc(file=sys.stderr)
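The four Popen branches above differ only in the executable path, so the dispatch can be expressed as a table. A sketch (start_pynode and PYNODE_PATHS are hypothetical; the paths are copied from the branches above):

import os
import subprocess

PYNODE_PATHS = {
    "win64": "cef/win64/pynode.exe",
    "win32": "cef/win32/pynode.exe",
    "macosx": "cef/macosx/pynode.app/Contents/MacOS/pynode",
    "linux": "cef/linux/pynode",
}

def start_pynode(app_dir, operating_system):
    # raises KeyError for an unknown OS string
    path = PYNODE_PATHS[operating_system]
    return subprocess.Popen(
        [os.path.join(app_dir, path)],
        shell=False,
        cwd=app_dir,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE)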
Exemple #47
0

# initialize the central messaging controller
control = quMessageController()

# initialize the I/O handlers
deb = debugHandler(control)
kal = keepAliveHandler(control)
gio = gpioHandler(control)
eco = echoHandler(control)
net = quNetworkHandler(control)
tap = tapTimeHandler(control)

# start a thread for each handler
debugThread = Thread(target=deb.run)
debugThread.start()
kalThread = Thread(target=kal.run)
kalThread.start()
gioThread = Thread(target=gio.run)
gioThread.start()
netThread = Thread(target=net.run)
netThread.start()

# install SIGTERM handler for proper shutdown
def signal_term_handler(signum, frame):
  # use debug handler to initiate a proper shutdown in SIGTERM
  sys.stderr.write("APP: Received SIGTERM, sending SYS_SHUTDOWN to all handlers.\n")
  deb.sendEvent(quEvent(quEvent.SYS_SHUTDOWN))
 
signal.signal(signal.SIGTERM, signal_term_handler)
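The handler threads above are non-daemon, so the process only exits once each run() loop returns after seeing SYS_SHUTDOWN. A generic sketch of that shutdown contract, with a threading.Event standing in for the quEvent machinery:

import signal
import threading

stop = threading.Event()

def run():
    # handler loop: do periodic work until asked to stop
    while not stop.is_set():
        stop.wait(0.5)

t = threading.Thread(target=run)
t.start()

def on_sigterm(signum, frame):
    stop.set()  # ask the loop to finish on SIGTERM

signal.signal(signal.SIGTERM, on_sigterm)
t.join()  # main thread waits for the handler to exit cleanly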
Exemple #48
0
class EAS_Query(object):
    """
    Main class to encapsulate query jobs for EAS
    """

    EAS_TAP_URL = "https://eas.esac.esa.int/tap-dev/tap/async"

    Content_Type = "application/x-www-form-urlencoded"
    MIME_Text_Plain = "text/plain"

    def __init__(self):
        """Initialize object (class instance) attributes."""
        self.qry_params = None
        self.request = None
        self.connection = None
        self.status_info = ""
        self.jobThread = None

        self.vospace_user = ""
        self.vospace_pwd = ""
        self.vospace_auth_set = False

    def setQuery(self, adqlQry, name="myQuery", desc="This is my query"):
        """Define the query.  Multiple definitions are possible, but when run()
        is invoked, only the last one will be launched."""
        self.qry_params = urlparse.urlencode({"REQUEST":        "doQuery",
                                              "LANG":           "ADQL",
                                              "FORMAT":         "csv",
                                              "PHASE":          "RUN",
                                              "JOBNAME":        name,
                                              "JOBDESCRIPTION": desc,
                                              "QUERY":          adqlQry})
        self.request = urlrequest.Request(EAS_Query.EAS_TAP_URL, method="POST")
        self.request.add_header("Content-type", EAS_Query.Content_Type)
        self.request.add_header("Accept", EAS_Query.MIME_Text_Plain)

    def run(self):
        """Launch the last defined query.  The execution is done in a separate thread."""
        self.connection = urlrequest.urlopen(self.request, data=self.qry_params.encode("UTF-8"))
        self.qry_exit_code = self.connection.getcode()
        self.status_info = "Status: {}, Reason: {}".format(str(self.qry_exit_code),
                                                           str(self.connection.reason))
        self.connection_info = self.connection.info()
        self.connection_url = self.connection.geturl()
        self.jobid = self.connection_url[self.connection_url.rfind('/') + 1:]
        self.connection.close()
        self.jobThread = Thread(target=self.runUntilFinished, args=())
        self.jobThread.start()
        return self.qry_exit_code

    def runUntilFinished(self):
        """Performs the monitoring of the query requested, and retrieves the
        results for later use."""
        while True:
            self.request = urlrequest.Request(self.connection_url, method="GET")
            self.connection = urlrequest.urlopen(self.request)
            data = self.connection.read().decode("UTF-8")
            #print(">>> ",data)
            # XML response: parse it to obtain the current status
            dom = parseString(data)
            #print("[[[{}]]]".format(dom.toprettyxml()))  #### <<<<<====================
            phaseElement = dom.getElementsByTagName('uws:phase')[0]
            phaseValueElement = phaseElement.firstChild
            phase = phaseValueElement.toxml()
            #print ("Status: " + phase)
            # Check finished
            if phase == 'COMPLETED': break
            # wait and repeat
            sleep(0.2)
        self.connection.close()

    def exit_info(self):
        """Return exit information in case the run() method reported a failure."""
        return self.status_info

    def results(self):
        """Returns the results from the last query executed."""
        # Wait for job to finish
        self.jobThread.join()
        # Retrieve and return results data
        self.request = urlrequest.Request(self.connection_url + "/results/result", method="GET")
        self.connection = urlrequest.urlopen(self.request)
        results_data = self.connection.read()
        self.connection.close()
        return results_data
        
    def save_results_as_csv(self, file_name):
        """Takes results, already as CSV data, and store them in a local file"""
        with open(file_name, "wb") as csv_file:
            # Read the whole file at once
            csv_file.write(self.results())

    def save_results_as_fits_table(self, file_name, header=None):
        """Takes the CSV results, convert them to an ascii.table.Table and outputs
        a pyfits.hdu.table.BinTableHDU, creating a blank header if no header
        is provided.  The result is stored in a FITS file."""
        if header is None:
            prihdr = fits.Header()
            prihdu = fits.PrimaryHDU(header=prihdr)
        else:
            prihdu = fits.PrimaryHDU(header=header)

        csv_file_name = file_name + ".csv"
        self.save_results_as_csv(csv_file_name)
        tab = ascii.read(csv_file_name)
        os.unlink(csv_file_name)

        table_hdu = fits.BinTableHDU.from_columns(np.array(tab.filled()))
        myfitstable = fits.HDUList([prihdu, table_hdu])
        myfitstable.writeto(file_name, overwrite=True)

    def results_as_fits_table(self, header=None):
        """Takes the CSV results, saves them in a temp. dile, and then retrieves
        the entire content."""
        tmpfile = tempfile.NamedTemporaryFile(delete=False)
        self.save_results_as_fits_table(tmpfile.name)

        with open(tmpfile.name, "rb") as fits_file:
            # Read the whole file at once
            fits_data = fits_file.read()

        os.unlink(tmpfile.name)
        return fits_data
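A typical round trip with EAS_Query, as a sketch (the ADQL string and output file name are placeholders, and an HTTP 200 from run() is assumed to mean the job was accepted):

qry = EAS_Query()
qry.setQuery("SELECT TOP 10 * FROM catalogue", name="demo")
if qry.run() == 200:  # job launched; the monitoring thread is now polling
    qry.save_results_as_csv("results.csv")  # joins the thread, then fetches
else:
    print(qry.exit_info())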
Exemple #49
0
            if args.asyncN:
                futureOutputs.append(net.forwardAsync())
            else:
                outs = net.forward(outNames)
                predictionsQueue.put(np.copy(outs))

        while futureOutputs and futureOutputs[0].wait_for(0):
            out = futureOutputs[0].get()
            predictionsQueue.put(np.copy([out]))

            del futureOutputs[0]


framesThread = Thread(target=framesThreadBody)
framesThread.start()

processingThread = Thread(target=processingThreadBody)
processingThread.start()

#
# Postprocessing and rendering loop
#
while cv.waitKey(1) < 0:
    try:
        # Request predictions first because they are queued after the frames
        outs = predictionsQueue.get_nowait()
        frame = processedFramesQueue.get_nowait()

        postprocess(frame, outs)
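The fragment above pipelines DNN inference: forwardAsync() returns an OpenCV AsyncArray immediately, and wait_for(0) polls whether the oldest request has finished. A stripped-down sketch of the same polling loop (the model files are placeholders, and a backend with async support, such as the Inference Engine, is assumed):

import numpy as np
import cv2 as cv

net = cv.dnn.readNet("model.xml", "model.bin")  # placeholder model files
net.setPreferableBackend(cv.dnn.DNN_BACKEND_INFERENCE_ENGINE)

def handle(out):
    print(out.shape)  # stand-in for real postprocessing

pending = []
for _ in range(8):
    blob = np.random.rand(1, 3, 224, 224).astype(np.float32)
    net.setInput(blob)
    pending.append(net.forwardAsync())
    # drain whatever has completed without blocking on the rest
    while pending and pending[0].wait_for(0):
        handle(pending.pop(0).get())

# wait (timeout in nanoseconds) for requests still in flight
while pending:
    if pending[0].wait_for(1000000000):
        handle(pending.pop(0).get())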
Exemple #50
0
class MockServer:
    def __init__(self, client_manager, custom_info=None, port=None):
        self.config = config.get("mock_server", {})
        self.client_manager = client_manager
        self.custom_info = custom_info or "\n".join(self.config.get("custom_info", [])).strip()
        self.port = self.config.get("port", 7891)
        self.server_port = self.port
        self.client_sockets = {}

    def get_client_run_command(self, cwd, client_number=0):
        nodes = list(self.config['iface'].keys())
        if len(nodes) <= client_number:
            raise MockServerError("Can't run client %d with Mock Server, iface information not given" % client_number)
        return "%s --ip 127.0.0.1 --port %d --map dummymap --node %s --user dummyuser --pass dummypass --id dummyid" % \
                (os.path.join(cwd, config['processor_target']), self.port, nodes[client_number])

    def start(self):
        self.is_alive = True
        self.server_thread = Thread(target=self.listen_for_clients)
        self.server_thread.daemon = True
        self.server_thread.start()

    def stop(self):
        self.is_alive = False

    def disconnect_client(self, client):
        pass
        # sock = self.client_sockets.get(client.port, None)
        # if sock:
            # del self.client_sockets[client.port]
            # sock.close()

    def remove_client(self, client_port):
        if client_port in self.client_sockets:
            del self.client_sockets[client_port]

    def send_frame(self, client, iface, frame):
        if client.port not in self.client_sockets:
            raise MockServerError("No client connected with port '%d'", client.port)
        try:
            sock = self.client_sockets[client.port]
            frame_data = str(frame)
            sock.sendall(struct.pack("!IHI", 4, len(frame_data) + 4, iface))
            sock.sendall(frame_data)
        except socket.error:
            self.remove_client(client.port)
            raise MockServerError("Connection lost with client '%d'", client.port)

    def handle_read_data(self, sock):
        address, port = sock.getpeername()
        data = sock.recv(10)
        if len(data) == 0:
            self.remove_client(port)
            return False
        com, size, iface = struct.unpack("!IHI", data)
        frame = sock.recv(size - 4)

        client = self.client_manager.find_client_by_port(port)
        if client is None:
            self.remove_client(port)
            return False

        client.put_send_frame(Ether(frame), iface)
        return True

    def do_initial_negotiations(self, req):
        self.do_signing_in_negotiations(req)
        self.do_map_selecting_negotiations(req)
        node_name = self.do_node_selecting_negotiations(req).strip().replace("\0", "")
        self.do_information_synchronization_negotiations(req, node_name)

    def do_signing_in_negotiations(self, req):
        size = struct.unpack("!H", req.recv(2))[0]
        req.recv(size)
        req.sendall(struct.pack("!II", 0, 1))

    def do_map_selecting_negotiations(self, req):
        size = struct.unpack("!H", req.recv(2))[0]
        req.recv(size)
        req.sendall(struct.pack("!II", 1, 2))

    def do_node_selecting_negotiations(self, req):
        size = struct.unpack("!H", req.recv(2))[0]
        node_name = req.recv(size)
        req.sendall(struct.pack("!II", 1, 4))
        return node_name

    def do_information_synchronization_negotiations(self, req, node_name):
        self.do_interfaces_information_synchronization_negotiations(req, node_name)
        self.do_custom_information_synchronization_negotiations(req)

    def do_interfaces_information_synchronization_negotiations(self, req, node_name):
        req.recv(4)
        req.sendall(struct.pack("!II", 2, 1))
        iface_string = "edu::sharif::partov::nse::map::interface::EthernetInterface\0"

        if node_name not in self.config.get("iface", {}):
            raise MockServerError("No iface information found for node '%s'" % node_name)
        ifaces = self.config["iface"][node_name]

        req.sendall(struct.pack("!I", len(ifaces)))
        for iface in ifaces:
            req.sendall(struct.pack("!H", len(iface_string) + 16))
            req.sendall(iface_string)

            mac_bytes = map(lambda x: int(x, 16), iface['mac'].split(":"))
            mac_bytes.append(0)
            mac_bytes.append(0)
            req.sendall(struct.pack("BBBBBBBB", *mac_bytes))
            req.sendall(socket.inet_aton(iface['ip']))
            req.sendall(socket.inet_aton(iface['mask']))

    def do_custom_information_synchronization_negotiations(self, req):
        req.recv(4)
        custom_info = self.custom_info + '\0'
        req.sendall(struct.pack("!IIH", 2, 2, len(custom_info)))
        req.sendall(custom_info)

    def start_simulation(self, req):
        req.recv(4)
        req.sendall(struct.pack("!I", 3))

    def handle_connection(self, req):
        self.do_initial_negotiations(req)
        self.start_simulation(req)
        address, port = req.getpeername()
        self.client_sockets[port] = req

    def listen_for_clients(self):
        serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        serversocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        serversocket.bind(("127.0.0.1", self.port))
        serversocket.listen(5)
        # serversocket.setblocking(0)

        connections = [serversocket]
        while self.is_alive:
            readable, writable, exceptional = select.select(connections, [], [])
            if not self.is_alive: break
            for s in readable:
                if s is serversocket:
                    connection, client_address = s.accept()
                    connections.append(connection)
                    connection.setblocking(True)
                    self.handle_connection(connection)
                else:
                    if not self.handle_read_data(s):
                        s.shutdown(1)
                        connections.remove(s)
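handle_read_data assumes that recv(10) returns the full 10-byte header, which TCP does not guarantee. A small helper that loops until exactly n bytes have arrived (a sketch, not part of the original class) would make the framing robust:

def recv_exact(sock, n):
    # Read exactly n bytes from a socket; return None if the peer closes.
    chunks = []
    remaining = n
    while remaining > 0:
        chunk = sock.recv(remaining)
        if not chunk:
            return None
        chunks.append(chunk)
        remaining -= len(chunk)
    return b"".join(chunks)

Inside handle_read_data this would replace both recv calls: header = recv_exact(sock, 10) followed by struct.unpack("!IHI", header), and frame = recv_exact(sock, size - 4).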
Exemple #51
0
class GdbDebugDriver(DebugDriver):
    """ Implement debugging via the GDB remote interface.

    GDB servers can communicate via the RSP protocol.

    Helpful resources:

    - http://www.embecosm.com/appnotes/ean4/
      embecosm-howto-rsp-server-ean4-issue-2.html
    - https://sourceware.org/gdb/onlinedocs/gdb/Stop-Reply-Packets.html

    This class tries to be about the protocol itself, not about the
    sending and receiving of bytes. The protocol must be able to
    work over sockets and threads, a serial port and threads, and asyncio
    sockets.
    """

    logger = logging.getLogger("gdbclient")

    def __init__(self, arch, transport, pcresval=0, swbrkpt=False):
        super().__init__()
        self.arch = arch
        self.transport = transport
        self.status = DebugState.RUNNING
        self.pcresval = pcresval
        self._register_value_cache = {}  # Cached map of register values
        self.swbrkpt = swbrkpt
        self.stopreason = INTERRUPT

        self._message_handler = None
        self._stop_msg_queue = queue.Queue()
        self._msg_queue = queue.Queue(maxsize=1)
        self._rsp = RspHandler(transport)
        self._rsp.on_message = self._handle_message

    def __str__(self):
        return "Gdb debug driver via {}".format(self.transport)

    def connect(self):
        """ Connect to the target """
        self._message_handler = Thread(target=self._handle_stop_queue)
        self._message_handler.start()
        self.transport.connect()
        # self.send('?')

    def disconnect(self):
        """ Disconnect the client """
        self.transport.disconnect()
        self._stop_msg_queue.put(1337)
        self._message_handler.join()

    def _handle_stop_queue(self):
        self.logger.debug("stop thread started")
        msg = self._stop_msg_queue.get()
        while msg != 1337:
            self._process_stop_status(msg)
            msg = self._stop_msg_queue.get()
        self.logger.debug("stop thread finished")

    def run(self):
        """ start the device """
        if self.status == DebugState.STOPPED:
            self._prepare_continue()
        else:
            self.logger.warning("Already running!")

        self._send_message("c")
        self._start()

    def restart(self):
        """ restart the device """
        if self.status == DebugState.STOPPED:
            self.set_pc(self.pcresval)
            self.run()
        else:
            self.logger.warning("Cannot restart, still running!")

    def step(self):
        """ Single step the device """
        if self.status == DebugState.STOPPED:
            self._prepare_continue()
            self._send_message("s")
            self._start()
        else:
            self.logger.warning("Cannot step, still running!")

    def nstep(self, count):
        """ Single step `count` times """
        if self.status == DebugState.STOPPED:
            self._prepare_continue()
            self._send_message("n %x" % count)
            self._start()
        else:
            self.logger.warning("Cannot step, still running!")

    def _prepare_continue(self):
        """ Set program counter somewhat back to continue """
        if self.swbrkpt and self.stopreason is BRKPOINT:
            pc = self.get_pc()
            self.clear_breakpoint(pc - 4)
            self.set_pc(pc - 4)

    def stop(self):
        if self.status == DebugState.RUNNING:
            self._sendbrk()
        else:
            self.logger.warning("Cannot stop if not running")

    def _sendbrk(self):
        """ sends break command to the device """
        self.logger.debug("Sending RAW stop 0x3")
        self.transport.send(bytes([0x03]))

    def _start(self):
        """ Update state to started """
        self.status = DebugState.RUNNING
        self._register_value_cache.clear()
        self.events.on_start()

    def _stop(self):
        self.status = DebugState.STOPPED
        self.events.on_stop()

    def _process_stop_status(self, pkt):
        """ Process stopped status like these:

        S05
        T0500:00112233;
        T05thread:01;
        """
        assert pkt.startswith(("S", "T"))
        code = int(pkt[1:3], 16)  # signal number
        self.stopreason = code

        if pkt.startswith("T"):
            rest = pkt[3:]
            for pair in map(str.strip, rest.split(";")):
                if not pair:
                    continue
                name, value = pair.split(":")
                if is_hex(name):
                    # We are dealing with a register value here!
                    reg_num = int(name, 16)
                    # self.logger.error('%s', reg_num)
                    if reg_num == self.arch.gdb_registers.index(
                            self.arch.gdb_pc):
                        # TODO: fill a cache of registers
                        data = bytes.fromhex(rest[3:-1])
                        self.pcstopval, = struct.unpack("<I", data)

        if code & (BRKPOINT | INTERRUPT) != 0:
            self.logger.debug("Target stopped..")

            # If the program counter was not given in the stop packet
            # retrieve it now
            if self.arch.gdb_pc not in self._register_value_cache:
                self.logger.debug("Retrieving general registers")
                self._get_general_registers()

            self._stop()
        else:
            self.logger.debug("Target running..")
            self.status = DebugState.RUNNING

    def get_status(self):
        return self.status

    def get_pc(self):
        """ read the PC of the device """
        if self.status == DebugState.STOPPED:
            return self._get_register(self.arch.gdb_pc)
        else:
            return 0

    def set_pc(self, value):
        """ set the PC of the device """
        self._set_register(self.arch.gdb_pc, value)
        self.logger.debug("PC value set:%x", value)

    def get_fp(self):
        """ read the frame pointer """
        return 0x100  # FIXME: stub value; the code below is unreachable
        fp = self._get_register(self.arch.fp)
        self.logger.debug("FP value read:%x", fp)
        return fp

    def get_registers(self, registers):
        if self.status == DebugState.STOPPED:
            regs = self._get_general_registers()
        else:
            self.logger.warning("Cannot read registers while running")
            regs = {}
        return regs

    def _get_general_registers(self):
        """ Execute the gdb `g` command """
        data = self._send_command("g")
        data = binascii.a2b_hex(data.encode("ascii"))
        res = {}
        offset = 0
        for register in self.arch.gdb_registers:
            size = register.bitsize // 8
            reg_data = data[offset:offset + size]
            value = self._unpack_register(register, reg_data)
            res[register] = value
            # self.logger.debug('reg %s = %s', register, value)
            self._register_value_cache[register] = value
            offset += size

        if len(data) != offset:
            self.logger.error(
                "Received %s bytes register data, processed %s",
                len(data),
                offset,
            )
        return res

    def set_registers(self, regvalues):
        if self.status == DebugState.STOPPED:
            data = bytearray()
            res = {}
            offset = 0
            for register in self.arch.gdb_registers:
                reg_data = self._pack_register(register, regvalues[register])
                size = register.bitsize // 8
                data[offset:offset + size] = reg_data
                offset += size
            data = binascii.b2a_hex(data).decode("ascii")
            res = self._send_command("G %s" % data)
            if res == "OK":
                self.logger.debug("Register written")
            else:
                self.logger.warning("Registers writing failed: %s", res)

    def _get_register(self, register):
        """ Get a single register """
        if self.status == DebugState.STOPPED:
            if register in self._register_value_cache:
                value = self._register_value_cache[register]
            else:
                idx = self.arch.gdb_registers.index(register)
                data = self._send_command("p %x" % idx)
                data = binascii.a2b_hex(data.encode("ascii"))
                value = self._unpack_register(register, data)
                self._register_value_cache[register] = value
            return value
        else:
            self.logger.warning("Cannot read register %s while not stopped",
                                register)
            return 0

    def _set_register(self, register, value):
        """ Set a single register """
        if self.status == DebugState.STOPPED:
            idx = self.arch.gdb_registers.index(register)
            value = self._pack_register(register, value)
            value = binascii.b2a_hex(value).decode("ascii")
            res = self._send_command("P %x=%s" % (idx, value))
            if res == "OK":
                self.logger.debug("Register written")
            else:
                self.logger.warning("Register write failed: %s", res)

    def _unpack_register(self, register, data):
        """ Fetch a register from some data """
        fmts = {8: "<Q", 4: "<I", 2: "<H", 1: "<B"}
        size = register.bitsize // 8
        if len(data) == size:
            if size == 3:
                value = data[0] + (data[1] << 8) + (data[2] << 16)
            else:
                value, = struct.unpack(fmts[size], data)
        else:
            self.logger.error("Could not read register %s", register)
            value = 0
        return value

    @staticmethod
    def _pack_register(register, value):
        """ Put some data in a register """
        fmts = {8: "<Q", 4: "<I", 2: "<H", 1: "<B"}
        size = register.bitsize // 8
        data = struct.pack(fmts[size], value)
        return data

    def set_breakpoint(self, address: int):
        """ Set a breakpoint """
        if self.status == DebugState.STOPPED:
            res = self._send_command("Z0,%x,4" % address)
            if res == "OK":
                self.logger.debug("Breakpoint set")
            else:
                self.logger.warning("Breakpoint not set: %s", res)
        else:
            self.logger.warning("Cannot set breakpoint, target not stopped!")

    def clear_breakpoint(self, address: int):
        """ Clear a breakpoint """
        if self.status == DebugState.STOPPED:
            res = self._send_command("z0,%x,4" % address)
            if res == "OK":
                self.logger.debug("Breakpoint cleared")
            else:
                self.logger.warning("Breakpoint not cleared: %s", res)
        else:
            self.logger.warning("Cannot clear breakpoint, target not stopped!")

    def read_mem(self, address: int, size: int):
        """ Read memory from address """
        if self.status == DebugState.STOPPED:
            res = self._send_command("m %x,%x" % (address, size))
            ret = binascii.a2b_hex(res.encode("ascii"))
            return ret
        else:
            self.logger.warning("Cannot read memory, target not stopped!")
            return bytes()

    def write_mem(self, address: int, data):
        """ Write memory """
        if self.status == DebugState.STOPPED:
            length = len(data)
            data = binascii.b2a_hex(data).decode("ascii")
            res = self._send_command("M %x,%x:%s" % (address, length, data))
            if res == "OK":
                self.logger.debug("Memory written")
            else:
                self.logger.warning("Memory write failed: %s", res)
        else:
            self.logger.warning("Cannot write memory, target not stopped!")

    def _handle_message(self, message):
        # Filter stop packets:
        if message.startswith(("T", "S")):
            self._stop_msg_queue.put(message)
        else:
            self._msg_queue.put(message)

    def _send_command(self, command):
        """ Send a gdb command a receive a response """
        self._send_message(command)
        return self._recv_message()

    def _recv_message(self, timeout=3):
        """ Block until a packet is received """
        return self._msg_queue.get(timeout=timeout)

    def _send_message(self, message):
        self._rsp.sendpkt(message)
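A hypothetical session with GdbDebugDriver, assuming an arch and transport configured by the surrounding debugger framework (the breakpoint address is a placeholder):

driver = GdbDebugDriver(arch, transport, swbrkpt=True)
driver.connect()

driver.stop()                   # raw 0x03 breaks into the running target
driver.set_breakpoint(0x8000)   # Z0 packet
driver.run()                    # c packet; _process_stop_status fires on hit

pc = driver.get_pc()
data = driver.read_mem(pc, 16)  # m packet, returns raw bytes
driver.disconnect()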
Exemple #52
0
        \'{}\',
        \'{}\');
        """.format(table, stock, high_price, low_price)
        cur.execute(delete_query)
        cur.execute(query)
        self.conn.commit()


if __name__ == '__main__':
    stocks_to_fetch = ['GOOGL', 'AMZN', 'FB', 'AAPL', 'BABA']
    stock_fetcher = IEXStockFetcher(stocks_to_fetch)
    # dburl = "postgres://*****:*****@192.168.99.100:5432/stocks"
    dburl = os.environ.get("DATABASE_URL")
    conn = psycopg2.connect(dburl)
    manager = PostgreSQLStockManager(conn, stock_fetcher)

    stock_price_thread = Thread(
        target=partial(manager.fetchInsertStockLoop, 5))
    image_url_thread = Thread(
        target=partial(manager.fetchUpdateImageURLLoop, 5000))
    high_low_thread = Thread(
        target=partial(manager.fetchUpdateHighLowLoop, 5000))

    stock_price_thread.start()
    image_url_thread.start()
    high_low_thread.start()

    stock_price_thread.join()
    image_url_thread.join()
    high_low_thread.join()
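The threads above receive their arguments through functools.partial; Thread's own args parameter expresses the same binding, as this small equivalence sketch shows:

from functools import partial
from threading import Thread

def poll(interval):
    print("polling every", interval, "seconds")

# both threads run the identical call poll(5)
Thread(target=partial(poll, 5)).start()
Thread(target=poll, args=(5,)).start()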
Exemple #53
0
class CrashReporter(object):
    """
    Create a context manager that emails or uploads a report to a webserver (HQ) with the traceback on a crash.
    It can be setup to do both, or just one of the upload methods.

    If a crash report fails to upload, the report is saved locally to the `report_dir` directory. The next time the
    CrashReporter starts up, it will attempt to upload all offline reports every `check_interval` seconds. After a
    successful upload the offline reports are deleted. A maximum of `offline_report_limit` reports are saved at any
    time. Reports are named crashreport01, crashreport02, crashreport03 and so on. The most recent report is always
    crashreport01.

    Report Customizing Attributes:

    application_name: Application name as a string to be included in the report
    application_version: Application version as a string to be included in the report
    user_identifier: User identifier as a string to add to the report
    offline_report_limit: Maximum number of offline reports to save.
    recursion_depth_limit: Maximum number of tracebacks to record in the case of RuntimeError: maximum recursion depth
                           exceeded
    max_string_length: Maximum string length for values returned in variable inspection. This prevents reports which
                       contain array data from becoming too large.
    inspection_level: The number of traceback objects (from most recent) to inspect for source code, local variables etc

    :param report_dir: Directory to save offline reports.
    :param watcher: Enable a thread that periodically checks for any stored offline reports and attempts to send them.
    :param check_interval: How often the watcher will attempt to send offline reports.
    :param logger: Optional logger to use.
    :param config: Path to configuration file that defines the arguments to setup_smtp and setup_hq. The file has the
                   format of a ConfigParser file with sections [SMTP] and [HQ]

    """
    _report_name = "crash_report_%d"
    html_template = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'email_report.html')
    active = False
    application_name = None
    application_version = None
    user_identifier = None
    offline_report_limit = 10
    recursion_depth_limit = 10
    send_at_most = 3            # max number of offline reports to send in batch
    max_string_length = 1000
    obj_ref_regex = re.compile("[A-Za-z]+[0-9]*\.(?:[A-Za-z]+[0-9]*\.?)+(?!\')")

    def __init__(self, report_dir=None, config='', logger=None, activate=True,
                 watcher=True, check_interval=5*60):
        self.logger = logger if logger else logging.getLogger('CrashReporter')
        # Setup the directory used to store offline crash reports
        self.report_dir = report_dir
        self.check_interval = check_interval
        self.watcher_enabled = watcher
        self._watcher = None
        self._watcher_running = False
        self.etype = None
        self.evalue = None
        self.tb = None
        self._recursion_error = False
        self.analyzed_traceback = None
        self.payload = None
        self._excepthook = None
        self.inspection_level = 1
        self._smtp = None
        self._hq = None
        # Load the configuration from a file if specified
        if os.path.isfile(config):
            self.load_configuration(config)
        if activate:
            self.enable()

    def setup_smtp(self, host, port, user, passwd, recipients, **kwargs):
        """
        Set up the crash reporter to send reports via email using SMTP

        :param host: SMTP host
        :param port: SMTP port
        :param user: sender email address
        :param passwd: sender email password
        :param recipients: list or comma separated string of recipients
        """
        self._smtp = kwargs
        self._smtp.update({'host': host, 'port': port, 'user': user, 'passwd': passwd, 'recipients': recipients})
        try:
            self._smtp['timeout'] = int(kwargs.get('timeout', SMTP_DEFAULT_TIMEOUT))
        except Exception as e:
            logging.error(e)
            self._smtp['timeout'] = None
        self._smtp['from'] = kwargs.get('from', user)

    def setup_hq(self, server, **kwargs):
        self._hq = kwargs
        try:
            self._hq['timeout'] = int(kwargs.get('timeout', HQ_DEFAULT_TIMEOUT))
        except Exception as e:
            logging.error(e)
            self._hq['timeout'] = None
        self._hq.update({'server': server})

    def enable(self):
        """
        Enable the crash reporter. The CrashReporter is enabled by default on creation.
        """
        if not CrashReporter.active:
            CrashReporter.active = True
            # Store this function so we can set it back if the CrashReporter is deactivated
            self._excepthook = sys.excepthook
            sys.excepthook = self.exception_handler
            self.logger.info('CrashReporter: Enabled')
            if self.report_dir:
                if os.path.exists(self.report_dir):
                    if self.get_offline_reports():
                        # First attempt to send the reports, if that fails then start the watcher
                        self.submit_offline_reports()
                        remaining_reports = len(self.get_offline_reports())
                        if remaining_reports and self.watcher_enabled:
                            self.start_watcher()
                else:
                    os.makedirs(self.report_dir)

    def disable(self):
        """
        Disable the crash reporter. No reports will be sent or saved.
        """
        if CrashReporter.active:
            CrashReporter.active = False
            # Restore the original excepthook
            sys.excepthook = self._excepthook
            self.stop_watcher()
            self.logger.info('CrashReporter: Disabled')

    def start_watcher(self):
        """
        Start the watcher that periodically checks for offline reports and attempts to upload them.
        """
        if self._watcher and self._watcher.is_alive():
            self._watcher_running = True
        else:
            self.logger.info('CrashReporter: Starting watcher.')
            self._watcher = Thread(target=self._watcher_thread, name='offline_reporter')
            self._watcher.setDaemon(True)
            self._watcher_running = True
            self._watcher.start()

    def stop_watcher(self):
        """
        Stop the watcher thread that tries to send offline reports.
        """
        if self._watcher:
            self._watcher_running = False
            self.logger.info('CrashReporter: Stopping watcher.')

    def interprocess_exception_handler(self, err_name, err_msg, analyzed_tb):
        payload = self.generate_payload(err_name, err_msg, analyzed_tb)
        self.handle_payload(payload)

    def _analyze_traceback(self, traceback):
        # To prevent recording a large amount of potentially redundant tracebacks, limit the trace back for the case of
        # infinite recursion errors.
        limit = CrashReporter.recursion_depth_limit if self._recursion_error else None
        analyzed_tb = analyze_traceback(traceback, limit=limit)
        self.custom_inspection(analyzed_tb)
        # Perform serialization check on the possibly user-altered traceback
        overridden = self.__class__.custom_inspection.im_func is not CrashReporter.custom_inspection.im_func
        if overridden:
            for tb in analyzed_tb:
                for key, value in tb['Custom Inspection'].iteritems():
                    try:
                        json.dumps(value)
                    except TypeError:
                        tb['Custom Inspection'][key] = {k: safe_repr(v) for k, v in value.iteritems()}
        return analyzed_tb

    def custom_inspection(self, analyzed_traceback):
        """
        Define this function so that users can override it and add their own custom information to
        the payload in the 'Custom Inspection' key.
        """
        return analyzed_traceback

    def exception_handler(self, etype, evalue, tb):
        """
        Exception hook. Catches crashes / un-caught exceptions and passes them to handle_payload()

        :param etype: Exception type
        :param evalue: Exception value
        :param tb: Traceback
        :return:
        """
        self.etype = etype
        self.evalue = evalue
        self.tb = tb
        self._recursion_error = "maximum recursion depth exceeded" in str(self.evalue)

        if etype:
            self.logger.info('CrashReporter: Crashes detected!')
            self.analyzed_traceback = self._analyze_traceback(tb)
            self.handle_payload(self.generate_payload(etype.__name__, '%s' % evalue, self.analyzed_traceback))
        else:
            self.logger.info('CrashReporter: No crashes detected.')

        self.forward_exception(etype, evalue, tb)

    def forward_exception(self, etype, evalue, tb):
        """
        Forward the exception onto the backup copy that was made of the sys.__excepthook__

        :param etype: Exception type
        :param evalue: Exception value
        :param tb: Traceback
        :return:
        """
        self._excepthook(etype, evalue, tb)

    def handle_payload(self, payload):
        """
        Given a crash report (a JSON-representable payload), attempts to upload it. Calls the default
        exception handler (sys.__excepthook__) upon completion.
        :param payload: JSON structure containing crash report along with metadata
        :return:
        """
        self.payload = payload
        if CrashReporter.active:
            # Attempt to upload the report
            hq_success = smtp_success = False
            if self._hq is not None:
                hq_success = self.hq_submit(self.payload)
                if hq_success:
                    self.payload['HQ Submission'] = 'Sent'
            if self._smtp is not None:
                # Send the report via email
                smtp_success = self.smtp_submit(self.subject(), self.body(self.payload), self.attachments())
                if smtp_success:
                    self.payload['SMTP Submission'] = 'Sent'

        if not CrashReporter.active or (self._smtp and not smtp_success) or (self._hq and not hq_success):
            # Only store the offline report if any of the upload methods fail, or if the Crash Reporter was disabled
            report_path = self.store_report(self.payload)
            self.logger.info('Offline Report stored %s' % report_path)

    def generate_payload(self, err_name, err_msg, analyzed_tb):
        dt = datetime.datetime.now()
        payload = {'Error Type': err_name,
                   'Error Message': err_msg + self._recursion_error * " (Not all tracebacks are shown)",
                   'Application Name': self.application_name,
                   'Application Version': self.application_version,
                   'User': self.user_identifier,
                   'Date': dt.strftime('%d %B %Y'),
                   'Time': dt.strftime('%I:%M %p'),
                   'Traceback': analyzed_tb,
                   'HQ Submission': 'Not sent' if self._hq else 'Disabled',
                   'SMTP Submission': 'Not sent' if self._smtp else 'Disabled'
                   }
        return payload

    def load_configuration(self, config):
        cfg = ConfigParser.ConfigParser()

        with open(config, 'r') as _f:
            cfg.readfp(_f)
            if cfg.has_section('General'):
                general = dict(cfg.items('General'))
                self.application_name = general.get('application_name', CrashReporter.application_name)
                self.application_version = general.get('application_version', CrashReporter.application_version)
                self.user_identifier = general.get('user_identifier', CrashReporter.user_identifier)
                self.offline_report_limit = general.get('offline_report_limit', CrashReporter.offline_report_limit)
                self.max_string_length = general.get('max_string_length', CrashReporter.max_string_length)
            if cfg.has_section('SMTP'):
                self.setup_smtp(**dict(cfg.items('SMTP')))
                if 'port' in self._smtp:
                    self._smtp['port'] = int(self._smtp['port'])
                if 'recipients' in self._smtp:
                    self._smtp['recipients'] = self._smtp['recipients'].split(',')

            if cfg.has_section('HQ'):
                self.setup_hq(**dict(cfg.items('HQ')))

    def subject(self):
        """
        Return a string to be used as the email subject line.
        """
        if self.application_name and self.application_version:
            return 'Crash Report - {name} (v{version})'.format(name=self.application_name,
                                                               version=self.application_version)
        else:
            return 'Crash Report'

    def body(self, payload):
        return self.render_report(payload, inspection_level=self.inspection_level)

    def render_report(self, payload, inspection_level=1):
        with open(self.html_template, 'r') as _f:
            template = jinja2.Template(_f.read())

        return template.render(info=payload,
                               inspection_level=inspection_level)

    def attachments(self):
        """
        Generate and return a list of attachments to send with the report.
        :return: List of strings containing the paths to the files.
        """
        return []

    def delete_offline_reports(self):
        """
        Delete all stored offline reports
        :return: List of reports that still require submission
        """
        reports = self.get_offline_reports()
        remaining_reports = reports[:]
        for report in reports:
            with open(report, 'r') as _f:
                try:
                    js = json.load(_f)
                except ValueError as e:
                    logging.error("%s. Deleting crash report.")
                    os.remove(report)
                    continue
                if js['SMTP Submission'] in ('Sent', 'Disabled') and js['HQ Submission'] in ('Sent', 'Disabled'):
                    # Only delete the reports which have been sent or whose upload method is disabled.
                    remaining_reports.remove(report)
                    try:
                        os.remove(report)
                    except OSError as e:
                        logging.error(e)

        self.logger.info('CrashReporter: Deleting offline reports. %d reports remaining.' % len(remaining_reports))
        return remaining_reports

    def submit_offline_reports(self):
        """
        Submit offline reports using the enabled methods (SMTP and/or HQ)
        Returns a tuple of (N sent reports, N remaining reports)
        """
        smtp_enabled = bool(self._smtp)
        hq_enabled = bool(self._hq)
        offline_reports = self.get_offline_reports()
        logging.info('Submitting %d offline crash reports' % len(offline_reports))
        offline_reports = offline_reports[:self.send_at_most]

        if smtp_enabled:
            try:
                smtp_success = self._smtp_send_offline_reports(*offline_reports)
            except Exception as e:
                logging.error(e)
                smtp_success = [False] * len(offline_reports)
        else:
            smtp_success = [True] * len(offline_reports)

        if hq_enabled:
            try:
                hq_success = self._hq_send_offline_reports(*offline_reports)
            except Exception as e:
                logging.error(e)
                hq_success = [False] * len(offline_reports)
        else:
            hq_success = [True] * len(offline_reports)

        remaining_reports = self.delete_offline_reports()
        success = [s1 and s2 for (s1, s2) in zip(smtp_success, hq_success)]
        logging.info('%d crash reports successfully submitted' % success.count(True))
        logging.info('%d crash reports remain to be submitted' % len(remaining_reports))
        return all(success)

    def store_report(self, payload):
        """
        Save the crash report to a file, keeping the last `offline_report_limit` files in a cyclical FIFO buffer.
        The newest crash report is always named 01.
        """
        offline_reports = self.get_offline_reports()
        if offline_reports:
            # Increment the name of all existing reports 1 --> 2, 2 --> 3 etc.
            for ii, report in enumerate(reversed(offline_reports)):
                rpath, ext = os.path.splitext(report)
                n = int(re.findall('(\d+)', rpath)[-1])
                new_name = os.path.join(self.report_dir, self._report_name % (n + 1)) + ext
                shutil.copy2(report, new_name)
            os.remove(report)
            # Delete the oldest report
            if len(offline_reports) >= self.offline_report_limit:
                oldest = glob.glob(os.path.join(self.report_dir, self._report_name % (self.offline_report_limit+1) + '*'))[0]
                os.remove(oldest)
        new_report_path = os.path.join(self.report_dir, self._report_name % 1 + '.json')
        # Write a new report
        with open(new_report_path, 'w') as _f:
            json.dump(payload, _f)

        return new_report_path

    def hq_submit(self, payload):
        payload['HQ Parameters'] = self._hq if self._hq is not None else {}
        r = upload_report(self._hq['server'], payload, timeout=self._hq['timeout'])
        if r is False:
            return False
        else:
            return r.status_code == 200

    def smtp_submit(self, subject, body, attachments=None):
        smtp = self._smtp
        msg = MIMEMultipart()
        if isinstance(smtp['recipients'], list) or isinstance(smtp['recipients'], tuple):
            msg['To'] = ', '.join(smtp['recipients'])
        else:
            msg['To'] = smtp['recipients']
        msg['From'] = smtp['from']
        msg['Subject'] = subject

        # Add the body of the message
        msg.attach(MIMEText(body, 'html'))

        # Add any attachments
        if attachments:
            for attachment in attachments:
                part = MIMEBase('application', 'octet-stream')
                part.set_payload(open(attachment, 'rb').read())
                encoders.encode_base64(part)
                part.add_header('Content-Disposition',
                                'attachment; filename="%s"' % os.path.basename(attachment))
                msg.attach(part)

        try:
            ms = smtplib.SMTP(smtp['host'], smtp['port'], timeout=smtp['timeout'])
            ms.ehlo()
            ms.starttls()
            ms.ehlo()
            ms.login(smtp['user'], smtp['passwd'])
            ms.sendmail(smtp['from'], smtp['recipients'], msg.as_string())
            ms.close()
        except Exception as e:
            self.logger.error('CrashReporter: %s' % e)
            return False

        return True

    def get_offline_reports(self):
        return sorted(glob.glob(os.path.join(self.report_dir, self._report_name.replace("%d", "*"))))

    def poll(self):
        for remote, local in CrashReportingProcess.cr_pipes:
            if remote.poll():
                pkg = remote.recv()
                self.logger.debug('Interprocess payload found.')
                self.handle_payload(self.generate_payload(*pkg))
                return True
        return False

    def _watcher_thread(self):
        """
        Periodically attempt to upload the crash reports. If any upload method is successful, delete the saved reports.
        """
        while 1:
            time.sleep(self.check_interval)
            if not self._watcher_running:
                break
            self.logger.info('CrashReporter: Attempting to send offline reports.')
            self.submit_offline_reports()
            remaining_reports = len(self.get_offline_reports())
            if remaining_reports == 0:
                break
        self._watcher = None
        self.logger.info('CrashReporter: Watcher stopped.')

    def _smtp_send_offline_reports(self, *offline_reports):
        success = []
        if offline_reports:
            # Add the body of the message
            for report in offline_reports:
                with open(report, 'r') as js:
                    payload = json.load(js)
                if payload['SMTP Submission'] == 'Not sent':
                    success.append(self.smtp_submit(self.subject(), self.body(payload)))
                    if success[-1]:
                        # Set the flag in the payload signifying that the SMTP submission was successful
                        payload['SMTP Submission'] = 'Sent'
                        with open(report, 'w') as js:
                            json.dump(payload, js)
            self.logger.info('CrashReporter: %d Offline reports sent.' % sum(success))
        return success

    def _hq_send_offline_reports(self, *offline_reports):
        payloads = {}
        if offline_reports:

            for report in offline_reports:
                with open(report, 'r') as _f:
                    payload = json.load(_f)
                    if payload['HQ Submission'] == 'Not sent':
                        payload['HQ Parameters'] = self._hq if self._hq is not None else {}
                        payloads[report] = payload

            if payloads:
                r = upload_many_reports(self._hq['server'], payloads.values(), timeout=self._hq['timeout'])
                if r is False or r.status_code != 200:
                    return [False] * len(payloads)

            # Set the flag in the payload signifying that the HQ submission was successful
            for report, payload in payloads.items():
                payload['HQ Submission'] = 'Sent'
                with open(report, 'w') as js:
                    json.dump(payload, js)

            return [True] * len(payloads)
        else:
            return []
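
# A standalone sketch of the retry-watcher pattern _watcher_thread uses
# (the names below are assumptions, not part of the class above): a daemon
# thread retries delivery on an interval and exits once nothing is left.
from threading import Thread, Event

def start_watcher(send_pending, pending_count, interval=60):
    stop = Event()

    def watch():
        # wait() doubles as the sleep and wakes early when stop is set
        while not stop.wait(interval):
            send_pending()            # try to deliver the saved reports
            if pending_count() == 0:  # queue drained; the watcher is done
                break

    t = Thread(target=watch)
    t.daemon = True
    t.start()
    return stop  # callers set this Event to stop the watcher early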
Exemple #54
0
class Elevator(Component):
    ON_TARGET_DELTA = 1 / 4

    def __init__(self):
        super().__init__()
        self._motor = SyncGroup(Talon, hardware.elevator)
        self._stabilizer_piston = Solenoid(hardware.stabilizer_solenoid)

        # Motion Planning!
        self._follower = TrajectoryFollower()

        self._calibrated = False
        self.tote_count = 0
        self.has_bin = False  # Do we have a bin?
        self._reset = True  # starting a new stack?
        self.tote_first = False  # We're stacking totes without a bin.
        self._should_drop = False  # Are we currently trying to get a bin ?
        self._manual_stack = False
        self._cap = False

        self._follower.set_goal(Setpoints.BIN)  # Base state
        self._follower_thread = Thread(target=self.update_follower)
        self._follower_thread.start()

    def stop(self):
        self._motor.set(0)

    def update(self):
        goal = self._follower.get_goal()
        if self.at_goal():
            self.do_stack_logic(goal)

        self._motor.set(self._follower.output)
        self._stabilizer_piston.set(not self._should_drop)
        self.tote_first = False
        self._manual_stack = False
        self._cap = False

    def do_stack_logic(self, goal):
        if self._should_drop:  # Dropping should override everything else
            self.reset_stack()
            if not hardware.game_piece_in_intake():
                self._follower._max_acc = 100  # Put things down gently if there's space before the bottom tote
            else:
                self._follower._max_acc = 100000000000  # effectively unlimited
            self._follower.set_goal(Setpoints.BOTTOM)
            self._should_drop = False
            return

        self._follower._max_acc = 200  # Normal speed # TODO configurable

        if goal == Setpoints.BOTTOM:  # If we've just gone down to grab something
            if self.tote_count == 0 and not self.has_bin and not self.tote_first:
                self.has_bin = True  # We just stacked the bin
            else:  # We just stacked a tote
                if not self._reset:
                    self.tote_count += 1

            self._follower.set_goal(
                Setpoints.TOTE
            )  # Go back up. After stacking, you should always grab a tote.
        # If we try to stack a 6th tote it'll break the robot, don't do that.
        elif (hardware.game_piece_in_intake() or self._manual_stack
              ) and self.tote_count < 4:  # We have something, go down.
            if not self.has_bin:
                if self.tote_first or self.tote_count > 0 or self._manual_stack:
                    self._follower.set_goal(Setpoints.BOTTOM)
            else:  # We have a bin, just auto-stack.
                self._follower.set_goal(Setpoints.BOTTOM)
        else:  # Wait for a game piece & raise the elevator
            if self.is_empty():
                if self.tote_first:
                    self._follower.set_goal(Setpoints.FIRST_TOTE)
                elif self._cap:
                    setpoint = Setpoints.MAX_TRAVEL - 12 * self.tote_count
                    self._follower.set_goal(setpoint)
                else:
                    self._follower.set_goal(Setpoints.BIN)
            else:
                self._follower.set_goal(Setpoints.TOTE)
        if self._reset:
            self._reset = False

    def reset_encoder(self):
        hardware.elevator_encoder.reset()
        self._follower.set_goal(0)
        self._follower._reset = True

    def reset_stack(self):
        self.tote_count = 0
        self.has_bin = False
        self._reset = True

    def at_goal(self):
        return self._follower.trajectory_finished()  # and abs(self._follower.get_goal() - self.position) < 2

    def drop_stack(self):
        self._should_drop = True

    def stack_tote_first(self):
        self.tote_first = True

    def is_full(self):
        return self.tote_count == 5 and hardware.game_piece_in_intake()

    def is_empty(self):
        return self.tote_count == 0 and not self.has_bin

    def manual_stack(self):
        self._manual_stack = True

    def update_nt(self):
        log.info("position: %s" % hardware.elevator_encoder.getDistance())
        log.info("capping? %s" % self._cap)
        log.info("at goal? %s" % self.at_goal())
        log.info("totes: %s" % self.tote_count)

    def cap(self):
        self._cap = True

    def update_follower(self):
        while True:
            self._follower.calculate(hardware.elevator_encoder.getDistance())
            time.sleep(0.005)
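
# A minimal standalone version of the follower-loop pattern above (assumed
# names; follower and read_position are stand-ins for the real objects):
# recompute the motion profile at ~200 Hz on its own thread, with an Event
# so the loop can be stopped, unlike the unconditional while True above.
#
#   stop = Event()
#   Thread(target=run_follower, args=(follower, encoder.getDistance, stop),
#          daemon=True).start()
import time

def run_follower(follower, read_position, stop, period=0.005):
    while not stop.is_set():
        follower.calculate(read_position())  # feed the newest sensor reading
        time.sleep(period)                   # 5 ms loop, matching the class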
Exemple #55
0
    def update(self, ccy):
        t = Thread(target=self.update_safe, args=(ccy, ))
        t.daemon = True
        t.start()
Exemple #56
0
def start(cmd):
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE, text=True, bufsize=1, close_fds=ON_POSIX)
    t = Thread(target=enqueue_output, args=(process.stdout, q))
    t.daemon = True
    t.start()
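
# The snippet assumes an enqueue_output helper that is not shown; a common
# sketch (an assumption, not the original definition) reads lines off the
# pipe on the daemon thread and pushes them onto the shared queue q:
def enqueue_output(out, queue):
    for line in iter(out.readline, ''):  # '' sentinel since Popen uses text=True
        queue.put(line)
    out.close()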
Exemple #57
0
def build_or_deploy_server(task_name, server_name, build_no, event):
    ts = time.time()
    timestamp = datetime.datetime.fromtimestamp(ts).strftime(
        '%Y-%m-%d %H:%M:%S')

    if task_name == 'BUILD':

        jenkins_job_name = SkypeBot.get_jenkins_build_job_by_server_name(
            server_name)

        if jenkins_job_name in SkypeBot.build_in_progress_list:
            event.msg.chat.sendMsg(
                "Build already requested for : {0}".format(jenkins_job_name))

        else:
            SkypeBot.add_build_request(jenkins_job_name)
            event.msg.chat.sendMsg("Sure!")
            print("Building Server : {0} @ {1}".format(server_name, timestamp))
            try:

                last_queue_number = server.get_job_info(
                    jenkins_job_name)['lastBuild']['number']

                print("Build Number Before Trigger :" + str(last_queue_number))

                print(server.build_job(jenkins_job_name))
                th = Thread(target=check_build_status,
                            args=(
                                jenkins_job_name,
                                last_queue_number,
                                event,
                            ),
                            group=None)
                th.start()
                print("Continue Listening Again..")

            except NotFoundException:
                set_default_values_skype_bot()
                SkypeBot.build_in_progress_list.clear()
                print("NotFoundException")

    elif task_name == 'DEPLOY':

        jenkins_job_name = SkypeBot.get_jenkins_deploy_job_by_server_name(
            server_name)

        if jenkins_job_name in SkypeBot.deployment_in_progress_list:
            event.msg.chat.sendMsg(
                "Deployment already requested for : {0}".format(
                    jenkins_job_name))

        else:
            SkypeBot.add_deploy_request(jenkins_job_name)
            event.msg.chat.sendMsg("Sure!")
            jira_build_no = build_no.upper()
            print("Deploying Server : {0} for Build No : {1} @ {2}".format(
                server_name, jira_build_no, timestamp))
            try:
                last_queue_number = server.get_job_info(
                    jenkins_job_name)['lastBuild']['number']
                print(
                    server.build_job(
                        jenkins_job_name,
                        token=jenkins_job_name,
                        parameters={'JIRA_REL_VERSION': jira_build_no}))

                new_thread = Thread(target=check_deployment_status,
                                    args=(
                                        jenkins_job_name,
                                        last_queue_number,
                                        event,
                                    ),
                                    group=None)
                new_thread.start()
                print("Continue Listening Again..")

            except NotFoundException:
                set_default_values_skype_bot()
                SkypeBot.deployment_in_progress_list.clear()
                print("NotFoundException")
Exemple #58
0
class Websocket:
    HEADER_LEN = 6

    def __init__(self, port):
        self.port = port
        self.socket = socket.socket()
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.socket.settimeout(10.0)
        self.send_q = queue.Queue()
        self.shutdown = Event()

    def __enter__(self):
        try:
            self.socket.bind(('', self.port))
        except socket.error as e:
            print("Bind failed:{}".format(e))
            raise

        self.socket.listen(1)
        self.server_thread = Thread(target=self.run_server)
        self.server_thread.start()

        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.shutdown.set()
        self.server_thread.join()
        self.socket.close()
        self.conn.close()

    def run_server(self):
        self.conn, address = self.socket.accept()  # accept new connection
        self.conn.settimeout(10.0)

        print("Connection from: {}".format(address))

        self.establish_connection()
        print("WS established")
        # Handle connection until client closes it, will echo any data received and send data from send_q queue
        self.handle_conn()

    def establish_connection(self):
        while not self.shutdown.is_set():
            try:
                # receive data stream. it won't accept data packet greater than 1024 bytes
                data = self.conn.recv(1024).decode()
                if not data:
                    # Peer closed the socket before the handshake completed
                    raise RuntimeError('Socket closed by peer')

                if "Upgrade: websocket" in data and "Connection: Upgrade" in data:
                    self.handshake(data)
                    return

            except socket.error as err:
                print("Unable to establish a websocket connection: {}".format(
                    err))
                raise

    def handshake(self, data):
        # Magic string from RFC
        MAGIC_STRING = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
        headers = data.split("\r\n")

        client_key = None
        for header in headers:
            if "Sec-WebSocket-Key" in header:
                client_key = header.split()[1]

        if client_key:
            resp_key = client_key + MAGIC_STRING
            resp_key = base64.standard_b64encode(
                hashlib.sha1(resp_key.encode()).digest())

            resp = "HTTP/1.1 101 Switching Protocols\r\n" + \
                "Upgrade: websocket\r\n" + \
                "Connection: Upgrade\r\n" + \
                "Sec-WebSocket-Accept: {}\r\n\r\n".format(resp_key.decode())

            self.conn.send(resp.encode())

    def handle_conn(self):
        while not self.shutdown.is_set():
            r, w, e = select.select([self.conn], [], [], 1)
            try:
                if self.conn in r:
                    self.echo_data()

                if not self.send_q.empty():
                    self._send_data_(self.send_q.get())

            except socket.error as err:
                print("Stopped echoing data: {}".format(err))
                raise

    def echo_data(self):
        header = bytearray(self.conn.recv(self.HEADER_LEN, socket.MSG_WAITALL))
        if not header:
            # exit if socket closed by peer
            return

        # Remove mask bit
        payload_len = ~(1 << 7) & header[1]

        payload = bytearray(self.conn.recv(payload_len, socket.MSG_WAITALL))

        if not payload:
            # exit if socket closed by peer
            return
        frame = header + payload

        decoded_payload = self.decode_frame(frame)
        print("Sending echo...")
        self._send_data_(decoded_payload)

    def _send_data_(self, data):
        frame = self.encode_frame(data)
        self.conn.send(frame)

    def send_data(self, data):
        self.send_q.put(data.encode())

    def decode_frame(self, frame):
        # Mask out MASK bit from payload length, this len is only valid for short messages (<126)
        payload_len = ~(1 << 7) & frame[1]

        mask = frame[2:self.HEADER_LEN]

        encrypted_payload = frame[self.HEADER_LEN:self.HEADER_LEN +
                                  payload_len]
        payload = bytearray()

        for i in range(payload_len):
            payload.append(encrypted_payload[i] ^ mask[i % 4])

        return payload

    def encode_frame(self, payload):
        # Set FIN = 1 and OP_CODE = 1 (text)
        header = (1 << 7) | (1 << 0)

        frame = bytearray([header])
        payload_len = len(payload)

        # If payload len is longer than 125 then the next 16 bits are used to encode length
        if payload_len > 125:
            frame.append(126)
            frame.append(payload_len >> 8)
            frame.append(0xFF & payload_len)

        else:
            frame.append(payload_len)

        frame += payload

        return frame
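
# A usage sketch for the echo server above (the port and payload are
# arbitrary assumptions): entering the context binds and starts the server
# thread, which accepts one client, performs the RFC 6455 handshake, and
# echoes frames; send_data() queues an extra server-initiated message.
if __name__ == '__main__':
    import time
    with Websocket(8765) as ws:
        ws.send_data('hello from server')  # picked up by handle_conn
        time.sleep(30)  # keep serving while a client connects and echoes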
Exemple #59
0
        if command != "":
            key(command)
        command = ""

# Event object used to send signals from one thread to another
stop_event = Event()

time.sleep(10)


if __name__ == '__main__':
    # We create another Thread
    action_thread = Thread(target=gfg)

    # Here we start the thread and wait up to 60 seconds for it to finish.
    action_thread.start()
    action_thread.join(timeout=60)

    # We send a signal that the other thread should stop.
    stop_event.set()

    print("Timeout landing the dronekite")


if vehicle.mode != VehicleMode("RTL"):
    print(vehicle.mode)
    print("Now let's land")
    vehicle.mode = VehicleMode("RTL")
    print("Coming back Down")
    while True:
        v_alt = vehicle.location.global_relative_frame.alt
Exemple #60
0
import Queue
from threading import Thread


# A thread that produces data
def producer(out_q):
    while True:
        # Produce some data
        data = 'hola'
        out_q.put(data)


# A thread that consumes data
def consumer(in_q):
    while True:
        # Get some data
        data = in_q.get()
        # Process the data
        print data


# Create the shared queue and launch both threads
q = Queue.Queue()
t1 = Thread(target=consumer, args=(q, ))
t2 = Thread(target=producer, args=(q, ))
t1.start()
t2.start()
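
# The example above is Python 2 (the Queue module and the print statement).
# A Python 3 sketch of the same producer/consumer pattern only renames the
# queue import and makes print a function:
#
#   from queue import Queue
#   from threading import Thread
#
#   def producer(out_q):
#       while True:
#           out_q.put('hola')       # produce some data
#
#   def consumer(in_q):
#       while True:
#           print(in_q.get())       # process the data
#
#   q = Queue()
#   Thread(target=consumer, args=(q, )).start()
#   Thread(target=producer, args=(q, )).start()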