def contar(limite, delay):
    global data
    for i in range(limite):
        print str(i)
        time.sleep(delay)
        if data == 'Silva':
            thread.exit()
def processKiller(dwProcessId):
    # Attach to the process.
    win32.DebugActiveProcess(dwProcessId)
    # Quit the current thread.
    thread.exit()
def run_tests_on_device(dev, tests, results):
    for test in tests:
        if exit_now.isSet():
            thread.exit()
        result = None
        rerun = None
        try:
            result, rerun = self._RunTest(dev, test)
            if isinstance(result, base_test_result.BaseTestResult):
                results.AddResult(result)
            elif isinstance(result, list):
                results.AddResults(result)
            else:
                raise Exception("Unexpected result type: %s" % type(result).__name__)
        except:
            if isinstance(tests, test_collection.TestCollection):
                rerun = test
            raise
        finally:
            if isinstance(tests, test_collection.TestCollection):
                if rerun:
                    tests.add(rerun)
                tests.test_completed()
    logging.info("Finished running tests on this device.")
def sendData(s):
    # have to tune it according to our data then will uncomment the lines
    log = open('sendLog', 'r+')
    logs = log.readlines()
    line = int(logs[0])
    log.seek(0)
    log.write(str(int(line) + 1))
    log.truncate()
    log.close()
    dataFile = open('databaseFile', 'r')
    dataFileLines = dataFile.readlines()
    data = dataFileLines[line]
    # convert required datafile's line into a tuple and save it as json;
    # sending the json file makes it easier to feed into the database at the server
    #data = "1,simple,list1"
    print data[:-1]
    jsonfile = open('f', 'w')
    json.dump(data, jsonfile)
    jsonfile.close()
    jsonfile = open('f', 'r')
    data = jsonfile.read()
    k = len(data)
    for i in range(0, k, 1024):
        # print data
        try:
            s.send(data[i:min(i + 1023, k)])
        except socket.error:
            exit
    #s.send("\nDONE")
    jsonfile.close()
    thread.exit()
def Pinging_Thread(self):
    print "[+] Starting Ping thread"
    #self.ptc=threading.Condition()
    wait = True
    p = 0.1
    while 1:    # loop forever
        if wait and (self.ping_delay > 0):
            self.ptc.acquire()
            self.ptc.wait(self.ping_delay + self.penalty)    # send ping to server: interval + penalty
            self.ptc.release()
        self.mutex_http_req.acquire()    # Ensure that the other thread is not making a request at this time
        try:
            resp_data = self.http.HTTPreq(self.url, "")    # Read response
            if self.verbose:
                self.http.v_print(pings_n=1)
            if self.penalty < 60:
                self.penalty += p    # Don't wait more than a minute
            if resp_data:    # If response had data, write them to socket
                self.penalty = 0
                if self.verbose:
                    self.http.v_print(received_d_pt=len(resp_data))
                self.TunnaSocket.send(resp_data)    # write to socket
                resp_data = ""    # clear data
                wait = False    # If data received, don't wait
            else:
                wait = True
        except:
            self.TunnaSocket.close()
            thread.exit()
        finally:
            self.mutex_http_req.release()
    print "[-] Pinging Thread Exited"    # Unrecoverable
    thread.interrupt_main()    # Interrupt main thread -> exits
def main(argv):
    # http://stackoverflow.com/questions/6347115/python-icmpv6-client
    table = []
    #if(len(sys.argv) >= 4):
    iface = sys.argv[1]    # getting iface parameter
    # manet = sys.argv[2]    # getting manet parameter
    # str_message = sys.argv[3]    # message to send through the manet
    # if(iface == '' or manet == '' or str_message == ''):
    #     print 'Parameter is incorrect'
    #     print 'Usage main_interop.py <iface> <manet> <str_message>'
    # else:
    #setup_rules(iface)
    capture = pcapy.open_live(iface, 65536, 1, 0)    # characteristics of the captured packet
    pcap = pcs.PcapConnector(iface, 64, True, 1000)
    #main_socket = init_socket(iface)
    #thread.start_new_thread(read_socket,())    # read socket init
    print("\nWaiting...")
    a = 0
    thread.start_new_thread(interop_beacon_generator, (iface, 0, 0.5))    # thread: basic interop packet send
    thread.start_new_thread(parse_packet, (capture, pcap, iface, table, ))    # capture analysis
    thread.start_new_thread(read_socket_, (iface, ))    # reading socket
    time.sleep(0.2)
    thread.start_new_thread(write_socket_, (table, ))    # writing socket
    while(True):
        if(global_command == 'f'):
            print('Killing Threads...')
            thread.exit()
def get_irc_socket_object(self):
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.settimeout(10)
    self.sock = sock
    try:
        sock.connect((self.server['host'], self.server['port']))
    except:
        pp('Cannot connect to server (%s:%s).' % (self.server['host'], self.server['port']), 'error')
        sys.exit()
    sock.settimeout(None)
    sock.send('USER %s\r\n' % self.server['user'])
    sock.send('PASS %s\r\n' % self.server['oauthpass'])
    sock.send('NICK %s\r\n' % self.server['user'])
    if self.check_login_status(sock.recv(1024)):
        pp('Login successful.')
    else:
        pp('Login unsuccessful. (hint: make sure your oauth token is set in self.config/self.config.py).', 'error')
        thread.exit()
    self.join_channel()
    return sock
def _doTask(self, fname, mod_name):
    task_module = __import__(mod_name)    # import the new Module
    self._Logger.info("Start thread %s" % fname)
    while True:
        try:
            self._Logger.info("Thread one task %s" % fname)
            # run once
            if self._taskUrl:
                data = self._getPara(mod_name)    # get task's para
                result = task_module.task(data)
            else:
                result = task_module.task()
            # upload result
            if self._resultUrl:
                self._postResult(mod_name, result)
            # check the Module's status
            if fname not in self._Module.module_status:
                self._Logger.info("Thread expired %s" % fname)
                break
            if "reload" == self._Module.module_status[fname]:
                self._Logger.info("Thread module reload %s" % fname)
                reload(task_module)    # reload the Module
                self._Module.module_status[fname] = "keep"
        except:
            pass
        finally:
            time.sleep(self._taskFreq)
    self._Logger.info("Terminal thread %s" % fname)
    thread.exit()
def write_socket_(route_list):
    while(True):
        try:
            print 'Ready to send\n'
            str_msg = raw_input('\nMessage to Send (msg_str to_X) #> ')
            str_to_send = str_msg[:str_msg.find('to_') - 1]    # extracting message
            recive_node = str_msg[str_msg.find('_') + 1:]    # extracting node index
            global_command = str_msg
            if(str_msg == 'f'):
                print('Killing Threads...')
                thread.exit()
                break
            else:
                global_command = str_msg
                for element in route_list:
                    try:
                        if(str(element["Index"]) == recive_node):
                            to_connect_addr = element["local"]
                            print 'Node with ' + element["local"] + ' link local addres'
                            s = socket.socket(socket.AF_INET6, socket.SOCK_RAW, socket.IPPROTO_TCP)
                            s.connect((to_connect_addr, 9991, 0, 3))
                            s.send(str_to_send + '\0')
                            s.close()
                        else:
                            print "Element not in interop route list"
                    except Exception, e:
                        print 'Error de escritura socket - Exception handler seeds: ' + str(e)
        except Exception, e:
            print 'Error de escritura socket - Exception handler seeds: ' + str(e)
def get_current_piece(self):
    if not hasattr(self.torrent, "torrent_handle") or not self.torrent.torrent_handle:
        raise Exception("No torrent started")
    # Do we have it in our cache?
    if self.torrent.current_piece in self.cache:
        ret = self.cache[self.torrent.current_piece]
        cache[self.torrent.current_piece] = 0
        return ret
    while True:
        s = self.torrent.torrent_handle.status()
        if s.pieces[self.torrent.current_piece]:
            break
        time.sleep(.1)
        if self.torrent.cancel_download:
            thread.exit()
    self.torrent.torrent_handle.read_piece(self.torrent.current_piece)
    while True:
        piece = self.torrent.session.pop_alert()
        if isinstance(piece, lt.read_piece_alert):
            if piece.piece == self.torrent.current_piece:
                self.torrent.current_piece = self.torrent.current_piece + 1
                return self.handle_piece(piece)
            else:
                self.cache[piece.piece] = piece.buffer
                return self.get_current_piece()
        time.sleep(.1)
        if self.torrent.cancel_download:
            thread.exit()
def OnClose(self, event):
    try:
        self.room.deleteFile()
    except:
        pass
    thread.exit()
    self.Close()
def set_edge(self, value, callback):
    if self.fd != None:
        set_edge(self.kernel_id, value)
        thread.start_new_thread(self.wait_edge, (self.fd, callback))
        return
    else:
        thread.exit()
def print_time(threadName, delay, counter):
    while counter:
        if exitFlag:
            thread.exit()
        time.sleep(delay)
        print "%s: %s" % (threadName, time.ctime(time.time()))
        counter -= 1
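A minimal driver for the example above (an assumed sketch, not part of the original listing): it shows how such a worker is typically launched with the legacy thread module and how flipping the exitFlag global makes each worker call thread.exit().

import thread
import time

exitFlag = 0    # assumed global checked inside print_time

try:
    thread.start_new_thread(print_time, ("Thread-1", 2, 5))
    thread.start_new_thread(print_time, ("Thread-2", 4, 5))
except Exception:
    print "Error: unable to start thread"

time.sleep(3)
exitFlag = 1     # workers see the flag on their next pass and call thread.exit()
time.sleep(5)    # keep the main thread alive long enough for them to wind down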
def recsolve(self, start, field, q=False):
    if q and self.cancel:
        thread.exit()
    f = field
    if f[start] is None:
        some = False
        for n in range(1, 10):
            f[start] = n
            if self.check_field(f):
                self.field = f
                some = True
                if f.count(None) == 0:
                    if not q:
                        delta = datetime.datetime.now() - self.started
                        self.strtime = str(delta.seconds) + "." + newFill(str(delta.microseconds), 6, '0')
                        print "GOT IT in %s seconds" % self.strtime
                        self.found = True
                        self.print_field(f)
                        sys.exit(0)
                    else:
                        self.found = True
                        self.field = f
                        delta = datetime.datetime.now() - self.started
                        self.strtime = str(delta.seconds) + "." + newFill(str(delta.microseconds), 6, '0')
                        thread.exit()
                else:
                    sub = self.recsolve(start + 1, f, q)
                    #if not sub:
                    #    continue
            f[start] = None
        #if not some:
        #    pass
    else:
        self.recsolve(start + 1, field, q)
def run(self): "use paramiko sshclient to change passwords through ssh" ssh = paramiko.SSHClient() ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) try: ssh.connect(self.ip_addr, username=self.username, password=self.password) except socket.error: print "Connection refused on %s." % self.ip_addr thread.exit() except paramiko.AuthenticationException: print "wrong username and/or password %s." % self.ip_addr thread.exit() stdin, stdout, stderr = ssh.exec_command('passwd') if self.username == 'root': stdin.write('%s\n' % self.newpass) stdin.flush() stdin.write('%s\n' % self.newpass) stdin.flush() print stdout.readlines() print "password change on %s for user %s successfull" % \ (self.ip_addr, self.username) else: stdin.write('%s\n' % self.password) stdin.flush() stdin.write('%s\n' % self.newpass) stdin.flush() stdin.write('%s\n' % self.newpass) stdin.flush() print stdout.readlines() print "password change on %s for user %s successfull" % \ (self.ip_addr, self.username) ssh.close()
def run(self, node):
    attempts = self.attempts
    # query the node and parse the results
    while attempts > 0:
        try:
            res = self.proxy(node, "Information.Info")
        except:
            print sys.exc_info()
            # Means the remote side didn't support the operation, empty means no response
            res = {"type": "bad user"}
        try:
            res["neighbors"] = self.proxy(node, "sys:link.GetNeighbors")[0]
        except:
            res["neighbors"] = {}
        res = [res]
        self.logger(str(res))
        (peers, info) = self.parse(res, self.attempts - attempts)
        if peers != [] and info != {}:
            break
        attempts -= 1
    to_query = []
    # add the results to the table and determine any new nodes that need to be queried
    self.lock.acquire()
    if info != {}:
        self.nodes[node] = info
    else:
        self.nodes[node] = self.parse([{"type": "error"}], self.attempts)[1]
    for neighbor in peers:
        if neighbor in self.nodes:
            continue
        to_query.append(neighbor)
        self.nodes[neighbor] = True
    if to_query == 0:
        # no new nodes, we're done
        self.threads -= 1
    else:
        # add as many new threads as nodes, except the current thread still is working
        self.threads += len(to_query) - 1
    if self.threads == 0:
        # last thread, notifies the main thread of completion
        self.done.release()
    self.logger("Threads remaining: " + str(self.threads))
    self.lock.release()
    # start new threads outside the lock
    for idx in range(len(to_query) - 1):
        thread.start_new_thread(self.run, (to_query[idx], ))
    # this is the thread starter executing
    if len(to_query) > 0:
        self.run(to_query[len(to_query) - 1])
    # when the thread gets here, it's completely done
    thread.exit()
def sendmail(fn, to, activeflag, xmobj, recipient, botaddress, pw, smtp_server):
    activeflag.value = 1
    filename = fn
    msg = email.mime.Multipart.MIMEMultipart()
    msg['Subject'] = filename[filename.rfind('\\') + 1:]
    msg['From'] = botaddress
    msg['To'] = to
    body = email.mime.Text.MIMEText("""thekindlybot abides. Ask and the tunes are thine.""")
    msg.attach(body)
    fp = open(filename, 'rb')
    #att = email.mime.application.MIMEApplication(fp.read(),_subtype="mp3")
    att = email.mime.application.MIMEApplication(fp.read())
    fp.close()
    att.add_header('Content-Disposition', 'attachment', filename=filename)
    msg.attach(att)
    s = smtplib.SMTP(smtp_server)
    s.starttls()
    s.login(msg['From'], pw)
    s.sendmail(msg['From'], [msg['To']], msg.as_string())
    s.quit()
    activeflag.value = 0
    xmobj.send_message(recipient, mbody='Mail Sent!')
    print 'mail sent! to ', recipient
    thread.exit()
def child():
    global exitstat
    exitstat = exitstat + 1
    threadid = thread.get_ident()
    print 'Hello from child', threadid, exitstat
    thread.exit()
    print 'Never reached'
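A sketch of the parent side (assumed; only the child function appears in the original example), spawning several children with the legacy thread module and keeping the main thread alive long enough for them to run:

import thread
import time

exitstat = 0    # assumed global incremented by child()

for i in range(5):
    thread.start_new_thread(child, ())
time.sleep(1)    # crude join: let the children finish before the process exits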
def exit(code=0):
    preloader.stop()
    if threading.current_thread().__class__.__name__ == '_MainThread':
        sys.exit(code)
    else:
        thread.exit()
def do_qt(self, arg):
    """Quit the current thread."""
    thread_name = threading.currentThread().getName()
    self.msg("quitting thread %s" % thread_name)
    del self.traced[thread_name]
    self.threading_lock.release()
    thread.exit()
def print_time(name, delay, counter):
    while counter:
        if doExit:
            thread.exit()
        time.sleep(delay)
        print("%s: %s" % (name, time.ctime(time.time())))
        counter -= 1
def www_connect_rserver(self):
    self.logger.log('*** Connecting to remote server...')
    self.first_run = 0
    # we don't have a proxy, so we have to connect to the server ourselves
    rs, rsp = self.client_head_obj.get_http_server()
    self.logger.log('(%s:%d)...' % (rs, rsp))
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect((rs, rsp))
        self.rserver_socket = s
        self.rserver_socket_closed = 0
        self.current_rserver_net_location = '%s:%d' % (rs, rsp)
        self.logger.log('Done.\n')
    except:
        self.rserver_socket_closed = 1
        self.logger.log('Failed.\n')
        self.exit()
        thread.exit()
    if self.client_head_obj.get_http_method() == 'CONNECT':
        self.logger.log('*** Generating server HTTP response...')
        buffer = 'HTTP/1.1 200 Connection established\015\012\015\012'
        self.rserver_head_obj, rest = http_header.extract_server_header(buffer)
        self.rserver_buffer = rest
        self.guess_rserver_data_length()
        self.logger.log('Done.\n')
def take_a_video_picture_interval(string, sleeptime, *args):
    global config_data
    global space
    while space < config_data.amount_of_space:    # while 1
        print string
        reload_config()
        comparison = check_config()
        GPIO.setwarnings(False)    # to remove Rpi.GPIO's warning
        if comparison:
            take_the_moment()
            if config_data.motion_event and config_data.ext_int_event_a:
                GPIO.setmode(GPIO.BOARD)
                #GPIO.gpio_function(7)    # check if gpio is already set as input or other
                GPIO.setup(7, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)    # pull-up input, pull_up_down=GPIO.PUD_DOWN
                # directory size
                #print os.path.getsize("nomefile")
                #print os.stat(".").st_size
                #print sum(os.path.getsize(f) for f in os.listdir('.') if os.path.isfile(f))    # directory size without subdirectories
                print ("Debug 1: input value ")
                print GPIO.input(7)
                GPIO.add_event_detect(7, GPIO.RISING, callback=take_a_video_picture_movement, bouncetime=3000)    # bouncetime: inhibition time between one event and the next, 3000 = 3s
                thread.exit()
            else:
                GPIO.cleanup()
            time.sleep(sleeptime)
        else:
            print "The date/time range is not valid"
    print "There isn't space available - Max amount_of_space reached: " + str(config_data.amount_of_space) + " KB"
def conectado(con, cliente):
    respHost = con.recv(1024)
    #print respHost
    if armazena(respHost, con):
        con.sendall('Cadastro já extistente. Tente outro nome...')
        con.close()
        return
    else:
        print respHost + ' :conectou ao Servidor...'
        con.sendall(respHost + " Conectado...")
    if respHost == ' ':
        return
    if len(LISTA_DE_CLIENTES) == 1:
        con.send('Voce é o primeiro conectado, por favor, aguarde outras conexões para iniciar o chat')
    else:
        conectados(con)
    #print MAP_CLIENTES.values()
    while True:
        try:
            data = con.recv(1024)
            for x, m in MAP_CLIENTES.items():
                if con == m:
                    remetente = x
            if data:
                #broadcast_data(con, "\r" + str(con.getpeername()) + 'disse: ' + data)
                broadcast_data_mapa(con, remetente + ' disse: ' + data)
        except:
            print 'Finalizando conexão do cliente', cliente
            for x, m in MAP_CLIENTES.items():
                if m == con:
                    broadcast_data_mapa(con, 'o cliente ' + x + ' desconectou...')
                    del MAP_CLIENTES[x]
            con.close()
            thread.exit()
def new_thread(conn, ok):
    global close_serv
    while True:
        try:
            data = conn.recv(1024)
        except Exception as e:
            print "exception: {}".format(e)
            break
        if not data:
            print 'data is null'
        if data == 'exit':
            break
        if data == 'close_serv':
            print 'close server command is called'
            close_serv = True
        print 'data is ', data
        conn.sendall('echo : ' + data)
    print 'prepare close connection and exit thread'
    conn.close()
    thread.exit()
def _connectToAClient(self):
    print 'New connection is waiting'
    conn, addr = self.sock.accept()
    self.currentUsers.append(addr)
    self.listenFlag = True
    print str(addr) + " is connected"
    hint = self._handleClientsMessage(None)
    conn.send(hint)
    while True:
        # get data from client
        data = conn.recv(1024)
        if not data:
            break
        print str(addr) + " " + str(data)
        # send back to the client
        conn.send(self._handleClientsMessage(data))
    conn.send(self._handleClientsMessage(None))
    conn.close()
    for tmp in self.currentUsers:
        if tmp == addr:
            self.currentUsers.remove(tmp)
    thread.exit()
def get_client_access_token(self):
    access_token_url = 'https://api.mapmyfitness.com/v7.0/oauth2/access_token/'
    access_token_data = {'grant_type': 'client_credentials',
                         'client_id': self.client_id,
                         'client_secret': self.client_secret}
    try:
        response = requests.post(url=access_token_url, data=access_token_data,
                                 headers={'Api-Key': self.client_id})
    except:
        print 'Request for access token failed (not error code)'
        thread.exit()
    self.increment_calls()
    # Print out debug info if the call is not successful
    if (response.status_code != 200):
        print 'ERROR! Received a status code of ' + str(response.status_code) + '\n'
        print 'URL: ' + str(access_token_url) + '\n'
        print 'Data: ' + str(access_token_data) + '\n'
        print 'Received Content:'
        print response.content
        thread.exit()
    try:
        access_token = response.json()
    except:
        print 'Did not get JSON. Here is the response and content:'
        print response
        print response.content
        access_token = ''
    return access_token
def guess_rserver_data_length(self):
    code = self.rserver_head_obj.get_http_code()
    try:
        c_method = self.client_head_obj.get_http_method()
    except AttributeError:
        # Problem with remote end of connection
        self.logger.log('*** Exception getting http code from client_head_obj -- remote end closed connection??\n')
        thread.exit()
    if code == '304' or code == '204' or code[0] == '1':
        self.rserver_all_got = 1
        self.rserver_data_sent = 1
        self.rserver_data_length = 0
        self.logger.log('*** Remote server response is %s and it must not have any body.\n' % code)
    # we had a problem here if the response was some kind of error, because then there may be
    # some body.
    # This time let's try to check for 4** responses to fix the problem.
    if (c_method == 'HEAD' or c_method == 'CONNECT') and (code[0] != '4'):
        self.rserver_all_got = 1
        self.rserver_data_sent = 1
        self.rserver_data_length = 0
        self.logger.log("*** Remote server response to the '%s' request. It must not have any body.\n" % c_method)
    if not self.rserver_all_got:
        try:
            self.rserver_data_length = int(self.rserver_head_obj.get_param_values('Content-Length')[0])
            self.logger.log("*** Server 'Content-Length' found to be %d.\n" % self.rserver_data_length)
            if self.rserver_data_length == 0:
                self.rserver_all_got = 1
                self.rserver_data_sent = 1
        except:
            self.rserver_data_length = 0
            self.logger.log("*** Could not find server 'Content-Length' parameter.\n")
def timer(no, interval):
    cnt = 0
    while cnt < 10:
        print 'Thread: (%d) Time:%s\n' % (no, time.ctime())
        time.sleep(interval)
        cnt += 1
    thread.exit()
def load_assets_thread(self, f, icon_path, url):
    valid = True
    self.download(f, icon_path, url)
    self.load_assets_done()
    thread.exit()
def stop(): """May be used to kill the thread, if it is not a daemon thread.""" thread.exit()
def close(self):
    thread.exit()
#computeSegmentation(audiofile, pool)
#segments = pool['segments']
computeBeats(audiofile, pool)
beatFilename = writeBeatFile(audiofile, pool)
computeBeatsLoudness(audiofile, pool)
imgfilename = os.path.splitext(audiofile)[0] + '.png'
#imgfilename = imgfilename.split(os.sep)[-1]
#print 'plotting', imgfilename
if sys.platform == 'darwin' or sys.platform == 'win32':
    plot(pool, 'beats loudness ' + str(audiofile), imgfilename)
else:
    card = 'default'
    f = wave.open(beatFilename, 'rb')
    # print '%d channels, sampling rate: %d \n' % (f.getnchannels(),
    #                                              f.getframerate())
    device = alsaaudio.PCM(card=card)
    lock = thread.allocate_lock()
    thread.start_new_thread(getkey, (beatFilename, device, f, card, lock))
    plot(pool, 'beats loudness ' + audiofile, imgfilename)
    f.close()
    thread.exit()
#print 'deleting beatfile:', beatFilename
#subprocess.call(['rm', beatFilename])
def pull(request): """ This is where the comet approach is put into play. This handles ajax requests from clients, holding on to the request while checking Parse for new activity. IMPORTANT! The order in which the session cache is checked is very critical. Take for example and employee that registers. Dashboard A receives the pending employee and immediately approves it. Now Dashboard B will run pull with the pending employee and the approved employee. We must first add the pending then check for the approved! """ def comet(session_copy): # used by more than 1 (note that it is ok to retrieve all of # the lists since they are all pointers - not the actual list! employees_pending_list_copy =\ SESSION.get_employees_pending_list(session_copy) employees_approved_list_copy =\ SESSION.get_employees_approved_list(session_copy) messages_received_list_copy =\ SESSION.get_messages_received_list(session_copy) redemptions_pending_copy =\ SESSION.get_redemptions_pending(session_copy) redemptions_past_copy =\ SESSION.get_redemptions_past(session_copy) # this is the latest session data session = SessionStore(request.session.session_key) employees_pending_list =\ SESSION.get_employees_pending_list(session) employees_approved_list =\ SESSION.get_employees_approved_list(session) messages_received_list =\ SESSION.get_messages_received_list(session) redemptions_pending =\ SESSION.get_redemptions_pending(session) redemptions_past =\ SESSION.get_redemptions_past(session) # put the diffs between session_copy and session here data = {} ############################################################# # FEEDBACKS_UNREAD ################################## fbs_unread_copy = [ fb.objectId for fb in\ messages_received_list_copy if not fb.is_read ] fbs_unread = [ fb.objectId for fb in\ messages_received_list if not fb.is_read ] # get the difference between the two feedbacks_unread =\ tuple(set(fbs_unread) - set(fbs_unread_copy)) if feedbacks_unread: fb_unread = [] messages_received_ids =\ [ fb.objectId for fb in messages_received_list ] for feedback_id in feedbacks_unread: for fb in messages_received_list: if fb.objectId == feedback_id: fb_unread.append(fb.jsonify()) break if len(fb_unread) > 0: fb_count = 0 for fb in messages_received_list: if not fb.get("is_read"): fb_count += 1 data['feedbacks_unread'] = fb_unread data['feedback_unread_count'] = fb_count ############################################################# # EMPLOYEES_PENDING ################################## # must also check if employee is already approved! 
emps_pending_copy = [ emp.objectId for emp in employees_pending_list_copy ] emps_pending = [emp.objectId for emp in employees_pending_list] employees_pending =\ tuple(set(emps_pending) - set(emps_pending_copy)) if employees_pending: pending = [] for emp_id in employees_pending: for emp in employees_pending_list: if emp.objectId == emp_id: pending.append(emp.jsonify()) break if len(pending) > 0: data['employees_pending_count'] =\ len(employees_pending_list) data['employees_pending'] = pending ############################################################# # EMPLOYEES APPROVED (pending to approved) ################# emps_approved_copy = [ emp.objectId for emp in\ employees_approved_list_copy] emps_approved = [ emp.objectId for emp in\ employees_approved_list] appr_emps =\ tuple(set(emps_approved) - set(emps_approved_copy)) if appr_emps: approved = [] for appr_emp_id in appr_emps: for emp in employees_approved_list: if emp.objectId == appr_emp_id: approved.append(emp.jsonify()) break if len(approved) > 0: data['employees_approved'] = approved data['employees_pending_count'] =\ len(employees_pending_list) ############################################################# # EMPLOYEES DELETED/DENIED/REJECTED (pending/approved to pop)! # need to compare approved and pending! emps_copy = emps_approved_copy[:] emps_copy.extend(emps_pending_copy) emps = emps_approved[:] emps.extend(emps_pending) # emps_copy has the same or more items that emps del_emps = tuple(set(emps_copy) - set(emps)) if del_emps: deleted = [] for demp_id in del_emps: if demp_id in emps_approved_copy: emps_list = employees_approved_list_copy else: emps_list = employees_pending_list_copy for emp in emps_list: if emp.objectId == demp_id: deleted.append(emp.jsonify()) break if len(deleted) > 0: data['employees_pending_count'] =\ len(employees_pending_list) data['employees_deleted'] = deleted ############################################################# # REDEMPTIONS PENDING reds_pending_copy = [ r.objectId for r in\ redemptions_pending_copy ] reds_pending = [r.objectId for r in redemptions_pending] reds = tuple(set(reds_pending) - set(reds_pending_copy)) if reds: redemps = [] for r_id in reds: for redemp in redemptions_pending: if redemp.objectId == r_id: redemps.append(redemp.jsonify()) break if len(redemps) > 0: data['redemption_pending_count'] =\ len(redemptions_pending) data['redemptions_pending'] = redemps ############################################################# # REDEMPTIONS APPROVED (pending to history) reds_past_copy = [ r.objectId for r in\ redemptions_past_copy ] reds_past = [r.objectId for r in redemptions_past] appr_redemps =\ tuple(set(reds_past) - set(reds_past_copy)) if appr_redemps: redemp_js = [] for red_id in appr_redemps: for redemp in redemptions_past: if redemp.objectId == red_id: redemp_js.append(redemp.jsonify()) break if len(redemp_js) > 0: data['redemption_pending_count'] =\ len(redemptions_pending) data['redemptions_approved'] = redemp_js ############################################################# # REDEMPTIONS DELETED ############################## # remove from pending (should not be in history!) 
reds_copy = reds_past_copy[:] reds_copy.extend(reds_pending_copy) reds = reds_past[:] reds.extend(reds_pending) # reds_copy has the same or more items that reds del_redemps = tuple(set(reds_copy) - set(reds)) if del_redemps: redemp_js = [] for red_id in del_redemps: reds_list = [] if red_id in reds_past_copy: reds_list = redemptions_past_copy elif red_id in reds_pending_copy: reds_list = redemptions_pending_copy for redemp in reds_list: if redemp.objectId == red_id: redemp_js.append(redemp.jsonify()) break if len(redemp_js) > 0: data['redemption_pending_count'] =\ len(redemptions_pending) data['redemptions_deleted'] = redemp_js ############################################################# # SETTINGS UPDATED ############################## settings_copy = session_copy.get("settings") settings = session.get("settings") if settings_copy.get("retailer_pin") !=\ settings.get("retailer_pin"): data['retailer_pin'] = settings.get("retailer_pin") ############################################################# # REWARDS UPDATED ############################## rewards_copy = session_copy.get("store").get("rewards") rewards_copy =\ { reward['reward_id']:reward for reward in rewards_copy } rewards = session.get("store").get("rewards") rewards = {reward['reward_id']: reward for reward in rewards} updated_rewards = [] for reward_id, rew_copy in rewards_copy.iteritems(): # Note that some rewards may have been deleted! rew = rewards.get(reward_id) if rew and rew_copy['redemption_count'] !=\ rew['redemption_count']: # only the redemtpion_count and reward_id are used # in the client side updated_rewards.append({ "reward_id": reward_id, "redemption_count": rew['redemption_count'], }) if updated_rewards: data['rewards'] = updated_rewards ############################################################# # PATRONSTORE_COUNT ################################## patronStore_count_copy = int(session_copy["patronStore_count"]) patronStore_count = int(session["patronStore_count"]) if patronStore_count_copy != patronStore_count: data['patronStore_count'] = patronStore_count ############################################################# # ACTIVE_STORE_LOCATION_ID ############################ if session['active_store_location_id'] !=\ session_copy['active_store_location_id']: data['active_store_location_id'] =\ session['active_store_location_id'] # IMPORTANT! The request.session is the same as the # SessionStore(session_key)! so we must use the # request.session because it is automatically saved at the end # of each request- thereby overriding/undoing any changes made # to the SessionStore(session_key) key! # need to check if we are still logged in session = SessionStore(request.session.session_key) if 'account' in session and SESSION_KEY in session: request.session.clear() request.session.update(session) else: flush(request.session) ############################################################ # Respond ########################################### try: return HttpResponse(json.dumps(data), content_type="application/json") except (IOError, socket.error) as e: # broken pipe/socket. 
thread.exit() # exit silently ################################################################## ##### ENTRY POINT ###################################################### # get the timestamp and uid t = parser.parse(request.GET["timestamp"]) timestamp = str(t.hour).zfill(2) + ":" +\ str(t.minute).zfill(2) + ":" + str(t.second).zfill(2) uid = request.GET['uid'] # update the last_updated field of the CometSessionIndex try: csi = CometSessionIndex.objects.get(session_key=\ request.session.session_key) csi.last_updated = timezone.now() csi.save() except CometSessionIndex.DoesNotExist: # should never go here but just in case. CometSessionIndex.objects.create(session_key=\ request.session.session_key, store_id=SESSION.get_store(request.session).objectId, last_updated=timezone.now()) # register the CometSession CometSession.objects.update() CometSession.objects.create(session_key=\ request.session.session_key, timestamp=timestamp, uid=uid) # cache the current session at this state session_copy = dict(request.session) timeout_time = timezone.now() + relativedelta(seconds=REQUEST_TIMEOUT) # keep going until its time to return a response forcibly while timezone.now() < timeout_time: # need to update he objects manager to get the latest objects CometSession.objects.update() try: scomet = CometSession.objects.get(session_key=\ request.session.session_key, timestamp=timestamp, uid=uid) except CometSession.DoesNotExist: # cometsession was deleted - time to go try: # make sure that the latest session is saved! # need to check if we are still logged in session = SessionStore(request.session.session_key) if 'account' in session and SESSION_KEY in session: request.session.clear() request.session.update(session) else: flush(request.session) return HttpResponse(json.dumps({"result": -1}), content_type="application/json") except (IOError, socket.error) as e: thread.exit() if scomet.modified: # delete the registered comet session object CometSession.objects.update() try: scomet = CometSession.objects.get(session_key=\ request.session.session_key, timestamp=timestamp, uid=uid) scomet.delete() except CometSession.DoesNotExist: pass # do nothing try: return comet(session_copy) except KeyError: # if a key error occurs then that probably means that # the session has been flushed- was logged out by user # or forcefully by server =) # now time to flag existing tabs. request.session.clear() try: return HttpResponse(json.dumps({"result": -3}), content_type="application/json") except (IOError, socket.error) as e: # broken pipe/socket. 
thread.exit() # exit silently else: # nothing new, sleep for a bit sleep(COMET_PULL_RATE) # TIME IS UP - return a response result 0 means no change # try 1 last time if scomet.modified: # delete the registered comet session object CometSession.objects.update() try: scomet = CometSession.objects.get(session_key=\ request.session.session_key, timestamp=timestamp, uid=uid) scomet.delete() except CometSession.DoesNotExist: pass # do nothing return comet(session_copy) # make sure that request.session is the most up to date session = SessionStore(request.session.session_key) # need to check if we are still logged in if 'account' in session and SESSION_KEY in session: request.session.clear() request.session.update(session) else: flush(request.session) # attempt to delete registered comet session if not yet deleted try: scomet = CometSession.objects.get(session_key=\ request.session.session_key, timestamp=timestamp, uid=uid) scomet.delete() except CometSession.DoesNotExist: pass # do nothing try: return HttpResponse(json.dumps({"result": 0}), content_type="application/json") except (IOError, socket.error) as e: thread.exit() # exit silently
def clientTearDown(self):
    self.done.set()
    thread.exit()
def interrupt(*args):
    # Temporarily suppress SIGTERM. We'll enable it
    # once we are ready to wait (and recheck races).
    signal.signal(signal.SIGTERM, squash)
    thread.exit()
def close(self):
    self.ser.close()
    if self.ser.isOpen() == False:
        self.showSerial.delete(0.0, END)
        self.showSerial.insert(0.0, "Serial has been closed!")
        thread.exit()    # close the thread
def spam(thread_name='0', proxy=None): iteration = 1 print showstatus( wrapsbrace('info', True) + ('thread-{} started at <0x{}>').format( thread_name, thread.get_ident()), 'new') while True: try: if proxy == None: r = requests.post( 'https://fotostrana.ru/signup/signup/signup/', data={ 'user_name': 'test', 'user_sex': 'm', 'user_birthday_day': '2', 'user_birthday_month': '2', 'user_birthday_year': '1999', 'user_email_or_phone': _phone, 'terms_agree': '239' }, headers={ 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:56.0) Gecko/20100101 Firefox/56.0' }) else: r = requests.post( 'https://fotostrana.ru/signup/signup/signup/', data={ 'user_name': 'test', 'user_sex': 'm', 'user_birthday_day': '2', 'user_birthday_month': '2', 'user_birthday_year': '1999', 'user_email_or_phone': _phone, 'terms_agree': '239' }, headers={ 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:56.0) Gecko/20100101 Firefox/56.0' }, proxies={ 'http': str(proxy), 'https': str(proxy) }) except requests.exceptions.ConnectionError: print showstatus( wrapsbrace(('thread-{}').format(thread_name)) + wrapsbrace('except', True) + ('ConnectionError thrown! Sleeping for {}s . . .' ).format(delaytime), 'warn') sleep(delaytime) else: if r.status_code == 429: print showstatus( wrapsbrace(('thread-{}').format(thread_name)) + wrapsbrace(('429 {}').format(r.reason), True) + ('Sleeping for {}s . . .').format(delaytime), 'warn', color.WARNING + color.REVERSE) sleep(delaytime) elif r.status_code == 200: print showstatus( wrapsbrace(('thread-{}').format(thread_name)) + wrapsbrace('200 OK', True) + ('GAC SMS sent! Sleeping for {}s . . . (iteration:{})' ).format(delaytime, iteration)) iteration += 1 sleep(delaytime) else: print showstatus( wrapsbrace(('thread-{}').format(thread_name)) + wrapsbrace(('{} {}').format( r.status_code, r.reason), True) + 'Something went wrong. Exiting thread . . .', 'warn') thread.exit() return
class PyStreamer(object): #Variables that contains the user credentials to access Twitter API with open("credentials.yaml", "r") as file: cred = load(file, Loader=Loader) consumer_key = cred["consumer_key"] consumer_secret = cred["consumer_secret"] access_token = cred["access_token"] access_token_secret = cred["access_token_secret"] HOST = '' # Symbolic name meaning all available interfaces PORT = 9999 # Arbitrary non-privileged port def __init__(self, languages, topics): self.socket_created = False start_new_thread(self.handle_socket, ()) self.should_end = False self.languages = languages self.topics = topics while not self.socket_created: pass def handle_socket(self): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) print 'Socket created' try: s.bind((self.HOST, self.PORT)) except socket.error, msg: print 'Bind failed. Error Code : ' + str( msg[0]) + ' Message ' + msg[1] sys.exit() self.socket_created = True print 'Socket bind complete' s.listen(10) print 'Socket now listening' # This is a basic listener that just prints received tweets to stdout. class StdOutListener(StreamListener): def on_data(self, data): # post = json.loads(data.decode('utf-8')) conn.send(data) return True def on_error(self, status): print status #now keep talking with the client while not self.should_end: #wait to accept a connection - blocking call conn, addr = s.accept() print 'Connected with ' + addr[0] + ':' + str(addr[1]) listener = StdOutListener() auth = OAuthHandler(self.consumer_key, self.consumer_secret) auth.set_access_token(self.access_token, self.access_token_secret) stream = Stream(auth, listener) # This line filter Twitter Streams to capture data by the keywords: 'python', 'javascript', 'ruby' stream.filter(track=self.topics, languages=self.languages) s.close() thread.exit()
def main(argv):
    script = os.path.basename(argv[0])
    params = argv[1:]

    print "Process killer"
    print "by Mario Vilas (mvilas at gmail.com)"
    print

    if len(params) == 0 or '-h' in params or '--help' in params or \
            '/?' in params:
        print "Usage:"
        print "    %s <process ID or name> [process ID or name...]"
        print
        print "If a process name is given instead of an ID all matching processes are killed."
        exit()

    # Scan for active processes.
    # This is needed both to translate names to IDs, and to validate the user-supplied IDs.
    s = System()
    s.request_debug_privileges()
    s.scan_processes()

    # Parse the command line.
    # Each ID is validated against the list of active processes.
    # Each name is translated to an ID.
    # On error, the program stops before killing any process at all.
    targets = set()
    for token in params:
        try:
            pid = HexInput.integer(token)
        except ValueError:
            pid = None
        if pid is None:
            matched = s.find_processes_by_filename(token)
            if not matched:
                print "Error: process not found: %s" % token
                exit()
            for (process, name) in matched:
                targets.add(process.get_pid())
        else:
            if not s.has_process(pid):
                print "Error: process not found: 0x%x (%d)" % (pid, pid)
                exit()
            targets.add(pid)
    targets = list(targets)
    targets.sort()
    count = 0

    # Try to terminate the processes using the TerminateProcess() API.
    next_targets = list()
    for pid in targets:
        next_targets.append(pid)
        try:
            # Note we don't really need to call open_handle and close_handle,
            # but it's good to know exactly which API call it was that failed.
            process = Process(pid)
            process.open_handle()
            try:
                process.kill(-1)
                next_targets.pop()
                count += 1
                print "Terminated process %d" % pid
                try:
                    process.close_handle()
                except WindowsError, e:
                    print "Warning: call to CloseHandle() failed: %s" % str(e)
            except WindowsError, e:
                print "Warning: call to TerminateProcess() failed: %s" % str(e)
        except WindowsError, e:
            print "Warning: call to OpenProcess() failed: %s" % str(e)
def exit():
    preloader.stop()
    thread.exit()
def spam(thread_name='0', proxy=None): iteration = 1 print showstatus( wrapsbrace('info', True) + ('thread-{} started at <0x{}>').format( thread_name, thread.get_ident()), 'new') while True: try: if proxy == None: r = requests.post( 'https://p.grabtaxi.com/api/passenger/v2/profiles/register', data={ 'phoneNumber': _phone, 'countryCode': 'ID', 'name': 'test', 'email': '*****@*****.**', 'deviceToken': '*' }, headers={'User-Agent': 'curl/7.52.1'}) else: r = requests.post( 'https://p.grabtaxi.com/api/passenger/v2/profiles/register', data={ 'phoneNumber': _phone, 'countryCode': 'ID', 'name': 'test', 'email': '*****@*****.**', 'deviceToken': '*' }, headers={'User-Agent': 'curl/7.52.1'}, proxies={ 'http': str(proxy), 'https': str(proxy) }) except requests.exceptions.ConnectionError: print showstatus( wrapsbrace(('thread-{}').format(thread_name)) + wrapsbrace('except', True) + ('ConnectionError thrown! Sleeping for {}s . . .' ).format(delaytime), 'warn') sleep(delaytime) else: if r.status_code == 429: print showstatus( wrapsbrace(('thread-{}').format(thread_name)) + wrapsbrace(('429 {}').format(r.reason), True) + ('Sleeping for {}s . . .').format(delaytime), 'warn', color.WARNING + color.REVERSE) sleep(delaytime) elif r.status_code == 200: print showstatus( wrapsbrace(('thread-{}').format(thread_name)) + wrapsbrace('200 OK', True) + ('GAC SMS sent! Sleeping for {}s . . . (iteration:{})' ).format(delaytime, iteration)) iteration += 1 sleep(delaytime) else: print showstatus( wrapsbrace(('thread-{}').format(thread_name)) + wrapsbrace(('{} {}').format( r.status_code, r.reason), True) + 'Something went wrong. Exiting thread . . .', 'warn') thread.exit() return
def run(self): if self.config['DEBUG']['SCR_DEBUG']: print 'Connected from %s:%d' % self.client_address while (not self.stop_request): # wait for data if not (self.rserver_buffer or self.client_buffer): # if buffers are empty """ if not self.rserver_socket_closed: select.select([self.rserver_socket.fileno(), self.client_socket.fileno()], [], [], 5.0) else: # if there is no connection to remote server select.select([self.client_socket.fileno()], [], [], 5.0) """ # Experimental code. We fight bug when we do not get all the header in the # first try and have stop request because rserver_socket_closed==1 # So let's try change socket_closed to socket. which is None if there is no # connection if not self.rserver_socket_closed: try: select.select([ self.rserver_socket.fileno(), self.client_socket.fileno() ], [], [], 5.0) except (socket.error, select.error, ValueError): thread.exit() else: # if there is no connection to remote server try: select.select([self.client_socket.fileno()], [], [], 5.0) except socket.error: thread.exit() # client part self.run_client_loop() if self.tunnel_mode: self.tunnel_client_data() if not self.client_header_sent and self.client_head_obj: if not self.rserver_socket_closed: # if connected we have to check whether we are connected to the right host. # if not then close connection self.check_connected_remote_server() #if self.rserver_socket_closed: if self.rserver_socket_closed: # connect remote server if we have not yet self.connect_rserver() self.log_url() self.send_client_header() if self.client_header_sent and (not self.client_data_sent): self.send_client_data() if self.client_data_sent and self.rserver_data_sent: # NOTE: we need to know if the request method is HEAD or CONNECT, so we cannot # proceed to the next request header until response is not worked out self.check_tunnel_mode() self.reset_client() if self.config['DEBUG']['SCR_DEBUG']: print '\b.', # Remote server part if not self.rserver_socket_closed: # if there is a connection to remote server self.run_rserver_loop() if self.tunnel_mode: self.tunnel_rserver_data() if (not self.rserver_header_sent) and self.rserver_head_obj: self.auth_routine() # NTLM authorization if (not self.rserver_header_sent) and self.rserver_head_obj: self.send_rserver_header() self.check_rserver_response() if self.rserver_header_sent and (not self.rserver_data_sent): self.send_rserver_data() if self.client_head_obj == None and self.rserver_data_sent: self.reset_rserver() self.logger.log('*** Request completed.\n') self.check_stop_request() self.exit() if self.config['DEBUG']['SCR_DEBUG']: print 'Finished %s:%d' % self.client_address
def __THREAD__send(self): self.__threadsalive += 1 logging.info(repr(self.__address) + "Ready To Send...") while self.__connalive: #print repr(self.__address) + "consend start" self.__outlock.acquire() while len(self.__outgoingdata) == 0 and self.__connalive: self.__outlock.wait(MWNL_TIMEOUT) #END WHILE try: if self.__connalive: sending = "" while len(self.__outgoingdata) > self.__blocksize: sending = self.__outgoingdata[:self.__blocksize] self.__outgoingdata = self.__outgoingdata[self. __blocksize:] #print "ogdb = " + self.__outgoingdata #print "sendingb = " + sending logging.debug("Sending Chunk " + repr(len(sending)) + " bytes") self.__socket.send(sending) #END WHILE if self.__connalive and len(self.__outgoingdata) > 0: #print "sending = " + self.__outgoingdata logging.debug("Sending " + repr(len(self.__outgoingdata)) + " bytes") self.__socket.send( self.__outgoingdata) #Error if socket closed? self.__outgoingdata = "" #print "ogd = " + self.__outgoingdata #END IF #print repr(self.__address) + "outnow = " + self.__outgoingdata #END IF except socket.error as error: # If the 10054 error, we assume connection is closed and ok to terminate logging.info("Remote Client Closed Connection?") self.__connalive = False self.__callback([(self.__id, 0), MWNL_CMD_DISCONNECT]) if error.errno not in [ errno.WSAECONNRESET, errno.WSAECONNABORTED ]: logging.info("Error Sending Data") logging.error("Error Sending Data") logging.info(traceback.format_exc()) logging.error(traceback.format_exc()) print traceback.format_exc() #END IF except: self.__connalive = False logging.info("Error Sending Data") logging.error("Error Sending Data") logging.info(traceback.format_exc()) logging.error(traceback.format_exc()) print traceback.format_exc() self.__callback([(self.__id, 0), MWNL_CMD_DISCONNECT]) self.__outlock.release() #print repr(self.__address) + "consend end" #END WHILE logging.info(repr(self.__address) + "Stopped Sending...") self.__threadsalive -= 1 thread.exit() #END __THREAD__send #END MWNL_Connection
    # already what we wanted.
    #
    # This must be done after attaching to at least one process.
    #
    # http://msdn.microsoft.com/en-us/library/ms679307(VS.85).aspx
    try:
        win32.DebugSetProcessKillOnExit(True)
    except AttributeError:
        pass
    except WindowsError, e:
        print "Warning: call to DebugSetProcessKillOnExit() failed: %s" % str(e)

    if count == 0:
        print "Failed! No process was killed."
    elif count == 1:
        print "Successfully killed 1 process."
    else:
        print "Successfully killed %d processes." % count

    # Exit the current thread.
    # This will kill all the processes we have attached to.
    exit()

if __name__ == '__main__':
    try:
        import psyco
        psyco.bind(main)
    except ImportError:
        pass
    main(sys.argv)
def checkThread(self):
    if self.threadComplete == True:
        self.session.remove_torrent(self.torrentHandle)
        thread.exit()
def enviar():
    while True:
        msgSND = raw_input()
        con.send(msgSND)
    thread.exit()
class AgentThread(threading.Thread): """ All threads used by the agent should be of type agent thread. This thread will be used to generate ids, provide categories for the thread mgr. """ def __init__(self, threadMgr, cat = None, name = 'agent_thread', mergeOnFound = False, reqChecksum = None): """ Constructor. Creates the uuid. Sets the category of this thread """ threading.Thread.__init__(self) self.__lock = threading.Lock() self.__stop = False self._timeout = configutil.getConfigAsInt('agent_thread_timeout') self._progressTimeout = 0 self.__lastProgress = None self.__timeoutAt = 0 # thread event to mark when the thread has been added to threadMgr self.threadMgrEvent = threading.Event() self.threadMgrEvent.clear() # thread manager self._threadMgr = threadMgr # used by status self.__uuid = str(uuid.uuid4()) self.__cat = cat self.__name = name self.__executionMsec = None self.__mergeOnFound = mergeOnFound self.__reqChecksum = reqChecksum # status self.__status = {'httpStatus': 200, 'progress': 0, 'fprogress': 0.0, 'result': None, 'error': None, 'errorMsg': None, 'executionMsec': None} ############################################################## # Thread Running Methods ############################################################## def run(self): """ run - register self with threadmgr """ self.beforeRun() self.__timeoutAt += (time.time() + self._timeout) # start timer self.__executionMsec = time.time() * 1000 try: self._threadMgr.addThread(self) self.threadMgrEvent.set() self.doRun() except ConcurrentActivityException as excep: # found existing, and merge with it if needed conflictUuid = excep.getConflictUuid() if self.__mergeOnFound: if self.__reqChecksum: conflictThread = self._threadMgr.getThreadByUuid(conflictUuid) if conflictThread and isinstance(conflictThread, AgentThread) and conflictThread.getReqChecksum() == self.__reqChecksum: LOG.info('Thread(%s / %s) merged with existing thread %s' % (self.__name, self.__uuid, excep.getConflictUuid())) self.__uuid = excep.getConflictUuid() self._updateProgress(0) else: self._updateStatus(httpStatus = 500, progress = 100, error = excep.getCode(), errorMsg = excep.getMsg()) LOG.error('Thread(%s / %s) Caught ThreadMgr Exception - exiting (%s) - %s' % (self.__name, self.__uuid, excep, traceback.format_exc(5))) thread.exit() except AgentException, excep: self._updateStatus(httpStatus = 500, progress = 100, error = excep.getCode(), errorMsg = excep.getMsg()) LOG.error('Thread(%s / %s) Caught ThreadMgr Exception - exiting (%s) - %s' % (self.__name, self.__uuid, excep, traceback.format_exc(5))) thread.exit() except Exception, excep: LOG.error('Thread(%s / %s) Caught ThreadMgr Exception - exiting (%s) - %s' % (self.__name, self.__uuid, excep, traceback.format_exc(5))) thread.exit()
def __THREAD__listen(self): self.__threadsalive += 1 logging.info(repr(self.__address) + "Listening...") retval = "" __incomingdata = "" self.__socket.settimeout(MWNL_TIMEOUT) while self.__connalive: try: blk = self.__socket.recv(self.__blocksize) if blk <> "": #print repr(self.__address) + "listen start" #print "recv = " + blk #print repr(self.__address) + "indata+ " + blk logging.debug("Received " + repr(len(blk)) + " bytes") __incomingdata += blk blk = "" i = __incomingdata.find("[") while i != -1: #logging.debug("Found json data at " + repr(i)) #TODO: chop after all data received? #logging.debug("Message Info Before: " + self.__incomingdata) lofs = __incomingdata[:i] # Do we have complete data tlen = len(lofs) + int(lofs) if len(__incomingdata) >= tlen: #logging.debug("Processing json data found") retval = __incomingdata[:tlen] __incomingdata = __incomingdata[tlen:] retval = json.loads("[" + retval[i:] + "]") else: logging.debug("Need more json data, need %d bytes", tlen) #atedata = tlen break #eif # If We Have A Complete Tag, Process It if retval <> "": #atedata = 0 logging.debug("Making callback for command: %s", retval) self.__callback(retval) logging.debug("Callback Finished") retval = "" #END IF i = __incomingdata.find("[") #wend #print repr(self.__address) + "listen end" #print "icd = " + self.__incomingdata #END IF except socket.timeout: pass except socket.error as error: # If the 10054 error, we assume connection is closed and ok to terminate if error.errno == errno.WSAECONNRESET or error.errno == errno.WSAECONNABORTED: self.close() break else: logging.info(traceback.format_exc()) logging.error(traceback.format_exc()) print traceback.format_exc() #END IF except: logging.info(traceback.format_exc()) logging.error(traceback.format_exc()) print traceback.format_exc() #END TRY/EXCEPT #END WHILE logging.info(repr(self.__address) + "Stopped Listening...") self.__threadsalive -= 1 thread.exit()
def conectado(con, cliente):
    global tempo_rasp
    global cont_rasp
    tempo_rasp = 0
    cont_rasp = 0
    global tempo_client
    global cont_client
    tempo_client = 0
    cont_client = 0
    print 'Connected by', cliente
    msg = con.recv(1024)
    if msg == 'id_rasp':
        print cliente, msg
        while True:
            msg = con.recv(1024)
            inicio = datetime.datetime.now()
            if not msg:
                break
            # print 'Rasp: ', cliente, msg
            msg = str(msg).split(';')
            temperatura = float(msg[0])
            umidade = float(msg[1])
            sql = "INSERT INTO `temperatura_umidade` (`data`, `temperatura`, `umidade`) VALUES (%s,%s,%s)"
            val = (datetime.datetime.now(), temperatura, umidade)
            lock.acquire()  # Lock access to the DB resource
            mycursor.execute(sql, val)
            mydb.commit()
            lock.release()  # Release access to the DB resource
            con.send(str("ok"))
            fim = datetime.datetime.now()
            delta = fim - inicio
            deltas = delta.total_seconds()
            # print deltas
            lock_medicao.acquire()
            tempo_rasp += deltas
            cont_rasp += 1
            lock_medicao.release()
    else:
        if msg == 'id_client':
            print cliente, msg
            while True:
                msg = con.recv(1024)
                inicio = datetime.datetime.now()
                if not msg:
                    break
                # print 'Cliente: ', cliente, msg
                numero = int(msg)
                lock.acquire()  # Lock access to the DB resource
                mycursor.execute(
                    "SELECT temperatura,umidade FROM temperatura_umidade order by id desc limit %s" % (numero))
                myresult = mycursor.fetchall()
                lock.release()  # Release access to the DB resource
                con.send(str(myresult))
                fim = datetime.datetime.now()
                delta = fim - inicio
                deltas = delta.total_seconds()
                # print deltas
                lock_medicao.acquire()
                tempo_client += deltas
                cont_client += 1
                lock_medicao.release()
    print 'Closing client connection', cliente
    lock_medicao.acquire()
    print("Rasp average")
    print(tempo_rasp / cont_rasp)
    print("Client average")
    print(cont_client)
    print(tempo_client / cont_client)
    lock_medicao.release()
    con.close()
    thread.exit()
def main_thread(conf): print("thread 1 started\n") ''' main_thread: ''' global shared_memory import time import datetime import RPi.GPIO as GPIO import Adafruit_GPIO.SPI as SPI import Adafruit_MCP3008 #globals: GLOBVAR = {} #global variables GLOBMSG = {} #global messages GLOBIN = {} #global input data GLOBOUT = {} #global output data GLOBLOG = {} #global log data GLOBTIMER = {} #global timer data GLOBGPIO = {} #global gpio data #init global variables: GLOBVAR["first_cycle"] = True GLOBVAR["stop"] = False GLOBVAR["cycletime"] = 0.0 #[sec] GLOBVAR["cycletime_offset"] = 0.2 #recommended values: 0.0s-1.5[sec] GLOBVAR["restarttime"] = 5.0 #recommended values: 0.0-5.0[sec] GLOBVAR["auto_release"] = False GLOBVAR["diag"] = False GLOBVAR["rain"] = False GLOBVAR["light"] = False GLOBVAR["level"] = False GLOBVAR["wetness_1"] = False GLOBVAR["wetness_2"] = False GLOBVAR["wetness_3"] = False GLOBVAR["max_time"] = 0 GLOBVAR["switch_value"] = 0 GLOBVAR["start"] = False #init global messages: GLOBMSG["message"] = set() #init message as a set GLOBMSG["warning"] = set() #init warning as a set GLOBMSG["error"] = set() #init error as a set GLOBMSG["status"] = "stop" #init status as a string "stop" GLOBMSG["logged"] = "" #setup for the gpio's def user_gpio_setup(GLOBVAR, GLOBGPIO): ''' user_gpio_setup: ''' #gpio general setup: GPIO.setmode(GPIO.BCM) GPIO.setwarnings(False) #hc-sr04 (ultrsonicsensor): GLOBGPIO["trig"] = 4 GLOBGPIO["echo"] = 18 GPIO.setup(GLOBGPIO["trig"], GPIO.OUT) GPIO.output(GLOBGPIO["trig"], False) #init GPIO.setup(GLOBGPIO["echo"], GPIO.IN) #spi-Interface for mcp3008: SPI_PORT = 0 SPI_DEVICE = 0 GLOBGPIO["mcp3008_1"] = Adafruit_MCP3008.MCP3008( spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE)) #pump GLOBGPIO["pump_1"] = 19 GPIO.setup(GLOBGPIO["pump_1"], GPIO.OUT) GPIO.output(GLOBGPIO["pump_1"], False) #init ''' user_gpio_setup end ''' pass user_gpio_setup(GLOBVAR, GLOBGPIO) #setup for the gpio's #user program: def user_program(GLOBVAR, GLOBMSG, GLOBIN, GLOBOUT, GLOBTIMER, GLOBGPIO): ''' user_program: ''' global shared_memory #hc-sr04 (ultrasonicsensor): GPIO.output(GLOBGPIO["trig"], True) time.sleep(0.001) GPIO.output(GLOBGPIO["trig"], False) end = 0 start = 0 sig_time = 0 timeout = False timeout_time = time.time() while not GPIO.input(GLOBGPIO["echo"]): start = time.time() if (time.time() - timeout_time) > 0.5: timeout = True break timeout_time = time.time() while GPIO.input(GLOBGPIO["echo"]): end = time.time() if (time.time() - timeout_time) > 0.5: timeout = True break if not timeout: sig_time = end - start result = sig_time / 0.000058 #distance in cm if result > 0: GLOBIN["distance_1"] = result else: GLOBIN["distance_1"] = 0 else: #shared_memory["MQTT_SEND"].append("error/hc-sr04 timeout!") GLOBIN["distance_1"] = 0 pass #mcp3008 (8-channel-analog-digital-converter): values = [0] * 8 #init list of 8 ints for i in range(8): values[i] = GLOBGPIO["mcp3008_1"].read_adc(i) GLOBIN["mcp3008_1_ch1"] = values[0] #wetness-sensor 1 GLOBIN["mcp3008_1_ch2"] = values[1] #wetness-sensor 2 GLOBIN["mcp3008_1_ch3"] = values[2] #wetness-sensor 3 GLOBIN["mcp3008_1_ch4"] = values[3] #rain-sensor GLOBIN["mcp3008_1_ch5"] = values[4] #brightness-sensor GLOBIN["mcp3008_1_ch6"] = values[5] #reserve GLOBIN["mcp3008_1_ch7"] = values[6] #reserve GLOBIN["mcp3008_1_ch8"] = values[7] #reserve #water-level (values in cm): if GLOBIN["distance_1"] > 10: GLOBVAR["level"] = False else: GLOBVAR["level"] = True #light (0-1000): if GLOBIN["mcp3008_1_ch5"] > 600: GLOBVAR["light"] = True elif GLOBIN["mcp3008_1_ch5"] < 500: GLOBVAR["light"] 
= False #rain (0-1000): if GLOBIN["mcp3008_1_ch4"] < 700: GLOBVAR["rain"] = True elif GLOBIN["mcp3008_1_ch4"] > 850: GLOBVAR["rain"] = False #wetness-sensor 1 (0-1000): if GLOBIN["mcp3008_1_ch1"] > (GLOBVAR["switch_value"] + 150): GLOBVAR["wetness_1"] = False elif GLOBIN["mcp3008_1_ch1"] < (GLOBVAR["switch_value"] - 150): GLOBVAR["wetness_1"] = True ''' #wetness-sensor 2 (0-1000): if GLOBIN["mcp3008_1_ch2"]>600: GLOBVAR["wetness_2"]=False elif GLOBIN["mcp3008_1_ch2"]<600: GLOBVAR["wetness_2"]=True #wetness-sensor 3 (0-1000): if GLOBIN["mcp3008_1_ch3"]>600: GLOBVAR["wetness_3"]=False elif GLOBIN["mcp3008_1_ch3"]<600: GLOBVAR["wetness_3"]=True ''' #pump control: GLOBOUT["pump_1"] = not GLOBVAR["auto_release"] and GLOBVAR[ "start"] #GLOBVAR["level"] and not GLOBVAR["rain"] and not GLOBVAR["wetness_1"] and GLOBVAR["auto_release"] or not GLOBVAR["auto_release"] and GLOBVAR["start"] and GLOBVAR["level"] GPIO.output(GLOBGPIO["pump_1"], GLOBOUT["pump_1"]) #diag: if GLOBVAR["diag"]: print("---") print("status: " + GLOBMSG["status"]) print("inputs: " + repr(GLOBIN)) print("outputs: " + repr(GLOBOUT)) print("variablen: " + repr(GLOBVAR)) print("message: " + str(GLOBMSG["message"])) print("warning: " + str(GLOBMSG["warning"])) print("error: " + str(GLOBMSG["error"])) print("MQTT_RECV: " + repr(shared_memory["MQTT_RECV"])) print("MQTT_SEND: " + repr(shared_memory["MQTT_SEND"])) if shared_memory["MQTT_RECV"]: for entry in shared_memory["MQTT_RECV"]: data = shared_memory["MQTT_RECV"][0].split("/") if data[0].decode("UTF-8") == "Betriebsart": if data[1].decode("UTF-8") == "Auto": GLOBVAR["auto_release"] = True pass elif data[1].decode("UTF-8") == "Hand": GLOBVAR["auto_release"] = False pass shared_memory["MQTT_RECV"].pop(0) pass elif data[0].decode("UTF-8") == "Zeit": try: GLOBVAR["max_time"] = int(data[1].decode("UTF-8")) except: pass shared_memory["MQTT_RECV"].pop(0) pass elif data[0].decode("UTF-8") == "Feuchte": try: GLOBVAR["switch_value"] = int(data[1].decode("UTF-8")) except: pass shared_memory["MQTT_RECV"].pop(0) pass elif data[0].decode("UTF-8") == "Hand": if data[1].decode("UTF-8") == "Start": GLOBVAR["start"] = True elif data[1].decode("UTF-8") == "Stop": GLOBVAR["start"] = False shared_memory["MQTT_RECV"].pop(0) pass else: try: shared_memory["MQTT_RECV"].pop(0) except: pass ''' data=shared_memory["MQTT_RECV"][0].split("/") if data[0]=="test": GLOBVAR["test"]=int(data[1]) shared_memory["MQTT_RECV"].pop(0) elif data[0]=="auto_release": if data[1]=="1": GLOBVAR["auto_release"]=True elif data[1]=="0": GLOBVAR["auto_release"]=False else: pass shared_memory["MQTT_RECV"].pop(0) else: try: shared_memory["MQTT_RECV"].pop(0) except: pass ''' ''' user_program end ''' pass def user_log(GLOBMSG): ''' user_log: ''' data = " m: " + str(GLOBMSG["message"]) + " w: " + str( GLOBMSG["warning"]) + " e: " + str(GLOBMSG["error"]) if data != GLOBMSG["logged"]: with open("log.txt", "a+") as file: GLOBMSG["logged"] = data file.write(str(datetime.datetime.now()) + data + "\n") ''' user_log end ''' pass while True: while True: if GLOBVAR["stop"]: GLOBMSG["status"] = "stop" GPIO.cleanup() #reset gpio setup if GLOBVAR["restarttime"] > 0.0: time.sleep(GLOBVAR["restarttime"]) user_gpio_setup(GLOBVAR, GLOBGPIO) #setup for the gpio's user_log(GLOBMSG) #loging: message/warning/error GLOBMSG["message"] = set() GLOBMSG["warning"] = set() GLOBMSG["error"] = set() GLOBVAR["stop"] = False while not GLOBVAR["stop"]: time_value_1 = time.time() user_program(GLOBVAR, GLOBMSG, GLOBIN, GLOBOUT, GLOBTIMER, GLOBGPIO) #user program 
GLOBVAR["first_cycle"] = False if GLOBMSG["error"]: GLOBVAR["stop"] = True GLOBVAR["auto_release"] = False if not GLOBVAR["stop"]: GLOBMSG["status"] = "run" if GLOBVAR["cycletime_offset"] > 0.0: time.sleep(GLOBVAR["cycletime_offset"]) GLOBVAR["cycletime"] = time.time() - time_value_1 if GLOBVAR["diag"]: print("cycletime: " + str(GLOBVAR["cycletime"])) GPIO.cleanup() print("thread 1 exit\n") shared_memory["thread1_run"] = False thread.exit() ''' main_thread end ''' pass
def user_command(self): "User command interface" global CLOSE_INFORMING while NODE_SHUTDOWN == False: try: command = raw_input('Command>> ') except: thread.exit() if command == 'SHOWRT': self.display_table() elif command == 'SHOWNB': self.display_neighbor() elif command == 'CLOSE': self.node_close() elif command[0:8] == 'LINKDOWN': linkdown_ip = command[9:command.rfind(' ')] linkdown_port = int(command[command.rfind(' ') + 1:]) self.node_linkdown(linkdown_ip, linkdown_port) elif command[0:6] == 'LINKUP': linkup_ip = command[7:command.rfind(' ')] linkup_port = int(command[command.rfind(' ') + 1:]) self.node_linkup(linkup_ip, linkup_port) elif command == 'CLOSEMODE': close_mode_choice = raw_input( '\r>> Should the node inform its neighbors when it is closed (y/n)? ' ) if close_mode_choice == 'y': CLOSE_INFORMING = True print '\r>> Now the node will inform its neighbors when it is closed' elif close_mode_choice == 'n': CLOSE_INFORMING = False print '\r>> Now the node will not inform its neighbors when it is closed' else: print '\r>> I can not understand...' elif command[0:10] == 'LINKCHANGE': command = command.split(' ') link_neighbor_ip = command[1] link_neighbor_port = int(command[2]) new_cost = float(command[3]) self.node_linkchange(link_neighbor_ip, link_neighbor_port, new_cost) elif command == 'CHANGETIMEOUT': new_timeout = float( raw_input('\r>> Please enter the new timeout value: ')) self.timeout = new_timeout print '>> Now the timeout value of the node is: %f s' % self.timeout elif command == 'HELP': print '=' * 100 print 'COMMAND'.ljust(30), 'FUNCTIONALITY' print '-' * 100 print 'SHOWRT'.ljust( 30), 'Show the distance vector list of the node' print 'SHOWNB'.ljust( 30), 'Show the neighbor information of the node' print 'CLOSE'.ljust(30), 'Close the node permanently' print 'LINKDOWN {ip port}'.ljust( 30 ), 'Temporarily shut down a link between the node and specified neighbor' print 'LINKUP {ip port}'.ljust( 30 ), 'Recover the shut down link between the node and specified neighbor' print 'LINKCHANGE {ip port cost}'.ljust( 30 ), 'Change a link cost between the node and the specified neighbor' print 'CLOSEMODE'.ljust( 30 ), 'Choose if the node should inform its neighbors when it is close' print 'CHANGETIMEOUT'.ljust( 30), 'Change the timeout value of the node' print '=' * 100 else: print 'We cannot understand your command. Please enter HELP for more information.' thread.exit()
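The command loop above recognizes LINKDOWN/LINKUP/LINKCHANGE by slicing fixed character ranges out of the raw input (command[0:8], command[9:command.rfind(' ')]), which breaks if the user types extra spaces. A split()-based parse of the same commands is sketched below; the handler calls are omitted and the returned tuple format is purely illustrative, assuming the same ip/port/cost argument order:

def parse_command(command):
    # Illustrative: tokenize once, then branch on the first word.
    parts = command.split()
    if not parts:
        return None
    name = parts[0].upper()
    if name in ('LINKDOWN', 'LINKUP') and len(parts) == 3:
        return (name, parts[1], int(parts[2]))
    if name == 'LINKCHANGE' and len(parts) == 4:
        return (name, parts[1], int(parts[2]), float(parts[3]))
    return (name,)

print parse_command('LINKDOWN 10.0.0.2 7000')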
def fork_command(cli_sock,addr,user_dict,root_dir): valid_flag=False data=cli_sock.recv(1024) uname,passwd,user_comm=data.split('|||') if uname in user_dict.keys(): #print("In first if") if user_dict[uname]==passwd: #print("In second if") cli_sock.send("Valid") valid_flag=True else: cli_sock.send('Invalid') if(valid_flag): #print("The user command is"+user_comm) if(user_comm=='PUT'): cwd=os.getcwd() dir_name=root_dir[1:] dir_serv=os.path.join(cwd,dir_name) if (not os.path.isdir(dir_serv)): #print("The directory doesn't exists") os.makedirs(dir_name) #os.chdir(dir) #print(os.getcwd()) #print(uname) usr_dir=os.path.join(dir_serv,uname) if (not os.path.isdir(usr_dir)): #print("User directory creating") os.makedirs(usr_dir) while(True): try: #print("Waiting for fname") fname=cli_sock.recv(1024) #print(fname) cli_sock.send("name_ACK") if(fname=="CLOSE"): #print("After closing") cli_sock.close() thread.exit() else: #print(fname+"Thread start") put_file(cli_sock,addr,root_dir,uname,fname) except socket.error: print("Closing after error") cli_sock.close() thread.exit() if(user_comm=='GET'): fname=cli_sock.recv(1024) #print("Inside get"+fname) fl_list=[] cwd=os.getcwd() dir_name=root_dir[1:] serv_path=os.path.join(cwd,dir_name) user_path=os.path.join(serv_path,uname) #print(user_path) if (os.path.isdir(user_path)): file_list=os.listdir(user_path) #print(file_list) for f in file_list: l=f[1:] z=l[:-2] if(z==fname): fl_list.append(f) #print("The final list") #print(fl_list) if(len(fl_list)==0): #print("NACK final list empty") cli_sock.send("NACK") else: for files in fl_list: get_file(files,cli_sock,user_path) cli_sock.close() thread.exit() else: #print("In else") cli_sock.send("NACK") if(user_comm=='LIST'): ready_ack=cli_sock.recv(1024) #print('ready ack'+ready_ack) if(ready_ack=='Ready'): cwd=os.getcwd() dir_name=root_dir[1:] serv_path=os.path.join(cwd,dir_name) user_path=os.path.join(serv_path,uname) #print(user_path) file_str='' if (os.path.isdir(user_path)): file_list=os.listdir(user_path) file_len=len(file_list) i=0 for files in file_list: if i==file_len-1: file_str=file_str+files break file_str=file_str+files+'|||' i=i+1 #print(file_str) cli_sock.sendall(file_str) else: #print("In else") cli_sock.send("NACK")
def receive(self):
    while self.listen:
        message_received = self.socket.recv(2048)
        self.on_received(message_received)
        print "[ INFO ]", "Received Message:", message_received
    thread.exit()
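In the short receive() handler above, thread.exit() after the while loop is effectively a verbose return: once self.listen goes false the loop ends and the SystemExit raised by thread.exit() ends the thread. A self-contained sketch of the same shape, with the socket replaced by a plain list of messages and a threading.Event standing in for self.listen (both are assumptions made only to keep the example runnable):

import thread
import threading

def receive_loop(messages, on_received, stop_event):
    # Stand-in for the socket recv loop: iterate until told to stop.
    for message in messages:
        if stop_event.is_set():
            break
        on_received(message)
    # Equivalent to returning here: SystemExit ends only this thread.
    thread.exit()

stop = threading.Event()
t = threading.Thread(target=receive_loop,
                     args=(["hello", "world"], lambda m: None, stop))
t.start()
t.join()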
def acceptThread(clientSocket, clientAddr):
    print '<-> Client connected: ', clientAddr
    # Read the client's request
    request = receiveHttpMsg(clientSocket)
    # Validate the method; only CONNECT is accepted
    if not request.startswith('CONNECT'):
        print '<!> Client requested a method != CONNECT!'
        clientSocket.sendall('HTTP/1.1 405 Only_CONNECT_Method!\r\n\r\n')
        clientSocket.close()
        thread.exit()
    # Split the data out of the incoming request
    netData = getRequestNetData(request)
    protocol = getRequestProtocol(request)
    hostPort = getRequestHostPort(netData)
    # Build the final request from the payload, using the incoming request data
    finalRequest = getReplacedPayload(PAYLOAD, netData, hostPort, protocol)
    # Send the request to the proxy server
    proxySocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    proxySocket.connect((PROXT_ADDR, PROXY_PORT))
    proxySocket.sendall(finalRequest)
    # Receive the proxy server's response
    proxyResponse = receiveHttpMsg(proxySocket)
    print '<-> Status line: ' + getRequestNetData(proxyResponse)
    # Forward the proxy response to the client
    clientSocket.sendall(proxyResponse)
    # If the proxy response carries a 200 code, perform the CONNECT tunneling
    if proxyResponse.find('200 ') != -1:
        doConnect(clientSocket, proxySocket, TAM_BUFFER)
    # Close the connection with the client
    print '<-> Client ended : ', clientAddr
    proxySocket.close()
    clientSocket.close()
    thread.exit()

#############################__START__########################################
print '\n'
print '==>Injector.py'
print '-->Listening : ' + BIND_ADDR + ':' + str(BIND_PORT)
print '-->Remote proxy: ' + PROXT_ADDR + ':' + str(PROXY_PORT)
print '-->Payload : ' + PAYLOAD
print '\n'
# Set up listening on a local port
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((BIND_ADDR, BIND_PORT))
server.listen(1)
print '<-> Server listening... '
# Accept each client and dispatch a thread to serve it
while True:
    clientSocket, clientAddr = server.accept()
    thread.start_new_thread(acceptThread, tuple([clientSocket, clientAddr]))
server.close()
def recv_handler(connectionSocket): global onlineUsers global blockedUsers global connectionSockets global loginRecord global clientList global timeout global t_lock newClient = client() #create client object with each new connection while (1): try: #wait for username to arrive from the client clientUsername = connectionSocket.recv(1024) except: thread.exit() #if the username given is valid, break the loop and get the password if checkvalidUsername(clientUsername) == "valid": message = "valid username" try: connectionSocket.send(message) break except: thread.exit() else: message = "invalid username" #otherwise if the given username is not valid, restart process try: connectionSocket.send(message) continue except: thread.exit() while (1): global time try: #wait for data to arrive from the client request = connectionSocket.recv(1024) except: thread.exit() clientUsername = getUsername(request) newClient.name = clientUsername if checkBlocked( clientUsername ) == "blocked": #check to see if user is currently blocked loginResult = "Your account is blocked due to multiple login failures. Please try again later" try: connectionSocket.send(loginResult) connectionSocket.close( ) #close the connectionSocket if they are blocked except: thread.exit() elif checkBlocked( clientUsername ) == "not blocked" and newClient.name not in onlineUsers: #to log in the user, check that they're not blocked and that they're not already online if authenticate( request ) == 1: #if the username is not blocked, check credentials.txt to see if the username & password is valid onlineUsers[clientUsername] = time.time( ) #for the whoelse function loginRecord[clientUsername] = time.time( ) #for the whoElseSince function loginResult = "Authenticated" try: connectionSocket.send( loginResult) #if valid, send the result to the client except: thread.exit() time.sleep(0.2) presenceBroadcast( newClient, "on" ) #Someone has logged in, so send this result to everyone else online #send the user that just logged on, their offline messages if clientUsername in offlineMessages: for x in offlineMessages[clientUsername]: x = x + "\n" connectionSocket.send( x) #send the messages received offline to the user del offlineMessages[ clientUsername] #once the messages have been sent, remove the messages from storage connectionSockets[ clientUsername] = connectionSocket #add the user's connection socket to the dictionary connectionSocket.settimeout( timeout ) #set the connection socket timeout to the given value. 
# Code syntax adapted from https://docs.python.org/2/library/socket.html while (1): try: request = connectionSocket.recv( 1024) #get data from the client except: #Log the user out if timeout has been reached del onlineUsers[newClient.name] del connectionSockets[newClient.name] presenceBroadcast(newClient, "off") message = "You have been logged out due to inactivity" connectionSocket.send(message) thread.exit() with t_lock: command = request.split( " " ) #parse the command sent by the client: look at the first term for the command and call the appropriate function if command[0] == "logout": del onlineUsers[newClient.name] del connectionSockets[newClient.name] presenceBroadcast(newClient, "off") message = "You have been logged out" connectionSocket.send(message) thread.exit() elif command[0] == "whoelse": whoElse(newClient.name, connectionSocket) elif command[0] == "whoelsesince": period = command[1] whoElseSince(newClient.name, connectionSocket, period) elif command[0] == "broadcast": message = command[1:] broadcast(newClient, message) elif command[0] == "message": user = command[1] message = command[ 2:] #message is everything beyond the 2nd term sendMessage(newClient, user, message) elif command[0] == "block": user = command[ 1] #get the name of the user to be blocked blacklistUser(newClient, user) #block this user elif command[0] == "unblock": user = command[ 1] #get the name of the user to be unblocked unblacklistUser(newClient, user) #unblock this user else: message = "Error: Invalid command" try: connectionSocket.send(message) except: thread.exit() else: newClient.loginAttempt = newClient.loginAttempt + 1 #otherwise, start counting number of login attempts if wrong password is entered if newClient.loginAttempt == 3: #if the client has failed 3 times, block them startBlockTime = time.time() blockedUsers[clientUsername] = startBlockTime loginResult = "Invalid Password. Your account has been blocked. Please try again later" connectionSocket.send(loginResult) connectionSocket.close( ) #close the connectionSocket. Note that the serverSocket is still alive waiting for new clients thread.exit() else: loginResult = "Invalid Password. Please try again" try: connectionSocket.send(loginResult) except: thread.exit() else: alreadyOnline = "That user is already logged in" try: connectionSocket.send(alreadyOnline) thread.exit() except: thread.exit()
def client_connection(self, client_sock, client_address): logging.debug("CLIENT_CONN: Connected by: %s:%s", client_address[0], client_address[1]) while True: request = client_sock.recv(TCP_BUFFER_SIZE) # No request sent by the client if not request: break # Normal processing of the request ace_msg = int(request.split(';')[0]) logging.debug("CLIENT_CONN: Receiving message: %s from host %s", ACE_MSG_STR[ace_msg], client_address[0]) # ACE_MSG_GETSTATUS if ace_msg == ACE_MSG_GETSTATUS: # The response should include # staus, number of clusters, cluster id (when following one cluster only) r_state = str(self.get_mystate()) r_number_of_chs = str(len(self.cluster_membership)) r_cluster_id = '' if len(self.cluster_membership) == 1: r_cluster_id = self.cluster_membership.keys()[0] response_str = ';'.join( [r_state, r_number_of_chs, r_cluster_id]) logging.debug("CLIENT_CONN: Sending message %s to node %s", response_str, client_address[0]) client_sock.send(response_str) # ACE_MSG_RECRUIT if ace_msg == ACE_MSG_RECRUIT: new_ch_address = request.split(';')[1] new_cluster_id = request.split(';')[2] # Disable the migrating flag soon as the node receive a RECRUIT message # from the node that it is migrating to if self.migrating and self.migrating_to == new_ch_address: self.migrating = False self.migrating_to = '' self.is_cluster_head = False # Normal processing of a RECRUIT MSG if self.get_mystate() == ACE_STATE_CLUSTER_HEAD: logging.info( "CLIENT_CONN: I am a CH. I will not follow %s.", new_ch_address) else: self.join_cluster(new_cluster_id, new_ch_address) logging.info( "CLIENT_CONN: OK! I am a follower of the CH %s", new_ch_address) # ACE_MSG_POLL if ace_msg == ACE_MSG_POLL: response_str = '' if not self.ace_done: ch_to_poll = request.split(';')[1] num_loyal_followers = self.count_loyal_followers( ch_to_poll) # num_loyal_followers = str(len(self.loyal_followers)) # Not used anymore r_status = str(ACE_MSG_POLL_OK) r_num_loyal_followers = str(num_loyal_followers) response_str = ';'.join([r_status, r_num_loyal_followers]) logging.debug( "CLIENT_CONN: POLL Done! The answer was sent.") else: r_status = str(ACE_MSG_POLL_NA) r_num_loyal_followers = str(0) response_str = ';'.join([r_status, r_num_loyal_followers]) logging.debug("CLIENT_CONN: POLL NA!") client_sock.send(response_str) # ACE_MSG_PROMOTE if ace_msg == ACE_MSG_PROMOTE: cluster_id = request.split(';')[1] self.is_cluster_head = True self.my_cluster_id = cluster_id self.locally_broadcast(ACE_MSG_RECRUIT, self.node_address, cluster_id) self.send_promote_done(client_address[0]) # ACE_MSG_PROMOTE_DONE if ace_msg == ACE_MSG_PROMOTE_DONE: self.migrating = False self.migrating_to = '' # ACE_MSG_ABDICATE if ace_msg == ACE_MSG_ABDICATE: ch_address = request.split(';')[1] cluster_id = request.split(';')[2] self.left_cluster(cluster_id, ch_address) logging.debug("CLIENT_CONN: Closing client connection: %s:%s", client_address[0], client_address[1]) client_sock.close() thread.exit()
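The connection handler above tests ace_msg against each ACE_MSG_* constant in turn inside one long loop body; a dispatch-table layout is sketched below. Everything in the sketch is hypothetical (placeholder constants and handlers, not the ACE implementation):

ACE_MSG_GETSTATUS = 0
ACE_MSG_RECRUIT = 1

def handle_getstatus(fields):
    # Placeholder for the GETSTATUS branch above.
    return 'status'

def handle_recruit(fields):
    # Placeholder for the RECRUIT branch above.
    return None

HANDLERS = {
    ACE_MSG_GETSTATUS: handle_getstatus,
    ACE_MSG_RECRUIT: handle_recruit,
}

def dispatch(request):
    fields = request.split(';')
    handler = HANDLERS.get(int(fields[0]))
    return handler(fields) if handler else None

print dispatch('0;node-1')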
def extractEmail(baseurl, basename, headers, f): global numberOfThreads, numberOfThreadsLock, consoleLock, tocrawl, tocrawlLock, crawled, crawledLock, pattern, numberOfPages, numberOfPagesLock, fileLock # increase the threads number numberOfThreadsLock.acquire() numberOfThreads += 1 numberOfThreadsLock.release() tocrawlLock.acquire() if not len(tocrawl): tocrawlLock.release() numberOfThreadsLock.acquire() numberOfThreads -= 1 numberOfThreadsLock.release() thread.exit() return crawling = tocrawl.pop() tocrawlLock.release() try: # Get the url components for checks url = urlparse.urlparse(crawling) reqObj = urllib2.Request(crawling, None, headers) response = urllib2.urlopen(reqObj, None, 4) consoleLock.acquire() print thread.get_ident(), 'conectare la: %s' % crawling consoleLock.release() except: numberOfThreadsLock.acquire() numberOfThreads -= 1 numberOfThreadsLock.release() thread.exit() return numberOfPagesLock.acquire() numberOfPages += 1 numberOfPagesLock.release() #After connecting, read the response msg = response.read() #Find all emails emailuri = pattern.findall(msg) #Create default formatter sformat = formatter.NullFormatter() htmlparser = LinksExtractor(sformat) #Parse the response, saving the info about links htmlparser.feed(msg) htmlparser.close() links = htmlparser.get_links() # Add the emails to the file for i in range(len(emailuri)): try: fileLock.acquire() f.write(emailuri[i] + "\n") fileLock.release() except: numberOfThreadsLock.acquire() numberOfThreads -= 1 numberOfThreadsLock.release() thread.exit() return # Mark url as crawled try: crawledLock.acquire() crawled.add(crawling) crawledLock.release() except: numberOfThreadsLock.acquire() numberOfThreads -= 1 numberOfThreadsLock.release() thread.exit() return if not len(links): thread.exit() return #Find the new links to crawl in the extracted links for link in links: if (link.endswith('.png') or link.endswith('.jpg') or \ link.endswith('.gif') or link.endswith('.jpeg') or \ link.endswith('.zip') or link.endswith('.rar') or \ link.endswith('.avi') or link.endswith('.exe')): continue if link.startswith('/'): link = 'http://' + url[1] + link elif link.startswith('#'): link = 'http://' + url[1] + url[2] + link elif not link.startswith('http'): link = 'http://' + url[1] + '/' + link if link not in crawled: if basename[1] == url[1]: try: tocrawlLock.acquire() tocrawl.add(link) tocrawlLock.release() except: numberOfThreadsLock.acquire() numberOfThreads -= 1 numberOfThreadsLock.release() thread.exit() return numberOfThreadsLock.acquire() numberOfThreads -= 1 numberOfThreadsLock.release() thread.exit() return
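Every early-exit branch in extractEmail() repeats the same epilogue: acquire numberOfThreadsLock, decrement numberOfThreads, release, then thread.exit(). A try/finally sketch factors that bookkeeping out; the counter names mirror the crawler's globals, but the wrapper itself is an assumption, not part of the original code:

import thread
import threading

numberOfThreads = 0
numberOfThreadsLock = threading.Lock()

def run_counted(work):
    # Run one crawl step; the counter is updated exactly once on every exit path.
    global numberOfThreads
    with numberOfThreadsLock:
        numberOfThreads += 1
    try:
        work()
    finally:
        with numberOfThreadsLock:
            numberOfThreads -= 1
        # SystemExit from thread.exit() ends only this worker thread.
        thread.exit()

threading.Thread(target=run_counted, args=(lambda: None,)).start()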
def main(): os.system('cls') current_playlist = None pygame.mixer.init() print_banner() while True: cmd = raw_input(" >").strip() try: if "open" in cmd: if "help" in cmd: print " open <OPTIONAL path>" print " Opens a playlist at the desired path and saves in order to be played." else: if len(cmd) > 4: songs = open_playlist(cmd[5:]) else: songs = open_playlist() current_playlist = Playlist(songs) elif "list" in cmd: if "help" in cmd: print " list" print " Lists the songs in the current playlist (must have a playlist loaded to work)." else: if current_playlist == None: raise Exception(" ERR: Playlist not open.") for i in range(len(current_playlist.oggpaths)): if i == current_playlist.get_current_song_index(): print(" {GREEN}[%d] : %s{END}" % (i, current_playlist.get_song_title(index=i)) ).format(**color_format) else: print(" {WHITE}[%d] : %s{END}" % (i, current_playlist.get_song_title(index=i)) ).format(**color_format) elif "current" in cmd: if "help" in cmd: print " current" print " Prints the index and the name of the song currently playing." else: if current_playlist == None: raise Exception(" ERR: Playlist not open.") title = current_playlist.get_song_title() if title is None: raise Exception(" ERR: No song playing.") else: print(" {GREEN}[%d] : %s{END}" % (current_playlist.get_current_song_index(), title)).format(**color_format) elif "new" in cmd: if "help" in cmd: print " new <path <?>>" print " Creates a new directory specified in path." else: if len(cmd) > 3: new_playlist(cmd[4:]) else: new_playlist() elif "delete" in cmd: if "help" in cmd: print " delete <index>" print " Deletes the song at the desired index (must have a playlist loaded to work)." else: if current_playlist is None: raise Exception(" ERR: Playlist not loaded.") if len(cmd) > 6: current_playlist.delete_song(int(cmd[7:])) else: current_playlist.delete_song() elif "play" in cmd: if "help" in cmd: print " play <index>" print " Plays the loaded playlist at the desired index (0 if none given) (must have a playlist loaded to work)." else: if current_playlist == None: raise Exception(" ERR: Playlist not open.") if len(cmd) > 4: current_playlist.index = int(cmd[5:]) else: current_playlist.index = 0 if pygame.mixer.music.get_busy(): pygame.mixer.music.stop() thread.start_new_thread(run_player, ( 'Thread-1', current_playlist, )) elif "search" in cmd: if "help" in cmd: pass else: if current_playlist == None: raise Exception(" ERR: Playlist not open.") if len(cmd) > 6: songs = current_playlist.search_for_song(cmd[7:]) else: songs = current_playlist.search_for_song() if len(songs) == 0: raise Exception(" ERR: Song(s) not found.") for i in range(len(songs)): if songs[i] == current_playlist.get_current_song_index( ): print("{GREEN}[%d] : %s{END}" % (songs[i], current_playlist.get_song_title( songs[i]))).format(**color_format) else: print("{WHITE}[%d] : %s{END}" % (songs[i], current_playlist.get_song_title( songs[i]))).format(**color_format) elif "shuffle" in cmd: if "help" in cmd: print " shuffle" print " Toggle the shuffle feature (play random songs after each other) (must have a playlist loaded to work)." else: if current_playlist == None: raise Exception(" ERR: Playlist not open.") current_playlist.shuffle = not current_playlist.shuffle print " *** Shuffle : %s ***" % current_playlist.shuffle elif "skip" in cmd: if "help" in cmd: print " skip" print " Skips the current song in the playlist (must have a playlist loaded to work)." 
else: if current_playlist == None: raise Exception(" ERR: Playlist not open.") #playlist automatically increments index after playing a song current_playlist.skip_song() elif "previous" in cmd: if "help" in cmd: print " previous" print " Plays the previous song in the playlist (must have a playlist loaded to work)." else: if current_playlist == None: raise Exception(" ERR: Playlist not open.") current_playlist.previous_song() elif "restart" in cmd: if "help" in cmd: print " restart" print " Restarts the current song in the playlist (must have a playlist loaded to work)." else: if current_playlist == None: raise Exception(" ERR: Playlist not open.") current_playlist.restart_song() elif "pause" in cmd: if "help" in cmd: print " pause" print " Pauses the music player." else: pygame.mixer.music.pause() elif "unpause" in cmd: if "help" in cmd: print " unpause" print " Unpauses the music player." else: pygame.mixer.music.unpause() elif "volume" in cmd: if "help" in cmd: print " volume <new volume [0.0, 1.0]>" print " Sets the volume to a desired float value (must be between 0.0 and 1.0 inclusive)." else: new_volume = float(cmd[6:]) if new_volume < 0 or new_volume > 1: raise Exception(" ERR: Volume out of bounds [0, 1.0].") pygame.mixer.music.set_volume(new_volume) elif "download" in cmd: if "help" in cmd: print " download <OPTIONAL youtube url>" print " Searches for (no url provided) and downloads audio from a desired youtube video and converts it from AAC to OGG." print " Places the audio file in a directory of the user's choice." print " Possible thanks to pafy (yt download) and ffmpeg (convert aac to ogg)." else: if len(cmd) > 8: download_path = download_from_url(cmd[9:]) else: download_path = download_from_url(search_youtube()) elif "clear" in cmd: if "help" in cmd: print " clear" print " Clears/resets the command window." else: os.system('cls') print_banner() elif "version" in cmd: if "help" in cmd: print " version" print " Prints the current version of PyPlayer." else: print " PyPlayer v:1.0 by Brendan McCloskey (mccloskeydev)" elif "exit" in cmd: if "help" in cmd: print " exit" print " Exits the player." else: thread.exit() sys.exit(0) else: if "help" not in cmd: print " ERR: Command not recognized." print " *** HELP ***" print " COMMANDS: type <command> help to see help with specific functions" print " open <path>, new <path>, play <index>, list, current, search <keyword>" print " download <url>, shuffle, skip, previous, restart, pause, unpause, volume <new volume [0.0, 1.0]>" print " help, clear, version, exit" except Exception as inst: print("{RED}%s{END}" % inst.message).format(**color_format)
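All of the examples in this collection use the Python 2 thread module. Two facts worth keeping in mind when reading them: thread.exit() is documented as equivalent to raising SystemExit, and on Python 3 the same low-level module is available under the name _thread. A small compatibility sketch (the worker function is illustrative):

try:
    import thread              # Python 2
except ImportError:
    import _thread as thread   # Python 3 renamed the module

import threading

def worker():
    # Same effect as 'raise SystemExit': terminates only this thread.
    thread.exit()

t = threading.Thread(target=worker)
t.start()
t.join()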