def create_containers(self, pool=None, pool_num=10, num_containers=100, with_io=False):
    """Create a number of containers on a pool in parallel.

    Args:
        pool(str): pool handle.
        pool_num (int): pool number to create containers.
        num_containers (int): number of containers to create.
        with_io (bool): enable container test with execute_io.
    """
    self.log.info("==(2.%d)create_containers start.", pool_num)
    manager = ThreadManager(self.create_container_and_test, self.timeout - 30)
    # Queue one create_container_and_test worker per container.
    for index in range(num_containers):
        manager.add(pool=pool, pool_num=pool_num, cont_num=index, with_io=with_io)
    # Launch the create_container_and_test threads
    self.log.info(
        "==Launching %d create_container_and_test threads", manager.qty)
    failures = manager.check_run()
    self.log.info(
        "==(2.%d) after thread_manager_run, %d containers created.",
        pool_num, num_containers)
    if failures > 0:
        msg = "#(2.{}) FAILED create_container_and_test Threads".format(failures)
        self.d_log.error(msg)
        self.fail(msg)
def __init__(self):
    """Create the service manager and launch its background verifier."""
    self.service_manager = ServiceManager()
    manager = ThreadManager.instance()
    # Re-check service health every 2 seconds on a dedicated thread.
    manager.add_thread(
        'ServicesVerifier',
        self.service_manager.verify_services_status,
        sleep_time=2)
    manager.start_thread('ServicesVerifier')
def __init__(self):
    """Program entry point"""
    parser = argparse.ArgumentParser()
    parser.add_argument("-c", "--config", dest="file",
                        default="/etc/mysql-statsd.conf",
                        help="Configuration file")
    parser.add_argument("-d", "--debug", dest="debug",
                        help="Debug mode", default=False,
                        action="store_true")
    # TODO switch the default to True, and make it fork by default in init script.
    parser.add_argument("-f", "--foreground", dest="foreground",
                        help="Dont fork main program", default=False,
                        action="store_true")
    options = parser.parse_args()
    self.get_config(options.file)

    logfile = self.config.get('daemon').get('logfile', '/tmp/daemon.log')
    if not options.foreground:
        self.daemonize(stdin='/dev/null', stdout=logfile, stderr=logfile)

    # Queue shared between the MySQL poller and the statsd flusher.
    self.queue = Queue.Queue()

    # Split off config for each thread.
    mysql_config = dict(mysql=self.config['mysql'])
    mysql_config['metrics'] = self.config['metrics']
    statsd_config = self.config['statsd']

    # Spawn the MySQL polling thread and the statsd flushing thread.
    mysql_thread = ThreadMySQL(queue=self.queue, **mysql_config)
    statsd_thread = ThreadStatsd(queue=self.queue, **statsd_config)

    # Hand both threads to the manager and block in its run loop.
    ThreadManager(threads=[mysql_thread, statsd_thread]).run()
def __init__(self):
    """Initialise download bookkeeping on top of ThreadManager."""
    ThreadManager.__init__(self)
    self.global_slots = Slots()
    self.active_downloads = {}
    self.complete_downloads = {}
    self.stopped_downloads = {}
    # Queued downloads keep their enqueue order.
    self.queue_downloads = OrderedDict()
def __init__(self, username='', password='', get_dialogs_interval=60):
    """Set up the bot: timing settings, VK API session, caches and state.

    Args:
        username: VK login (may be empty when a token file is used).
        password: VK password.
        get_dialogs_interval: polling period (seconds) for MessageReceiver.
    """
    # Timing knobs from config ('i' = int, 'b' = bool).
    self.delay_on_reply = config.get('vkbot_timing.delay_on_reply', 'i')
    self.chars_per_second = config.get('vkbot_timing.chars_per_second', 'i')
    self.same_user_interval = config.get('vkbot_timing.same_user_interval', 'i')
    self.same_conf_interval = config.get('vkbot_timing.same_conf_interval', 'i')
    self.forget_interval = config.get('vkbot_timing.forget_interval', 'i')
    self.delay_on_first_reply = config.get('vkbot_timing.delay_on_first_reply', 'i')
    self.stats_dialog_count = config.get('stats.dialog_count', 'i')
    self.no_leave_conf = config.get('vkbot.no_leave_conf', 'b')
    self.api = vkapi.VkApi(username, password, ignored_errors=ignored_errors,
                           timeout=config.get('vkbot_timing.default_timeout', 'i'),
                           token_file=accounts.getFile('token.txt'),
                           log_file=accounts.getFile('inf.log') if args.args['logging'] else '',
                           captcha_handler=createCaptchaHandler())
    stats.update('logging', bool(self.api.log_file))
    # hi java
    self.users = UserCache(self.api,
                           self.fields + ',' + FriendController.requiredFields(_getFriendControllerParams()),
                           config.get('cache.user_invalidate_interval', 'i'))
    self.confs = ConfCache(self.api, config.get('cache.conf_invalidate_interval', 'i'))
    self.vars = json.load(open('data/defaultvars.json', encoding='utf-8'))
    self.vars['default_bf'] = self.vars['bf']['id']
    self.initSelf(True)
    # Seed for per-message guids; incremented on every outgoing message.
    self.guid = int(time.time() * 5)
    self.last_viewed_comment = stats.get('last_comment', 0)
    self.good_conf = {}
    self.tm = ThreadManager()
    self.last_message = MessageCache()
    # Restore the message cache and longpoll cursor from a previous run,
    # then discard the dump so it is not reused.
    if os.path.isfile(accounts.getFile('msgdump.json')):
        try:
            data = json.load(open(accounts.getFile('msgdump.json')))
            self.last_message.load(data['cache'])
            self.api.longpoll = data['longpoll']
        except json.JSONDecodeError:
            logging.warning('Failed to load messages')
        os.remove(accounts.getFile('msgdump.json'))
    else:
        logging.info('Message dump does not exist')
    # Predicate deciding whether a conf title is unacceptable; overridden later.
    self.bad_conf_title = lambda s: False
    self.admin = None
    self.banned_list = []
    self.message_lock = threading.Lock()
    self.banned = set()
    self.receiver = MessageReceiver(self.api, get_dialogs_interval)
    self.receiver.longpoll_callback = self.longpollCallback
def __evaluate_derived_cost(self):
    """Compute the derived cost for every theta component in parallel."""
    manager = ThreadManager(1)
    for index, _ in enumerate(self.__theta):
        # One task per parameter component.
        manager.attach(derived_cost,
                       (index, self.__y, self.__x, self.__theta, self.__predict))
    return manager.execute_all()
def stop_service(self, service_name):
    """Stops a service on the thread manager.

    Args:
        service_name (str): registered name of the thread to stop.
    """
    thread_manager = ThreadManager.instance()
    if thread_manager.is_thread_alive(service_name):
        # The audio retriever holds the recording device; release it first.
        if service_name == 'AudioRetriever':
            self.audio_retriever.stop_audio()
        # Reuse the singleton handle fetched above instead of calling
        # ThreadManager.instance() a second time (inconsistent in original).
        thread_manager.stop_thread(service_name)
        print('\nService stopped successfully\n')
    else:
        print('\nCannot stop service that is not running\n')
def __init__(self, runtimeDomain, fileManager, remoteObjectManager, basepath='.', **kwargs):
    """Wire up the handler domain with its collaborators.

    Args:
        runtimeDomain: runtime domain handler.
        fileManager: source file manager.
        remoteObjectManager: registry of remote objects.
        basepath: base path used when serializing source locations.
    """
    HandlerDomain.__init__(self, **kwargs)
    self.runtimeDomain = runtimeDomain
    self.fileManager = fileManager
    self.remoteObjectManager = remoteObjectManager
    self.locationSerializer = serialize.LocationSerializer(fileManager, basepath)
    # Keep module source paths in sync with the currently selected target.
    self.moduleSourcePathUpdater = ModuleSourcePathUpdater(
        self.debugger.GetSelectedTarget(), fileManager, basepath)
    self.thread_manager = ThreadManager(
        self.socket, self.locationSerializer, self.remoteObjectManager)
def main_menu(self):
    """Choice for main menu"""
    while True:
        MenuMessages.instance().print_main_menu()
        choice = self.parse_option(input())
        if choice == 2:
            # Exiting the menu shuts down every managed thread.
            ThreadManager.instance().stop_all()
            return
        if choice == 1:
            self.services_menu()
        else:
            print('Invalid option')
def __init__(self, source, resolution):
    '''
    Sets the video source and creates the thread manager.

    Args:
        source (str): The file path of the video stream.
        resolution (tuple[int]): The camera resolution.
    '''
    self.resolution = resolution
    self.source = source
    # The background capture thread publishes the latest frame here.
    self.frame = None
    self.thread_manager = ThreadManager(self)
def start_light_manager(self, show_message=True):
    """Starts the light manager service.

    Args:
        show_message (bool): print the success message. The services
            verifier passes False to restart silently; the original
            accepted this flag but ignored it (and misspelled "manager").
    """
    thread_manager = ThreadManager.instance()
    thread_manager.add_thread('LightManager', self.light_manager.rgb_set)
    thread_manager.start_thread('LightManager')
    if show_message:
        print('\nStarted light manager successfully\n')
    self.services_running['LightManager'] = True
def start_big_query_synchronizer(self, show_message=True):
    """Registers and starts the BigQuery synchronization thread."""
    manager = ThreadManager.instance()
    # Synchronize once every five minutes.
    manager.add_thread(
        'BigQuerySynchronizer',
        self.big_query_synchronizer.synchronize,
        sleep_time=300)
    manager.start_thread('BigQuerySynchronizer')
    print('\nStarted service big query synchronization successfully\n')
    self.services_running['BigQuerySynchronizer'] = True
def start_data_processor(self, show_message=True):
    """Starts the data processor service"""
    manager = ThreadManager.instance()
    # Convert collected data once per minute.
    manager.add_thread(
        'DataProcessor', self.data_processor.convert_data, sleep_time=60)
    manager.start_thread('DataProcessor')
    print('\nStarted data processor successfully\n')
    self.services_running['DataProcessor'] = True
def minibatch_error_evaluation_function(y, x, theta, prediction_function, percentage_to_evaluate=0.3):
    """Average the per-sample evaluation error over a leading fraction of the set.

    Args:
        y: target values.
        x: input samples aligned with y.
        theta: model parameters forwarded to evaluate_error.
        prediction_function: callable producing predictions.
        percentage_to_evaluate: fraction of the data set to score.

    Returns:
        Mean error over the evaluated prefix.
    """
    setsize = math.floor(len(y) * percentage_to_evaluate)
    manager = ThreadManager(1)
    for index in range(setsize):
        manager.attach(evaluate_error, (y[index], x[index], theta, prediction_function))
    errors = manager.execute_all()
    # NOTE(review): setsize == 0 (very small input) divides by zero here —
    # confirm callers guarantee len(y) * percentage_to_evaluate >= 1.
    return sum(errors) / setsize
class ThreadedEngine(object):
    """Runs an inference engine over a frame source on a background thread."""

    def __init__(self, source, engine):
        self.source = source
        self.engine = engine
        self.thread_manager = ThreadManager(self)
        # Latest prediction published by the background thread.
        self.pred = None

    def __next__(self):
        return self.get_prediction()

    def _thread(self):
        # Background loop: publish each prediction, then wake any waiter.
        for prediction in self.inference_gen():
            self.pred = prediction
            self.thread_manager.set()

    def inference_gen(self):
        # Endless generator: pull a frame, run the engine on it.
        while True:
            self.frame = next(self.source)
            yield self.engine.invoke(self.frame)

    def get_prediction(self):
        # Block until the background thread signals a fresh result.
        self.thread_manager.wait()
        return self.pred, self.frame

    def get_max_length(self):
        return self.engine.get_max_length()

    def label(self, pred):
        return self.engine.label(pred)

    def start(self):
        self.thread_manager.start()
def start_audio_retriever(self, show_message=True):
    """Starts the audio retrieving service"""
    manager = ThreadManager.instance()
    # Bail out early when no recording device is available.
    if not self.audio_retriever.is_recorder_device_ready():
        if show_message:
            print('\nCannot start audio retrieving, verify audio devices\n')
        return
    manager.add_thread('AudioRetriever', self.audio_retriever.retrieve_audio)
    manager.start_thread('AudioRetriever')
    print('\nStarted audio retrieving successfully\n')
    self.services_running['AudioRetriever'] = True
def __init__(self):
    """Program entry point"""
    op = argparse.ArgumentParser()
    op.add_argument("-c", "--config", dest="file",
                    default="/etc/mysql-statsd.conf",
                    help="Configuration file")
    op.add_argument("-d", "--debug", dest="debug",
                    help="Debug mode", default=False, action="store_true")
    self.opt = op.parse_args()
    opt = self.opt

    # Load the configuration file. The original parsed -c but never used
    # it, and self.config (read below) was never populated — the sibling
    # entry point calls self.get_config(opt.file) at this point.
    self.get_config(opt.file)

    # Set up queue shared by the polling and flushing threads.
    self.queue = Queue.Queue()

    # Spawn MySQL polling thread
    t1 = ThreadMySQL(config=self.config, queue=self.queue)
    # Spawn Statsd flushing thread
    t2 = ThreadStatsd(config=self.config, queue=self.queue)
    # Get thread manager
    tm = ThreadManager(threads=[t1, t2])
    tm.run()
def create_pools(self, num_pools=10, num_containers=100, with_io=False):
    """To create number of pools and containers parallelly.

    Args:
        num_pools (int): number of pools to create.
        num_containers (int): number of containers to create.
        with_io (bool): enable container test with execute_io.
    """
    # Setup the thread manager
    thread_manager = ThreadManager(self.create_containers, self.timeout - 30)
    for pool_number in range(num_pools):
        pool = self.get_pool()
        thread_manager.add(
            pool=pool, pool_num=pool_number, num_containers=num_containers,
            with_io=with_io)
        # %s instead of %d: `pool` is a pool object, not an int, so %d
        # raised "%d format: a number is required" when the record was
        # formatted.
        self.log.info("=(1.%d) pool created, %s.", pool_number, pool)
    # Launch the create_containers threads
    self.log.info(
        "=Launching %d create_containers threads", thread_manager.qty)
    failed_thread_count = thread_manager.check_run()
    if failed_thread_count > 0:
        msg = "#(1.{}) FAILED create_containers Threads".format(failed_thread_count)
        self.d_log.error(msg)
        self.fail(msg)
def start_drive_synchronizer(self, show_message=True):
    """Authenticates and starts the Google Drive synchronization thread."""
    manager = ThreadManager.instance()
    self.drive_synchronizer.authenticate()
    # Without valid credentials the synchronizer thread is useless.
    if not self.drive_synchronizer._is_authenticated:
        if show_message:
            print('\nNot authenticated, cannot start drive synchronization\n')
        return
    # Synchronize once every five minutes.
    manager.add_thread(
        'DriveSynchronizer',
        self.drive_synchronizer.synchronize,
        sleep_time=300)
    manager.start_thread('DriveSynchronizer')
    print('\nStarted service drive synchronization successfully\n')
    self.services_running['DriveSynchronizer'] = True
def __init__(self, debugger, chrome_channel, ipc_channel, is_attach, basepath='.'):
    '''
    chrome_channel: channel to send client chrome notification messages.
    ipc_channel: channel to send output/atom notification messages.
    debugger: lldb SBDebugger object.
    '''
    self._debugger = debugger
    self._chrome_channel = chrome_channel
    self._ipc_channel = ipc_channel
    self._is_attach = is_attach
    # Helpers layered on top of the chrome channel.
    self._file_manager = FileManager(chrome_channel)
    self._remote_object_manager = RemoteObjectManager()
    self._location_serializer = serialize.LocationSerializer(
        self._file_manager, basepath)
    self._thread_manager = ThreadManager(self)
def start_service(self, service_name):
    """Starts a service on the thread manager.

    Args:
        service_name (str): registered name of the service to start.
    """
    thread_manager = ThreadManager.instance()
    if thread_manager.is_thread_alive(service_name):
        print('Service is already running')
        return
    # Dispatch table keeps the mapping in one place and fixes the original
    # NameError: the last branch tested `service.name`, a typo for
    # `service_name`, so starting BigQuerySynchronizer crashed.
    starters = {
        'AudioRetriever': self.start_audio_retriever,
        'DataProcessor': self.start_data_processor,
        'LightManager': self.start_light_manager,
        'DriveSynchronizer': self.start_drive_synchronizer,
        'BigQuerySynchronizer': self.start_big_query_synchronizer,
    }
    starter = starters.get(service_name)
    if starter is not None:
        starter()
def __init__(self, debugger, chrome_channel, ipc_channel, is_attach, basepath='.'):
    '''
    chrome_channel: channel to send client chrome notification messages.
    ipc_channel: channel to send output/atom notification messages.
    debugger: lldb SBDebugger object.
    '''
    self._debugger = debugger
    self._chrome_channel = chrome_channel
    self._ipc_channel = ipc_channel
    self._is_attach = is_attach
    self._file_manager = FileManager(chrome_channel)
    self._remote_object_manager = RemoteObjectManager()
    # Normalise the base path before anything derives source locations
    # from it, and align lldb's working directory with it.
    basepath = self._resolve_basepath_heuristic(basepath)
    log_debug('basepath: %s' % basepath)
    self._fixup_lldb_cwd_if_needed(basepath)
    self._location_serializer = serialize.LocationSerializer(
        self._file_manager, basepath)
    self._thread_manager = ThreadManager(self)
    self._debugger_settings = self._setDefaultDebuggerSettings()
def __init__(self, username='', password=''):
    """Create the bot: VK API session, caches and conversation state."""
    self.api = vkapi.VkApi(username, password, ignored_errors=ignored_errors,
                           timeout=config.get('vkbot.default_timeout', 'i'),
                           token_file=accounts.getFile('token.txt'),
                           log_file=accounts.getFile('inf.log') if args.args['logging'] else '',
                           captcha_handler=captcha.CaptchaHandler())
    self.api.initLongpoll()
    # Caches over the API.
    self.users = UserCache(self.api, 'sex,crop_photo,blacklisted,blacklisted_by_me,' + check_friend.fields)
    self.confs = ConfCache(self.api)
    self.vars = json.load(open('data/defaultvars.json', encoding='utf-8'))
    self.initSelf(True)
    # Seed for per-message guids, advanced on every message sent.
    self.guid = int(time.time() * 5)
    self.last_viewed_comment = stats.get('last_comment', 0)
    # Conversation state.
    self.good_conf = {}
    self.tm = ThreadManager()
    self.last_message = MessageCache()
    self.last_message_id = 0
    self.whitelist = None
    self.whitelist_includeread = True
    self.bad_conf_title = lambda s: False
    self.admin = None
    self.banned_list = []
    self.longpoll_queue = queue.Queue()
    self.message_lock = threading.Lock()
def verify_services_status(self):
    """
    Verify the status of the services running in a thread, if one has
    been stopped by an exception it will start it again automatically
    """
    thread_manager = ThreadManager.instance()
    # Service name -> (silent restart function, label used in the log line).
    restarters = {
        'AudioRetriever': (self.start_audio_retriever, 'audio retrieving'),
        'DataProcessor': (self.start_data_processor, 'data processor'),
        'LightManager': (self.start_light_manager, 'light manager'),
        'DriveSynchronizer': (self.start_drive_synchronizer, 'drive synchronizer'),
        'BigQuerySynchronizer': (self.start_big_query_synchronizer, 'big query synchronizer'),
    }
    try:
        for key, value in self.services_running.items():
            # Only restart services that should be running and were not
            # stopped on purpose by the user.
            if value and not thread_manager.stopped_manually(key):
                entry = restarters.get(key)
                if entry is None:
                    continue
                starter, label = entry
                starter(show_message=False)
                logging.info('Restarting automatically service ' + label)
    except Exception as error:
        # logging.exception records the traceback; the original logged the
        # fatal error at info level and misspelled "occurred".
        logging.exception(
            'Tried to restart a service but a fatal error occurred ' + str(error))
def __init__(self, source, engine):
    """Bind the frame source and engine, and prepare the worker thread."""
    self.engine = engine
    self.source = source
    # Latest prediction published by the worker thread.
    self.pred = None
    self.thread_manager = ThreadManager(self)
def test_metadata_server_restart(self):
    """JIRA ID: DAOS-1512.

    Test Description:
        This test will verify 2000 IOR small size container after server
        restart. Test will write IOR in 5 different threads for faster
        execution time. Each thread will create 400 (8bytes) containers
        to the same pool. Restart the servers, read IOR container file
        written previously and validate data integrity by using IOR
        option "-R -G 1".

    Use Cases:
        ?

    :avocado: tags=all,full_regression
    :avocado: tags=hw,large
    :avocado: tags=server,metadata,metadata_ior,nvme
    """
    self.create_pool()
    files_per_thread = 400
    total_ior_threads = 5
    processes = self.params.get("slots", "/run/ior/clientslots/*")
    # One list of 400 container uuids per IOR thread.
    list_of_uuid_lists = [[
        str(uuid.uuid4()) for _ in range(files_per_thread)
    ] for _ in range(total_ior_threads)]
    # Setup the thread manager
    thread_manager = ThreadManager(run_ior_loop, self.timeout - 30)
    # Launch threads to run IOR to write data, restart the agents and
    # servers, and then run IOR to read the data
    for operation in ("write", "read"):
        # Create the IOR threads
        for index in range(total_ior_threads):
            # Define the arguments for the run_ior_loop method
            ior_cmd = IorCommand()
            ior_cmd.get_params(self)
            ior_cmd.set_daos_params(self.server_group, self.pool)
            ior_cmd.flags.value = self.params.get(
                "F", "/run/ior/ior{}flags/".format(operation))
            # Define the job manager for the IOR command
            self.ior_managers.append(Orterun(ior_cmd))
            env = ior_cmd.get_default_env(str(self.ior_managers[-1]))
            self.ior_managers[-1].assign_hosts(self.hostlist_clients,
                                               self.workdir, None)
            self.ior_managers[-1].assign_processes(processes)
            self.ior_managers[-1].assign_environment(env)
            self.ior_managers[-1].verbose = False
            # Add a thread for these IOR arguments
            thread_manager.add(manager=self.ior_managers[-1],
                               uuids=list_of_uuid_lists[index],
                               tmpdir_base=self.test_dir)
            self.log.info("Created %s thread %s with container uuids %s",
                          operation, index, list_of_uuid_lists[index])
        # Launch the IOR threads
        self.log.info("Launching %d IOR %s threads",
                      thread_manager.qty, operation)
        failed_thread_count = thread_manager.check_run()
        if failed_thread_count > 0:
            msg = "{} FAILED IOR {} Thread(s)".format(
                failed_thread_count, operation)
            self.d_log.error(msg)
            self.fail(msg)
        # Restart the agents and servers after the write / before the read
        if operation == "write":
            # Stop the agents
            errors = self.stop_agents()
            self.assertEqual(
                len(errors), 0,
                "Error stopping agents:\n {}".format("\n ".join(errors)))
            # Restart the servers w/o formatting the storage
            errors = self.restart_servers()
            self.assertEqual(
                len(errors), 0,
                "Error stopping servers:\n {}".format("\n ".join(errors)))
            # Start the agents
            self.start_agent_managers()
    self.log.info("Test passed")
def __init__(self):
    """Initialise both base classes and the global slot pool."""
    DownloadCore.__init__(self)   # download_core.py
    ThreadManager.__init__(self)  # thread_manager.py
    self.global_slots = Slots()   # slots.py
def obtain_services_status(self):
    """Obtains and prints the status of services"""
    status = ThreadManager.instance().status()
    print('\n', status, '\n')
class VkBot: fields = 'sex,crop_photo,blacklisted,blacklisted_by_me' def __init__(self, username='', password='', get_dialogs_interval=60): self.delay_on_reply = config.get('vkbot_timing.delay_on_reply', 'i') self.chars_per_second = config.get('vkbot_timing.chars_per_second', 'i') self.same_user_interval = config.get('vkbot_timing.same_user_interval', 'i') self.same_conf_interval = config.get('vkbot_timing.same_conf_interval', 'i') self.forget_interval = config.get('vkbot_timing.forget_interval', 'i') self.delay_on_first_reply = config.get('vkbot_timing.delay_on_first_reply', 'i') self.stats_dialog_count = config.get('stats.dialog_count', 'i') self.no_leave_conf = config.get('vkbot.no_leave_conf', 'b') self.api = vkapi.VkApi(username, password, ignored_errors=ignored_errors, timeout=config.get('vkbot_timing.default_timeout', 'i'), token_file=accounts.getFile('token.txt'), log_file=accounts.getFile('inf.log') if args.args['logging'] else '', captcha_handler=createCaptchaHandler()) stats.update('logging', bool(self.api.log_file)) # hi java self.users = UserCache(self.api, self.fields + ',' + FriendController.requiredFields(_getFriendControllerParams()), config.get('cache.user_invalidate_interval', 'i')) self.confs = ConfCache(self.api, config.get('cache.conf_invalidate_interval', 'i')) self.vars = json.load(open('data/defaultvars.json', encoding='utf-8')) self.vars['default_bf'] = self.vars['bf']['id'] self.initSelf(True) self.guid = int(time.time() * 5) self.last_viewed_comment = stats.get('last_comment', 0) self.good_conf = {} self.tm = ThreadManager() self.last_message = MessageCache() if os.path.isfile(accounts.getFile('msgdump.json')): try: data = json.load(open(accounts.getFile('msgdump.json'))) self.last_message.load(data['cache']) self.api.longpoll = data['longpoll'] except json.JSONDecodeError: logging.warning('Failed to load messages') os.remove(accounts.getFile('msgdump.json')) else: logging.info('Message dump does not exist') self.bad_conf_title = lambda 
s: False self.admin = None self.banned_list = [] self.message_lock = threading.Lock() self.banned = set() self.receiver = MessageReceiver(self.api, get_dialogs_interval) self.receiver.longpoll_callback = self.longpollCallback @property def whitelist(self): return self.receiver.whitelist @whitelist.setter def whitelist(self, new): self.receiver.whitelist = new def initSelf(self, sync=False): def do(): try: res = self.api.users.get(fields='contacts,relation,bdate')[0] except IndexError: self.api.login() do() return self.self_id = res['id'] self.vars['phone'] = res.get('mobile_phone') or self.vars['phone'] self.vars['name'] = (res['first_name'], res['last_name']) self.vars['bf'] = res.get('relation_partner') or self.vars['bf'] try: bdate = res['bdate'].split('.') today = datetime.date.today() self.vars['age'] = today.year - int(bdate[2]) - ((today.month, today.day) < (int(bdate[1]), int(bdate[0]))) except LookupError: pass if not sync: logging.info('My phone: ' + self.vars['phone']) if sync: do() else: threading.Thread(target=do).start() def loadUsers(self, arr, key, clean=False): users = [] confs = [] for i in arr: try: pid = key(i) if pid <= 0: continue if pid > CONF_START: confs.append(pid - CONF_START) else: users.append(pid) except Exception: pass self.users.load(users, clean) self.confs.load(confs, clean) def replyOne(self, message, gen_reply): if self.whitelist and getSender(message) not in self.whitelist: if getSender(message) > CONF_START: return if self.users[message['user_id']]['first_name'] + ' ' + self.users[message['user_id']]['last_name'] not in self.whitelist: return if message['user_id'] == self.self_id: # chat with myself return if 'chat_id' in message and not self.checkConf(message['chat_id']): return try: if self.tm.isBusy(getSender(message)) and not self.tm.get(getSender(message)).attr['unimportant']: return except Exception: return if message['id'] < self.last_message.bySender(getSender(message)).get('id', 0): return try: ans = gen_reply(message) 
except Exception as e: ans = None logging.exception('local {}: {}'.format(e.__class__.__name__, str(e))) time.sleep(1) if ans: self.replyMessage(message, ans[0], ans[1]) def replyAll(self, gen_reply): self.tm.gc() self.banned_list = [] messages = self.receiver.getMessages() self.loadUsers(messages, lambda x: x['user_id']) self.loadUsers(messages, lambda x: x['chat_id'] + CONF_START) for cur in messages: self.replyOne(cur, gen_reply) if self.receiver.used_get_dialogs: stats.update('banned_messages', ' '.join(map(str, sorted(self.banned_list)))) # noinspection PyUnusedLocal def longpollCallback(self, mid, flags, sender, ts, random_id, text, opt): if opt == {'source_mid': str(self.self_id), 'source_act': 'chat_kick_user', 'from': str(self.self_id)}: self.good_conf[sender] = False return True if opt.get('source_mid') == str(self.self_id) and opt.get('source_act') == 'chat_invite_user' and sender in self.good_conf: del self.good_conf[sender] return True if opt.get('source_act') == 'chat_title_update': del self.confs[sender - CONF_START] logging.info('Conf {} renamed into "{}"'.format(sender - CONF_START, opt['source_text'])) if not self.no_leave_conf and self.bad_conf_title(opt['source_text']): self.leaveConf(sender - CONF_START) log.write('conf', 'conf ' + str(sender - CONF_START) + ' (name: {})'.format(opt['source_text'])) return True if opt.get('source_act') == 'chat_invite_user' and opt['source_mid'] == str(self.self_id) and opt['from'] != str(self.self_id): self.logSender('%sender% added me to conf "{}" ({})'.format(self.confs[sender - CONF_START]['title'], sender - CONF_START), {'user_id': int(opt['from'])}) if not self.no_leave_conf and int(opt['from']) not in self.banned: self.deleteFriend(int(opt['from'])) if flags & 2: # out if not opt.get('source_act'): self.tm.terminate(sender) return True try: if 'from' in opt and int(opt['from']) != self.tm.get(sender).attr['user_id'] and not opt.get('source_act'): self.tm.get(sender).attr['reply'] = True except Exception: 
pass def sendMessage(self, to, msg, forward=None): if not self.good_conf.get(to, 1): return with self.message_lock: self.guid += 1 time.sleep(1) if forward: return self.api.messages.send(peer_id=to, message=msg, random_id=self.guid, forward_messages=forward) else: return self.api.messages.send(peer_id=to, message=msg, random_id=self.guid) def replyMessage(self, message, answer, skip_mark_as_read=False): sender = getSender(message) sender_msg = self.last_message.bySender(sender) if 'id' in message and message['id'] <= sender_msg.get('id', 0): return if not answer: if self.tm.isBusy(sender): return if not sender_msg or time.time() - sender_msg['time'] > self.forget_interval: tl = Timeline().sleep(self.delay_on_first_reply).do(lambda: self.api.messages.markAsRead(peer_id=sender)) tl.attr['unimportant'] = True self.tm.run(sender, tl, tl.terminate) elif answer is None: # ignored self.api.messages.markAsRead.delayed(peer_id=sender, _once=True) else: tl = Timeline().sleep((self.delay_on_reply - 1) * random.random() + 1).do(lambda: self.api.messages.markAsRead(peer_id=sender)) tl.attr['unimportant'] = True self.tm.run(sender, tl, tl.terminate) if answer is not None: self.last_message.byUser(message['user_id'])['text'] = message['body'] self.last_message.updateTime(sender) if sender > CONF_START and 'action' not in message: sender_msg.setdefault('ignored', {})[message['user_id']] = time.time() return typing_time = 0 if not answer.startswith('&#'): typing_time = len(answer) / self.chars_per_second resend = False # answer is not empty if sender_msg.get('reply', '').upper() == answer.upper() and sender_msg['user_id'] == message['user_id']: logging.info('Resending') typing_time = 0 resend = True def _send(attr): if not set(sender_msg.get('ignored', [])) <= {message['user_id']}: ctime = time.time() for uid, ts in sender_msg['ignored'].items(): if uid != message['user_id'] and ctime - ts < self.same_conf_interval * 3: attr['reply'] = True try: if resend: res = 
self.sendMessage(sender, '', sender_msg['id']) elif attr.get('reply'): res = self.sendMessage(sender, answer, message['id']) else: res = self.sendMessage(sender, answer) if res is None: del self.users[sender] self.logSender('Failed to send a message to %sender%', message) return msg = self.last_message.add(sender, message, res, answer) if resend: msg['resent'] = True except Exception as e: logging.exception('thread {}: {}'.format(e.__class__.__name__, str(e))) cur_delay = (self.delay_on_reply - 1) * random.random() + 1 send_time = cur_delay + typing_time user_delay = 0 if sender_msg and sender != self.admin: user_delay = sender_msg['time'] - time.time() + (self.same_user_interval if sender < CONF_START else self.same_conf_interval) # can be negative tl = Timeline(max(send_time, user_delay)) if 'chat_id' in message: tl.attr['user_id'] = message['user_id'] if not sender_msg or time.time() - sender_msg['time'] > self.forget_interval: if not skip_mark_as_read: tl.sleep(self.delay_on_first_reply) tl.do(lambda: self.api.messages.markAsRead(peer_id=sender)) else: tl.sleepUntil(send_time, (self.delay_on_reply - 1) * random.random() + 1) if not skip_mark_as_read: tl.do(lambda: self.api.messages.markAsRead(peer_id=sender)) tl.sleep(cur_delay) if message.get('_onsend_actions'): for i in message['_onsend_actions']: tl.do(i) tl.sleep(cur_delay) if typing_time: tl.doEveryFor(vkapi.utils.TYPING_INTERVAL, lambda: self.api.messages.setActivity(type='typing', user_id=sender), typing_time) tl.do(_send, True) self.tm.run(sender, tl, tl.terminate) def checkConf(self, cid): if self.no_leave_conf: return True if cid + CONF_START in self.good_conf: return self.good_conf[cid + CONF_START] messages = self.api.messages.getHistory(chat_id=cid)['items'] for i in messages: if i.get('action') == 'chat_create' and i['user_id'] not in self.banned: self.leaveConf(cid) self.deleteFriend(i['user_id']) log.write('conf', self.loggableName(i.get('user_id')) + ' ' + str(cid)) return False title = 
self.confs[cid]['title'] if self.bad_conf_title(title): self.leaveConf(cid) log.write('conf', 'conf ' + str(cid) + ' (name: {})'.format(title)) return False self.good_conf[cid + CONF_START] = True return True def leaveConf(self, cid): if not self.confs[cid]: return False logging.info('Leaving conf {} ("{}")'.format(cid, self.confs[cid]['title'])) self.good_conf[cid + CONF_START] = False return self.api.messages.removeChatUser(chat_id=cid, user_id=self.self_id) def addFriends(self, gen_reply, is_good): data = self.api.friends.getRequests(extended=1) to_rep = [] self.loadUsers(data['items'], lambda x: x['user_id'], True) for i in data['items']: if self.users[i['user_id']].get('blacklisted'): self.api.friends.delete.delayed(user_id=i['user_id']) continue res = is_good(i['user_id'], True) if res is None: self.api.friends.add.delayed(user_id=i['user_id']) self.logSender('Adding %sender%', i) if 'message' in i: ans = gen_reply(i) to_rep.append((i, ans)) else: self.api.friends.delete.delayed(user_id=i['user_id']) self.logSender('Not adding %sender% ({})'.format(res), i) for i in to_rep: self.replyMessage(i[0], i[1][0], i[1][1]) self.api.sync() def unfollow(self): result = [] requests = self.api.friends.getRequests(out=1)['items'] suggested = self.api.friends.getRequests(suggested=1)['items'] for i in requests: if i not in self.banned: result.append(i) for i in suggested: self.api.friends.delete.delayed(user_id=i) self.deleteFriend(result) return result def deleteFriend(self, uid): if type(uid) == int: self.api.friends.delete(user_id=uid) else: for i in uid: self.api.friends.delete.delayed(user_id=i) self.api.sync() def setOnline(self): self.api.account.setOnline() def getUserId(self, domain, is_conf=False): domain = str(domain).lower().rstrip().rstrip('}').rstrip() conf = re.search('sel=c(\\d+)', domain) or re.search('^c(\\d+)$', domain) or re.search('chat=(\\d+)', domain) or re.search('peer=2(\\d{9})', domain) if conf is not None: return int(conf.group(1)) + CONF_START 
if is_conf: if domain.isdigit(): return int(domain) + CONF_START else: return None if '=' in domain: domain = domain.split('=')[-1] if '/' in domain: domain = domain.split('/')[-1] data = self.api.users.get(user_ids=domain) if not data: return None return data[0]['id'] def deleteComment(self, rep): if rep['type'] == 'wall': self.api.wall.delete(owner_id=self.self_id, post_id=rep['feedback']['id']) elif rep['type'].endswith('photo'): self.api.photos.deleteComment(owner_id=self.self_id, comment_id=rep['feedback']['id']) elif rep['type'].endswith('video'): self.api.video.deleteComment(owner_id=self.self_id, comment_id=rep['feedback']['id']) else: self.api.wall.deleteComment(owner_id=self.self_id, comment_id=rep['feedback']['id']) def filterComments(self, test): data = self.api.notifications.get(start_time=self.last_viewed_comment + 1, count=100)['items'] to_del = set() to_bl = set() self.loadUsers(data, lambda x: x['feedback']['from_id'], True) for rep in data: if rep['date'] != 'i': self.last_viewed_comment = max(self.last_viewed_comment, int(rep['date'])) def _check(s): if 'photo' in s: return s['photo']['owner_id'] == self.self_id if 'video' in s: return s['video']['owner_id'] == self.self_id if 'post' in s: return s['post']['to_id'] == self.self_id if rep['type'].startswith('comment_') or (rep['type'].startswith('reply_comment') and _check(rep['parent'])) or rep['type'] == 'wall': txt = html.escape(rep['feedback']['text']) res = 'good' frid = int(rep['feedback']['from_id']) if self.users[frid]['blacklisted']: res = 'blacklisted' log.write('comments', self.loggableName(frid) + ' (blacklisted): ' + txt) self.deleteComment(rep) to_bl.add(frid) elif test(txt): res = 'bad' log.write('comments', self.loggableName(frid) + ': ' + txt) self.deleteComment(rep) to_del.add(frid) elif 'attachments' in rep['feedback'] and any(i.get('type') in ['video', 'link'] for i in rep['feedback']['attachments']): res = 'attachment' log.write('comments', self.loggableName(frid) + ' 
(attachment)')
                    # --- continuation of filterComments() from the previous chunk:
                    # the offending comment is deleted and the verdict logged. ---
                    self.deleteComment(rep)
                self.logSender('Comment {} (by %sender%) - {}'.format(txt, res), {'user_id': frid})
        # Persist the high-water mark so already-reviewed notifications are skipped next run.
        stats.update('last_comment', self.last_viewed_comment)
        for i in to_bl:
            self.blacklist(i)
        return to_del

    def likeAva(self, uid):
        """Like the user's current profile (avatar) photo, if one exists."""
        # Evict the cached record first so the lookup below refetches fresh data
        # (presumably UserCache re-queries on __getitem__ — TODO confirm).
        del self.users[uid]
        if 'crop_photo' not in self.users[uid]:
            return
        photo = self.users[uid]['crop_photo']['photo']
        self.api.likes.add(type='photo', owner_id=photo['owner_id'], item_id=photo['id'])
        self.logSender('Liked %sender%', {'user_id': uid})

    def setRelation(self, uid, set_by=None):
        """Set the account's relationship partner to `uid`.

        A falsy `uid` means the partner was removed by `set_by`; fall back to
        the configured default partner in that case.
        """
        if uid:
            log.write('relation', self.loggableName(uid))
        else:
            log.write('relation', self.loggableName(set_by) + ' (removed)')
            uid = self.vars['default_bf']
        self.api.account.saveProfileInfo(relation_partner_id=uid)
        self.vars['bf'] = self.users[uid]
        self.logSender('Set relationship with %sender%', {'user_id': uid})

    def waitAllThreads(self, loop_thread, reply):
        """Shut down: stop the longpoll monitor, drain pending messages,
        join worker threads, and dump message/longpoll state to disk."""
        # Snapshot longpoll credentials before the receiver is torn down.
        lp = self.api.longpoll.copy()
        self.receiver.terminate_monitor = True
        loop_thread.join(60)
        # Answer whatever is still queued so no message is silently dropped.
        while not self.receiver.longpoll_queue.empty():
            self.replyAll(reply)
        for t in self.tm.all():
            t.join(60)
        with open(accounts.getFile('msgdump.json'), 'w') as f:
            json.dump({'cache': self.last_message.dump(), 'longpoll': lp}, f)

    # {name} - first_name last_name
    # {id} - id
    def printableName(self, pid, user_fmt, conf_fmt='Conf "{name}" ({id})'):
        """Format a peer id: conference ids (> CONF_START) use `conf_fmt`,
        user ids use `user_fmt`."""
        if pid > CONF_START:
            return conf_fmt.format(id=(pid - CONF_START), name=self.confs[pid - CONF_START]['title'])
        else:
            return user_fmt.format(id=pid, name=self.users[pid]['first_name'] + ' ' + self.users[pid]['last_name'])

    def logSender(self, text, message):
        """Log `text` with %sender% expanded — plain for the console record,
        HTML-escaped variant for the 'db' extra."""
        text_msg = text.replace('%sender%', self.printableSender(message, False))
        # NOTE(review): this copy escapes `text` before substitution; the older
        # VkBot.logSender below does not — the two revisions disagree. Confirm
        # which is intended before unifying.
        html_msg = html.escape(text).replace('%sender%', self.printableSender(message, True))
        logging.info(text_msg, extra={'db': html_msg})

    def printableSender(self, message, need_html):
        """Render the sender of `message`; conference messages include the
        conference title via the %c / %i placeholders."""
        if message.get('chat_id', 0) > 0:
            if need_html:
                res = self.printableName(message['user_id'], user_fmt='Conf "%c" (%i), <a href="https://vk.com/id{id}" target="_blank">{name}</a>')
                return res.replace('%i', str(message['chat_id'])).replace('%c', html.escape(self.confs[message['chat_id']]['title']))
            else:
                res = self.printableName(message['user_id'], user_fmt='Conf "%c" (%i), {name}')
                return res.replace('%i', str(message['chat_id'])).replace('%c', html.escape(self.confs[message['chat_id']]['title']))
        else:
            if need_html:
                return self.printableName(message['user_id'], user_fmt='<a href="https://vk.com/id{id}" target="_blank">{name}</a>')
            else:
                return self.printableName(message['user_id'], user_fmt='{name}')

    def loggableName(self, uid):
        # Canonical "id (First Last)" form used across log files.
        return self.printableName(uid, '{id} ({name})')

    def blacklist(self, uid):
        self.api.account.banUser(user_id=uid)

    def blacklistedCount(self):
        return self.api.account.getBanned(count=0)['count']

    def lastDialogs(self):
        """Collect recent-dialog stats: (total count, [(peer, msg count)], {peer: title}).

        Message counts arrive asynchronously via delayed getHistory calls that
        append through `cb`; banned peers are skipped. Returns (None, None, None)
        when the dialog list could not be fetched.
        """
        def cb(req, resp):
            d.append((req['peer_id'], resp['count']))

        dialogs = self.api.messages.getDialogs(count=self.stats_dialog_count, preview_length=1)
        d = []
        confs = {}
        try:
            items = list(dialogs['items'])
            for dialog in items:
                if getSender(dialog['message']) in self.banned:
                    continue
                self.api.messages.getHistory.delayed(peer_id=getSender(dialog['message']), count=0).callback(cb)
                # Conference dialogs carry a title; remember it for the stats page.
                if 'title' in dialog['message']:
                    confs[getSender(dialog['message'])] = dialog['message']['title']
            self.api.sync()
        except TypeError:
            # getDialogs returned a non-dict (API failure) — report "unknown".
            logging.warning('Unable to fetch dialogs')
            return (None, None, None)
        return (dialogs['count'], d, confs)

    def acceptGroupInvites(self):
        """Join every pending group invite and log who invited us."""
        for i in self.api.groups.getInvites()['items']:
            logging.info('Joining group "{}"'.format(i['name']))
            self.api.groups.join(group_id=i['id'])
            log.write('groups', '{}: <a target="_blank" href="https://vk.com/club{}">{}</a>{}'.format(
                self.loggableName(i['invited_by']), i['id'], i['name'], ['', ' (closed)', ' (private)'][i['is_closed']]))

    def clearCache(self):
        # Drop both caches; entries are lazily refetched on next access.
        self.users.clear()
        self.confs.clear()
    def send_stat(self, item):
        """Send one (key, value, type) item to statsd.

        A two-character type whose second char is 'd' (e.g. 'gd') means
        "delta": convert the raw value first, then strip down to the base
        type character.
        """
        (k, v, t) = item
        try:
            if t[1] == 'd':
                v = self.calculate_delta(k, v)
            t = t[0]
        except:
            # NOTE(review): bare except silently swallows everything, not just
            # the IndexError expected for single-char types — narrow this to
            # IndexError (and whatever calculate_delta can raise) when safe.
            pass
        sender = self.get_sender(t)
        sender(k, float(v))

    def run(self):
        """Consume queued stats until stopped."""
        # NOTE(review): `self.run` is this bound method itself, which is always
        # truthy — the loop can never terminate via this condition. The intended
        # flag is presumably something like `self.running` on the base thread
        # class (not visible in this chunk); confirm and fix there.
        while self.run:
            try:
                # Timeout after 1 second so we can respond to quit events
                item = self.queue.get(True, 1)
                self.send_stat(item)
            except Queue.Empty:
                continue


if __name__ == '__main__':
    # Run standalone to test this module, it will generate garbage
    from thread_manager import ThreadManager
    q = Queue.Queue()
    threads = [ThreadGenerateGarbage(q), ThreadStatsd(q)]
    tm = ThreadManager(threads=threads)
    tm.run()
class DebuggerDomain(HandlerDomain):
    """Implement the Chrome Dev Tools 'Debugger' domain on top of the lldb
    Python API.

    Requests (breakpoints, stepping, frame selection, source lookup) are
    translated into operations on the currently selected lldb target of
    ``self.debugger``.
    """

    def __init__(self, runtimeDomain, fileManager, remoteObjectManager,
                 basepath='.', **kwargs):
        HandlerDomain.__init__(self, **kwargs)
        self.runtimeDomain = runtimeDomain
        self.fileManager = fileManager
        self.remoteObjectManager = remoteObjectManager
        # Converts lldb file/line info into protocol 'Location' objects.
        self.locationSerializer = serialize.LocationSerializer(
            fileManager, basepath)
        self.moduleSourcePathUpdater = ModuleSourcePathUpdater(
            self.debugger.GetSelectedTarget(), fileManager, basepath)
        self.thread_manager = ThreadManager(self.socket,
                                            self.locationSerializer,
                                            self.remoteObjectManager)

    @property
    def name(self):
        return 'Debugger'

    @handler()
    def canSetScriptSource(self, params):
        # Return False, because we don't support
        # changing source at runtime.
        return {"result": False}

    @handler()
    def continueToLocation(self, params):
        # TODO(williamsc) - This is probably setting a one off breakpoint and continuing.
        raise UndefinedHandlerError('continueToLocation not implemented')

    @handler()
    def disable(self, params):
        # Not exactly the same as disable. Detach() might be closer to
        # what Chrome Dev Tools is trying to do.
        self.debugger.GetSelectedTarget().DisableAllBreakpoints()
        return {}

    @handler()
    def enable(self, params):
        """Start streaming lldb process events back to the client."""
        process = self.debugger.GetSelectedTarget().process
        self.event_thread = LLDBListenerThread(
            server=self.socket,
            location_serializer=self.locationSerializer,
            remote_object_manager=self.remoteObjectManager,
            module_source_path_updater=self.moduleSourcePathUpdater,
            thread_manager=self.thread_manager,
            process=process)
        self.moduleSourcePathUpdater.modules_updated()
        self.event_thread.start()
        return {}

    @handler()
    def evaluateOnCallFrame(self, params):
        """Evaluate an expression in the context of a call frame.

        ``callFrameId`` is encoded as '<threadIndexId>.<frameIndex>'.
        """
        frameId = params['callFrameId']
        thread, frame = frameId.split('.')
        # TODO: These return booleans to indicate success. Throw something if False.
        self.debugger.GetSelectedTarget().process.SetSelectedThreadByIndexID(
            int(thread))
        self.debugger.GetSelectedTarget().process.GetSelectedThread(
            ).SetSelectedFrame(int(frame))
        return self.runtimeDomain.evaluate(params)

    @handler()
    def getScriptSource(self, params):
        filelike = self.fileManager.get_by_script_id(params['scriptId'])
        if filelike:
            return {'scriptSource': filelike.script_source}
        else:
            return {'scriptSource': '<Failed to fetch source.>'}

    @handler()
    def pause(self, params):
        self.debugger.GetSelectedTarget().process.Stop()
        return {}

    @handler()
    def removeBreakpoint(self, params):
        self.debugger.GetSelectedTarget().BreakpointDelete(
            int(params['breakpointId']))
        return {}

    @handler()
    def resume(self, params):
        self.debugger.GetSelectedTarget().process.Continue()
        return {}

    @handler()
    def selectThread(self, params):
        threadId = params['threadId']
        self.debugger.GetSelectedTarget().process.SetSelectedThreadByID(
            threadId)
        return {}

    @handler()
    def getThreadStack(self, params):
        """Return the call frames of the given thread (empty list if unknown)."""
        threadId = params['threadId']
        thread = self.debugger.GetSelectedTarget().process.GetThreadByID(
            threadId)
        params = {"callFrames": []}
        # Fixed: was `if not thread == None` — PEP 8 mandates an identity
        # check against None; equivalent here, but the old form invited an
        # operator-overload surprise.
        if thread is not None:
            params["callFrames"] = self.thread_manager.get_thread_stack(thread)
        return params

    @handler()
    def searchInContent(self, params):
        raise UndefinedHandlerError('searchInContent not implemented')

    @handler()
    def setBreakpoint(self, params):
        filelike = self.fileManager.get_by_script_id(
            params['location']['scriptId'])
        if not filelike or not isinstance(filelike, file_manager.File):
            # Only support setting breakpoints in real files.
            return {}
        # Protocol line numbers are 0-based; lldb's are 1-based.
        return self._set_breakpoint_by_filespec(
            filelike.server_obj,
            int(params['location']['lineNumber']) + 1)

    @handler()
    def setBreakpointByUrl(self, params):
        filelike = self.fileManager.get_by_client_url(params['url'])
        if not filelike or not isinstance(filelike, file_manager.File):
            raise RuntimeError('Cannot find file for breakpoint.')
        # Protocol line numbers are 0-based; lldb's are 1-based.
        return self._set_breakpoint_by_filespec(filelike.server_obj,
                                                int(params['lineNumber']) + 1)

    @handler()
    def setBreakpointsActive(self, params):
        if params['active']:
            self.debugger.GetSelectedTarget().EnableAllBreakpoints()
        else:
            self.debugger.GetSelectedTarget().DisableAllBreakpoints()
        return {}

    @handler()
    def setPauseOnExceptions(self, params):
        # TODO(williamsc) - Support add support for pausing on exceptions
        raise UndefinedHandlerError('setPauseOnExceptions not implemented')

    @handler()
    def setScriptSource(self, params):
        raise UndefinedHandlerError('setScriptSource not supported for LLDB')

    @handler()
    def stepInto(self, params):
        self.debugger.GetSelectedTarget().GetProcess().GetSelectedThread(
            ).StepInto()
        return {}

    @handler()
    def stepOut(self, params):
        self.debugger.GetSelectedTarget().GetProcess().GetSelectedThread(
            ).StepOut()
        return {}

    @handler()
    def stepOver(self, params):
        self.debugger.GetSelectedTarget().GetProcess().GetSelectedThread(
            ).StepOver()
        return {}

    def _set_breakpoint_by_filespec(self, filespec, line):
        """Create a breakpoint at (filespec, line) and serialize it for the client."""
        breakpoint = self.debugger.GetSelectedTarget(
            ).BreakpointCreateByLocation(filespec, line)
        return {
            'breakpointId': str(breakpoint.id),
            'locations':
                self.locationSerializer.get_breakpoint_locations(breakpoint),
        }
        # --- continuation of get_sender(t) whose `def` line precedes this
        # chunk: maps a type char to the statsd client method. ---
        # NOTE(review): `t is 'r'` compares string IDENTITY, not equality.
        # It only works because CPython happens to intern these one-char
        # literals — each branch should use `==` instead (same below).
        elif t is 'r':
            return self.client.update_stats
        elif t is 'c':
            return self.client.incr
        elif t is 't':
            return self.client.timing

    def send_stat(self, item):
        """Send one (key, value, type) item via the matching client method."""
        (k, v, t) = item
        sender = self.get_sender(t)
        sender(k, float(v))

    def run(self):
        """Consume queued stats until stopped."""
        # NOTE(review): `self.run` is this bound method itself, always truthy —
        # this loop can never exit via the condition. Presumably the base
        # thread class exposes a `running` flag; confirm and fix there.
        while self.run:
            try:
                # Timeout after 1 second so we can respond to quit events
                item = self.queue.get(True, 1)
                self.send_stat(item)
            except Queue.Empty:
                continue


if __name__ == '__main__':
    # Run standalone to test this module, it will generate garbage
    from thread_manager import ThreadManager
    q = Queue.Queue()
    threads = [ThreadGenerateGarbage(q), ThreadStatsd(q)]
    tm = ThreadManager(threads=threads)
    tm.run()
class VkBot: delay_on_reply = config.get('vkbot.delay_on_reply', 'i') chars_per_second = config.get('vkbot.chars_per_second', 'i') same_user_interval = config.get('vkbot.same_user_interval', 'i') same_conf_interval = config.get('vkbot.same_conf_interval', 'i') typing_interval = 5 forget_interval = config.get('vkbot.forget_interval', 'i') delay_on_first_reply = config.get('vkbot.delay_on_first_reply', 'i') stats_dialog_count = config.get('stats.dialog_count', 'i') def __init__(self, username='', password=''): self.api = vkapi.VkApi(username, password, ignored_errors=ignored_errors, timeout=config.get('vkbot.default_timeout', 'i'), token_file=accounts.getFile('token.txt'), log_file=accounts.getFile('inf.log') if args.args['logging'] else '', captcha_handler=captcha.CaptchaHandler()) self.api.initLongpoll() self.users = UserCache(self.api, 'sex,crop_photo,blacklisted,blacklisted_by_me,' + check_friend.fields) self.confs = ConfCache(self.api) self.vars = json.load(open('data/defaultvars.json', encoding='utf-8')) self.initSelf(True) self.guid = int(time.time() * 5) self.last_viewed_comment = stats.get('last_comment', 0) self.good_conf = {} self.tm = ThreadManager() self.last_message = MessageCache() self.last_message_id = 0 self.whitelist = None self.whitelist_includeread = True self.bad_conf_title = lambda s: False self.admin = None self.banned_list = [] self.longpoll_queue = queue.Queue() self.message_lock = threading.Lock() def initSelf(self, sync=False): self.users.clear() def do(): res = self.api.users.get(fields='contacts,relation,bdate')[0] self.self_id = res['id'] self.vars['phone'] = res.get('mobile_phone') or self.vars['phone'] self.vars['name'] = (res['first_name'], res['last_name']) self.vars['bf'] = res.get('relation_partner') or self.vars['bf'] try: bdate = res['bdate'].split('.') today = datetime.date.today() self.vars['age'] = today.year - int(bdate[2]) - ((today.month, today.day) < (int(bdate[1]), int(bdate[0]))) except LookupError: pass if not sync: 
logging.info('My phone: ' + self.vars['phone']) if sync: do() else: threading.Thread(target=do).start() @staticmethod def getSender(message): if 'chat_id' in message: return CONF_START + message['chat_id'] return message['user_id'] def loadUsers(self, arr, key, clean=False, confs=False): users = [] for i in arr: try: users.append(key(i)) except Exception: pass (self.confs if confs else self.users).load(users, clean) def replyOne(self, message, gen_reply, method=None): if self.whitelist and self.getSender(message) not in self.whitelist: if self.getSender(message) > CONF_START or self.users[message['user_id']]['first_name'] + ' ' + self.users[message['user_id']]['last_name'] not in self.whitelist: return if 'chat_id' in message and not self.checkConf(message['chat_id']): return try: if self.tm.isBusy(self.getSender(message)) and not self.tm.get(self.getSender(message)).attr['unimportant']: return except Exception: return if method is not None: message['_method'] = method try: ans = gen_reply(message) except Exception as e: ans = None logging.exception('local {}: {}'.format(e.__class__.__name__, str(e))) time.sleep(1) if ans: self.replyMessage(message, ans[0], ans[1]) def replyAll(self, gen_reply, include_read=False): self.tm.gc() self.banned_list = [] if include_read: self.users.gc() if self.whitelist: messages = self.api.messages.getDialogs(unread=(0 if self.whitelist_includeread else 1), count=20) self.whitelist_includeread = False else: messages = self.api.messages.getDialogs(unread=1, count=200) try: messages = messages['items'][::-1] except TypeError: logging.warning('Unable to fetch messages') return self.loadUsers(messages, lambda x: x['message']['user_id']) self.loadUsers(messages, lambda x: x['message']['chat_id'], confs=True) for msg in sorted(messages, key=lambda m: m['message']['id']): cur = msg['message'] if cur['out']: continue if self.last_message_id and cur['id'] > self.last_message_id: continue self.replyOne(cur, gen_reply, 'getDialogs') 
stats.update('banned_messages', ' '.join(map(str, sorted(self.banned_list)))) else: messages = self.longpollMessages() self.loadUsers(messages, lambda x: x['user_id']) self.loadUsers(messages, lambda x: x['chat_id'], confs=True) for cur in sorted(messages, key=lambda m: m['id']): self.last_message_id = max(self.last_message_id, cur['id']) self.replyOne(cur, gen_reply) def longpollMessages(self): res = [] while not self.longpoll_queue.empty(): res.append(self.longpoll_queue.get()) return res def getLongpoll(self): arr = self.api.getLongpoll() need_extra = [] result = [] for i in arr: if i[0] == 4: # new message mid = i[1] sender = i[3] ts = i[4] text = i[6] opt = i[7] flags = i[2] if opt == {'source_mid': str(self.self_id), 'source_act': 'chat_kick_user', 'from': str(self.self_id)}: self.good_conf[sender] = False continue if opt.get('source_act') == 'chat_title_update': del self.confs[sender - CONF_START] logging.info('Conf {} renamed into "{}"'.format(sender - CONF_START, opt['source_text'])) if not config.get('vkbot.no_leave_conf') and self.bad_conf_title(opt['source_text']): self.leaveConf(sender - CONF_START) log.write('conf', 'conf ' + str(sender - CONF_START) + ' (name: {})'.format(opt['source_text'])) continue if opt.get('source_act') == 'chat_invite_user' and opt['source_mid'] == str(self.self_id) and opt['from'] != str(self.self_id): self.logSender('%sender% added me to conf "{}"'.format(self.confs[sender - CONF_START]['title']), {'user_id': int(opt['from'])}) if not config.get('vkbot.no_leave_conf') and int(opt['from']) not in self.banned: self.deleteFriend(int(opt['from'])) if flags & 2: # out if not opt.get('source_act'): self.tm.terminate(sender) continue for number in range(1, 11): if opt.get('attach{}_type'.format(number)) == 'photo': del opt['attach{}_type'.format(number)] del opt['attach{}'.format(number)] text += ' ..' 
if opt.get('attach{}_type'.format(number)) == 'doc': if opt.get('attach{}_kind'.format(number)) == 'graffiti': del opt['attach{}_type'.format(number)] del opt['attach{}'.format(number)] del opt['attach{}_kind'.format(number)] text += ' ..' if opt.get('attach{}_kind'.format(number)) == 'audiomsg': del opt['attach{}_type'.format(number)] del opt['attach{}'.format(number)] del opt['attach{}_kind'.format(number)] text += ' [Voice]' msg = {'id': mid, 'date': ts, 'body': text, 'out': 0, '_method': ''} if opt.get('source_act'): msg['body'] = None if opt.get('attach1_type') == 'sticker': msg['attachments'] = [{'type': 'sticker'}] if 'from' in opt: msg['chat_id'] = sender - CONF_START msg['user_id'] = int(opt['from']) else: msg['user_id'] = sender try: if 'chat_id' in msg and msg['user_id'] != self.tm.get(sender).attr['user_id'] and msg['body'] is not None: self.tm.get(sender).attr['reply'] = True except Exception: pass if not (set(opt) <= {'from', 'emoji'} or opt.get('attach1_type') == 'sticker') and not opt.get('source_act'): need_extra.append(str(mid)) continue result.append(msg) if need_extra: need_extra = ','.join(need_extra) for i in self.api.messages.getById(message_ids=need_extra)['items']: i['_method'] = 'getById' result.append(i) return result def monitorLongpoll(self): def _monitor(): while True: for i in self.getLongpoll(): self.longpoll_queue.put(i) self.longpoll_thread = threading.Thread(target=_monitor, daemon=True) self.longpoll_thread.start() def sendMessage(self, to, msg, forward=None): if not self.good_conf.get(to, 1): return with self.message_lock: self.guid += 1 time.sleep(1) if forward: return self.api.messages.send(peer_id=to, message=msg, random_id=self.guid, forward_messages=forward) else: return self.api.messages.send(peer_id=to, message=msg, random_id=self.guid) def replyMessage(self, message, answer, skip_mark_as_read=False): sender = self.getSender(message) sender_msg = self.last_message.bySender(sender) if 'id' in message and message['id'] <= 
sender_msg.get('id', 0): return if not answer: if self.tm.isBusy(sender): return if not sender_msg or time.time() - sender_msg['time'] > self.forget_interval: tl = Timeline().sleep(self.delay_on_first_reply).do(lambda: self.api.messages.markAsRead(peer_id=sender)) tl.attr['unimportant'] = True self.tm.run(sender, tl, tl.terminate) elif answer is None: # ignored self.api.messages.markAsRead.delayed(peer_id=sender, _once=True) else: tl = Timeline().sleep((self.delay_on_reply - 1) * random.random() + 1).do(lambda: self.api.messages.markAsRead(peer_id=sender)) tl.attr['unimportant'] = True self.tm.run(sender, tl, tl.terminate) self.last_message.byUser(message['user_id'])['text'] = message['body'] self.last_message.updateTime(sender) if sender > CONF_START and message['body'] is not None: sender_msg.setdefault('ignored', {})[message['user_id']] = time.time() return typing_time = 0 if not answer.startswith('&#'): typing_time = len(answer) / self.chars_per_second resend = False # answer is not empty if sender_msg.get('reply', '').upper() == answer.upper() and sender_msg['user_id'] == message['user_id']: logging.info('Resending') typing_time = 0 resend = True def _send(attr): if not set(sender_msg.get('ignored', [])) <= {message['user_id']}: ctime = time.time() for uid, ts in sender_msg['ignored'].items(): if uid != message['user_id'] and ctime - ts < self.same_conf_interval * 3: attr['reply'] = True try: if resend: res = self.sendMessage(sender, '', sender_msg['id']) elif attr.get('reply'): res = self.sendMessage(sender, answer, message['id']) else: res = self.sendMessage(sender, answer) if res is None: del self.users[sender] self.logSender('Failed to send a message to %sender%', message) return self.last_message.add(sender, message, res, answer) except Exception as e: logging.exception('thread {}: {}'.format(e.__class__.__name__, str(e))) cur_delay = (self.delay_on_reply - 1) * random.random() + 1 send_time = cur_delay + typing_time user_delay = 0 if sender_msg and 
sender != self.admin: user_delay = sender_msg['time'] - time.time() + (self.same_user_interval if sender < 2000000000 else self.same_conf_interval) # can be negative tl = Timeline(max(send_time, user_delay)) if 'chat_id' in message: tl.attr['user_id'] = message['user_id'] if not sender_msg or time.time() - sender_msg['time'] > self.forget_interval: if not skip_mark_as_read: tl.sleep(self.delay_on_first_reply) tl.do(lambda: self.api.messages.markAsRead(peer_id=sender)) else: tl.sleepUntil(send_time, (self.delay_on_reply - 1) * random.random() + 1) if not skip_mark_as_read: tl.do(lambda: self.api.messages.markAsRead(peer_id=sender)) tl.sleep(cur_delay) if message.get('_onsend_actions'): for i in message['_onsend_actions']: tl.do(i) tl.sleep(cur_delay) if typing_time: tl.doEveryFor(self.typing_interval, lambda: self.api.messages.setActivity(type='typing', user_id=sender), typing_time) tl.do(_send, True) self.tm.run(sender, tl, tl.terminate) def checkConf(self, cid): if config.get('vkbot.no_leave_conf'): return True if cid + CONF_START in self.good_conf: return self.good_conf[cid + CONF_START] messages = self.api.messages.getHistory(chat_id=cid)['items'] for i in messages: if i.get('action') == 'chat_create' and i['user_id'] not in self.banned: self.leaveConf(cid) self.deleteFriend(i['user_id']) log.write('conf', self.loggableName(i.get('user_id')) + ' ' + str(cid)) return False title = self.confs[cid]['title'] if self.bad_conf_title(title): self.leaveConf(cid) log.write('conf', 'conf ' + str(cid) + ' (name: {})'.format(title)) return False self.good_conf[cid + CONF_START] = True return True def leaveConf(self, cid): logging.info('Leaving conf {} ("{}")'.format(cid, self.confs[cid]['title'])) self.good_conf[cid + CONF_START] = False return self.api.messages.removeChatUser(chat_id=cid, user_id=self.self_id) def addFriends(self, gen_reply, is_good): data = self.api.friends.getRequests(extended=1) to_rep = [] self.loadUsers(data['items'], lambda x: x['user_id'], True) for 
i in data['items']: if self.users[i['user_id']].get('blacklisted'): self.api.friends.delete.delayed(user_id=i['user_id']) continue res = is_good(i['user_id'], True) if res is None: self.api.friends.add.delayed(user_id=i['user_id']) self.logSender('Adding %sender%', i) if 'message' in i: ans = gen_reply(i) to_rep.append((i, ans)) else: self.api.friends.delete.delayed(user_id=i['user_id']) self.logSender('Not adding %sender% ({})'.format(res), i) for i in to_rep: self.replyMessage(i[0], i[1][0], i[1][1]) self.api.sync() def unfollow(self): result = [] requests = self.api.friends.getRequests(out=1)['items'] suggested = self.api.friends.getRequests(suggested=1)['items'] for i in requests: if i not in self.banned: result.append(i) for i in suggested: self.api.friends.delete.delayed(user_id=i) self.deleteFriend(result) return result def deleteFriend(self, uid): if type(uid) == int: self.api.friends.delete(user_id=uid) else: for i in uid: self.api.friends.delete.delayed(user_id=i) self.api.sync() def setOnline(self): self.api.account.setOnline() def getUserId(self, domain): domain = str(domain).lower().rstrip().rstrip('}').rstrip() conf = re.search('sel=c(\\d+)', domain) or re.search('^c(\\d+)$', domain) or re.search('chat=(\\d+)', domain) or re.search('peer=2(\\d{9})', domain) if conf is not None: return int(conf.group(1)) + CONF_START if '=' in domain: domain = domain.split('=')[-1] if '/' in domain: domain = domain.split('/')[-1] data = self.api.users.get(user_ids=domain) if not data: return None return data[0]['id'] def deleteComment(self, rep): if rep['type'].endswith('photo'): self.api.photos.deleteComment(owner_id=self.self_id, comment_id=rep['feedback']['id']) elif rep['type'].endswith('video'): self.api.video.deleteComment(owner_id=self.self_id, comment_id=rep['feedback']['id']) else: self.api.wall.deleteComment(owner_id=self.self_id, comment_id=rep['feedback']['id']) def filterComments(self, test): data = 
self.api.notifications.get(start_time=self.last_viewed_comment + 1, count=100)['items'] to_del = set() to_bl = set() self.loadUsers(data, lambda x: x['feedback']['from_id'], True) for rep in data: if rep['date'] != 'i': self.last_viewed_comment = max(self.last_viewed_comment, int(rep['date'])) stats.update('last_comment', self.last_viewed_comment) def _check(s): if 'photo' in s: return s['photo']['owner_id'] == self.self_id if 'video' in s: return s['video']['owner_id'] == self.self_id if 'post' in s: return s['post']['to_id'] == self.self_id if rep['type'].startswith('comment_') or rep['type'].startswith('reply_comment') and _check(rep['parent']): txt = html.escape(rep['feedback']['text']) res = 'good' if self.users[rep['feedback']['from_id']]['blacklisted']: res = 'blacklisted' log.write('comments', self.loggableName(rep['feedback']['from_id']) + ' (blacklisted): ' + txt) self.deleteComment(rep) to_bl.add(rep['feedback']['from_id']) elif test(txt): res = 'bad' log.write('comments', self.loggableName(rep['feedback']['from_id']) + ': ' + txt) self.deleteComment(rep) to_del.add(rep['feedback']['from_id']) elif 'attachments' in rep['feedback'] and any(i.get('type') in ['video', 'link'] for i in rep['feedback']['attachments']): res = 'attachment' log.write('comments', self.loggableName(rep['feedback']['from_id']) + ' (attachment)') self.deleteComment(rep) self.logSender('Comment {} (by %sender%) - {}'.format(txt, res), {'user_id': rep['feedback']['from_id']}) for i in to_bl: self.blacklist(i) return to_del def likeAva(self, uid): del self.users[uid] if 'crop_photo' not in self.users[uid]: return photo = self.users[uid]['crop_photo']['photo'] self.api.likes.add(type='photo', owner_id=photo['owner_id'], item_id=photo['id']) self.logSender('Liked %sender%', {'user_id': uid}) def setRelation(self, uid): self.api.account.saveProfileInfo(relation_partner_id=uid) self.vars['bf'] = self.users[uid] log.write('relation', self.loggableName(uid)) self.logSender('Set relationship 
with %sender%', {'user_id': uid}) def waitAllThreads(self): for t in self.tm.all(): t.join(60) # {name} - first_name last_name # {id} - id def printableName(self, pid, user_fmt, conf_fmt='Conf "{name}"'): if pid > CONF_START: return conf_fmt.format(id=(pid - CONF_START), name=self.confs[pid - CONF_START]['title']) else: return user_fmt.format(id=pid, name=self.users[pid]['first_name'] + ' ' + self.users[pid]['last_name']) def logSender(self, text, message): text_msg = text.replace('%sender%', self.printableSender(message, False)) html_msg = text.replace('%sender%', self.printableSender(message, True)) logging.info(text_msg, extra={'db': html_msg}) def printableSender(self, message, need_html): if message.get('chat_id', 0) > 0: if need_html: return self.printableName(message['user_id'], user_fmt='Conf "%c" (%i), <a href="https://vk.com/id{id}" target="_blank">{name}</a>').replace('%i', str( message['chat_id'])).replace('%c', html.escape(self.confs[message['chat_id']]['title'])) else: return self.printableName(message['user_id'], user_fmt='Conf "%c" (%i), {name}').replace('%i', str(message['chat_id'])).replace('%c', html.escape( self.confs[message['chat_id']]['title'])) else: if need_html: return self.printableName(message['user_id'], user_fmt='<a href="https://vk.com/id{id}" target="_blank">{name}</a>') else: return self.printableName(message['user_id'], user_fmt='{name}') def loggableName(self, uid): return self.printableName(uid, '{id} ({name})') def blacklist(self, uid): self.api.account.banUser(user_id=uid) def blacklistedCount(self): return self.api.account.getBanned(count=0)['count'] def lastDialogs(self): def cb(req, resp): d.append((req['peer_id'], resp['count'])) dialogs = self.api.messages.getDialogs(count=self.stats_dialog_count, preview_length=1) d = [] confs = {} try: items = list(dialogs['items']) for i in items: self.api.messages.getHistory.delayed(peer_id=self.getSender(i['message']), count=0).callback(cb) if 'title' in i['message']: 
confs[self.getSender(i['message'])] = i['message']['title'] self.api.sync() except TypeError: logging.warning('Unable to fetch dialogs') return (None, None, None) return (dialogs['count'], d, confs) def acceptGroupInvites(self): for i in self.api.groups.getInvites()['items']: logging.info('Joining group "{}"'.format(i['name'])) self.api.groups.join(group_id=i['id']) log.write('groups', '{}: <a target="_blank" href="https://vk.com/club{}">{}</a>{}'.format(self.loggableName(i['invited_by']), i['id'], i['name'], ['', ' (closed)', ' (private)'][i['is_closed']]))
    def test_ior_intercept_verify_data(self):
        """Jira ID: DAOS-3502.

        Test Description:
            Purpose of this test is to run ior through dfuse with
            interception library on 5 clients and without interception
            library on 1 client for at least 30 minutes and verify the
            data integrity using ior's Read Verify and Write Verify
            options.

        Use case:
            Run ior with read, write, fpp, read verify
            write verify for 30 minutes
            Run ior with read, write, read verify
            write verify for 30 minutes

        :avocado: tags=all,full_regression
        :avocado: tags=hw,large
        :avocado: tags=daosio,dfuse,il,ior_intercept
        :avocado: tags=ior_intercept_verify_data
        """
        self.add_pool()
        self.add_container(self.pool)

        # Start dfuse for POSIX api. This is specific to interception library test requirements.
        self.start_dfuse(self.hostlist_clients, self.pool, self.container)

        # Setup the thread manager
        thread_manager = ThreadManager(run_ior, self.timeout - 30)
        # One entry per IOR run: all-but-last client with the interception
        # library preloaded, the last client without it.
        index_clients_intercept_file = [
            (0, self.hostlist_clients[0:-1], os.path.join(self.prefix, 'lib64', 'libioil.so'),
             os.path.join(self.dfuse.mount_dir.value, "testfile_0_intercept")),
            (1, self.hostlist_clients[-1:], None,
             os.path.join(self.dfuse.mount_dir.value, "testfile_1")),
        ]
        self.job_manager = []
        for index, clients, intercept, test_file in index_clients_intercept_file:
            # Add a job manager for each ior command. Use a timeout for the ior command that leaves
            # enough time to report the summary of all the threads
            job_manager = get_job_manager(self, "Mpirun", None, False, "mpich",
                                          self.get_remaining_time() - 30)

            # Define the parameters that will be used to run an ior command in this thread
            # (processes is scaled to the share of clients this run owns).
            thread_manager.add(
                test=self,
                manager=job_manager,
                log=self.client_log,
                hosts=clients,
                path=self.workdir,
                slots=None,
                group=self.server_group,
                pool=self.pool,
                container=self.container,
                processes=(self.processes // len(self.hostlist_clients)) * len(clients),
                intercept=intercept,
                ior_params={"test_file": test_file})
            self.log.info("Created thread %s for %s with intercept: %s",
                          index, clients, str(intercept))

        # Launch the IOR threads
        self.log.info("Launching %d IOR threads", thread_manager.qty)
        results = thread_manager.run()

        # Stop dfuse
        self.stop_dfuse()

        # Check the ior thread results; any failed thread fails the test.
        failed_thread_count = thread_manager.check(results)
        if failed_thread_count > 0:
            msg = "{} FAILED IOR Thread(s)".format(failed_thread_count)
            self.d_log.error(msg)
            self.fail(msg)

        # Report per-run IOR metrics, labelled by interception usage.
        for index, clients, intercept, _ in index_clients_intercept_file:
            with_intercept = "without" if intercept is None else "with"
            IorCommand.log_metrics(
                self.log, "{} clients {} interception library".format(
                    len(clients), with_intercept),
                IorCommand.get_ior_metrics(results[index].result))
class DebuggerDomain(HandlerDomain):
    '''Implement Chrome debugger domain protocol and convert into lldb python API.
    '''
    def __init__(self, runtimeDomain, fileManager, remoteObjectManager,
                 basepath='.', **kwargs):
        """Wire the debugger domain to its sibling domains and managers.

        Args:
            runtimeDomain: RuntimeDomain used to evaluate expressions.
            fileManager: maps Chrome script ids/urls to server files.
            remoteObjectManager: tracks lldb values exposed to the client.
            basepath: base directory used when serializing file locations.
        """
        HandlerDomain.__init__(self, **kwargs)
        self.runtimeDomain = runtimeDomain
        self.fileManager = fileManager
        self.remoteObjectManager = remoteObjectManager
        self.locationSerializer = serialize.LocationSerializer(
            fileManager, basepath)
        self.moduleSourcePathUpdater = ModuleSourcePathUpdater(
            self.debugger.GetSelectedTarget(), fileManager, basepath)
        self.thread_manager = ThreadManager(
            self.socket, self.locationSerializer, self.remoteObjectManager)

    @property
    def name(self):
        """Domain name used to route incoming Chrome protocol messages."""
        return 'Debugger'

    @handler()
    def canSetScriptSource(self, params):
        # Return False, because we don't support
        # changing source at runtime.
        return {"result": False}

    @handler()
    def continueToLocation(self, params):
        # TODO(williamsc) - This is probably setting a one off breakpoint and continuing.
        raise UndefinedHandlerError('continueToLocation not implemented')

    @handler()
    def disable(self, params):
        # Not exactly the same as disable. Detach() might be closer to
        # what Chrome Dev Tools is trying to do.
        self.debugger.GetSelectedTarget().DisableAllBreakpoints()
        return {}

    @handler()
    def enable(self, params):
        """Start the lldb event listener thread and push module paths."""
        process = self.debugger.GetSelectedTarget().process
        self.event_thread = LLDBListenerThread(
            server=self.socket,
            location_serializer=self.locationSerializer,
            remote_object_manager=self.remoteObjectManager,
            module_source_path_updater=self.moduleSourcePathUpdater,
            thread_manager=self.thread_manager,
            process=process)
        self.moduleSourcePathUpdater.modules_updated()
        self.event_thread.start()
        return {}

    @handler()
    def evaluateOnCallFrame(self, params):
        """Select the frame named by callFrameId, then delegate to Runtime.evaluate."""
        frameId = params['callFrameId']
        # callFrameId is encoded as "<threadIndexID>.<frameIndex>".
        thread, frame = frameId.split('.')
        # TODO: These return booleans to indicate success. Throw something if False.
        self.debugger.GetSelectedTarget().process.SetSelectedThreadByIndexID(int(thread))
        self.debugger.GetSelectedTarget().process.GetSelectedThread().SetSelectedFrame(int(frame))
        return self.runtimeDomain.evaluate(params)

    @handler()
    def getScriptSource(self, params):
        """Return the source text for a script id, or a placeholder on failure."""
        filelike = self.fileManager.get_by_script_id(params['scriptId'])
        if filelike:
            return {'scriptSource': filelike.script_source}
        else:
            return {'scriptSource': '<Failed to fetch source.>'}

    @handler()
    def pause(self, params):
        self.debugger.GetSelectedTarget().process.Stop()
        return {}

    @handler()
    def removeBreakpoint(self, params):
        self.debugger.GetSelectedTarget().BreakpointDelete(int(params['breakpointId']))
        return {}

    @handler()
    def resume(self, params):
        self.debugger.GetSelectedTarget().process.Continue()
        return {}

    @handler()
    def selectThread(self, params):
        threadId = params['threadId']
        self.debugger.GetSelectedTarget().process.SetSelectedThreadByID(threadId)
        return {}

    @handler()
    def getThreadStack(self, params):
        """Serialize the call frames of the thread identified by threadId."""
        threadId = params['threadId']
        thread = self.debugger.GetSelectedTarget().process.GetThreadByID(threadId)
        # Build a fresh result instead of clobbering the incoming params dict.
        result = {"callFrames": []}
        if thread is not None:
            result["callFrames"] = self.thread_manager.get_thread_stack(thread)
        return result

    @handler()
    def searchInContent(self, params):
        raise UndefinedHandlerError('searchInContent not implemented')

    @handler()
    def setBreakpoint(self, params):
        filelike = self.fileManager.get_by_script_id(params['location']['scriptId'])
        if not filelike or not isinstance(filelike, file_manager.File):
            # Only support setting breakpoints in real files.
            return {}
        # Chrome line numbers are 0-based; lldb's are 1-based.
        return self._set_breakpoint_by_filespec(
            filelike.server_obj,
            int(params['location']['lineNumber']) + 1)

    @handler()
    def setBreakpointByUrl(self, params):
        filelike = self.fileManager.get_by_client_url(params['url'])
        if not filelike or not isinstance(filelike, file_manager.File):
            raise RuntimeError('Cannot find file for breakpoint.')
        # Chrome line numbers are 0-based; lldb's are 1-based.
        return self._set_breakpoint_by_filespec(
            filelike.server_obj, int(params['lineNumber']) + 1)

    @handler()
    def setBreakpointsActive(self, params):
        if params['active']:
            self.debugger.GetSelectedTarget().EnableAllBreakpoints()
        else:
            self.debugger.GetSelectedTarget().DisableAllBreakpoints()
        return {}

    @handler()
    def setPauseOnExceptions(self, params):
        # TODO(williamsc) - Support add support for pausing on exceptions
        raise UndefinedHandlerError('setPauseOnExceptions not implemented')

    @handler()
    def setScriptSource(self, params):
        raise UndefinedHandlerError('setScriptSource not supported for LLDB')

    @handler()
    def stepInto(self, params):
        self.debugger.GetSelectedTarget().GetProcess().GetSelectedThread().StepInto()
        return {}

    @handler()
    def stepOut(self, params):
        self.debugger.GetSelectedTarget().GetProcess().GetSelectedThread().StepOut()
        return {}

    @handler()
    def stepOver(self, params):
        self.debugger.GetSelectedTarget().GetProcess().GetSelectedThread().StepOver()
        return {}

    def _set_breakpoint_by_filespec(self, filespec, line):
        """Create a breakpoint at filespec:line and serialize it for the client."""
        breakpoint = self.debugger.GetSelectedTarget().BreakpointCreateByLocation(filespec, line)
        return {
            'breakpointId': str(breakpoint.id),
            'locations':
                self.locationSerializer.get_breakpoint_locations(breakpoint),
        }
def __init__(self):
    """Program entry point.

    Parses command-line options, loads configuration, optionally
    daemonizes, then wires a MySQL-polling thread to a statsd-flushing
    thread through a shared queue and runs them under a ThreadManager.
    """
    op = argparse.ArgumentParser()
    op.add_argument("-c", "--config", dest="cfile",
                    default="/etc/mysql-statsd.conf",
                    help="Configuration file")
    op.add_argument("-d", "--debug", dest="debug",
                    help="Prints statsd metrics next to sending them",
                    default=False, action="store_true")
    op.add_argument("--dry-run", dest="dry_run",
                    default=False, action="store_true",
                    help="Print the output that would be sent to statsd without actually sending data somewhere")
    # TODO switch the default to True, and make it fork by default in init script.
    op.add_argument("-f", "--foreground", dest="foreground",
                    help="Dont fork main program",
                    default=False, action="store_true")

    opt = op.parse_args()
    self.get_config(opt.cfile)

    if not self.config:
        sys.exit(op.print_help())

    try:
        logfile = self.config.get('daemon').get('logfile', '/tmp/daemon.log')
    except AttributeError:
        # No [daemon] section in the config; fall back to stdout.
        logfile = sys.stdout

    if not opt.foreground:
        self.daemonize(stdin='/dev/null', stdout=logfile, stderr=logfile)

    # Set up the queue shared by the polling and flushing threads.
    self.queue = Queue.Queue()

    # Split off config for each thread.
    mysql_config = dict(mysql=self.config['mysql'])
    mysql_config['metrics'] = self.config['metrics']
    statsd_config = self.config['statsd']

    # Spawn MySQL polling thread.
    mysql_thread = ThreadMySQL(queue=self.queue, **mysql_config)

    # Spawn the statsd flushing thread; in dry-run mode build only the
    # fake sender instead of constructing a real one and discarding it.
    if opt.dry_run:
        statsd_thread = ThreadFakeStatsd(queue=self.queue, **statsd_config)
    else:
        statsd_thread = ThreadStatsd(queue=self.queue, **statsd_config)

    if opt.debug:
        # All debug settings go here.
        statsd_thread.debug = True

    # Get thread manager.
    tm = ThreadManager(threads=[mysql_thread, statsd_thread])
    try:
        tm.run()
    except BaseException:
        # Protects somewhat from needing to kill -9 if there is an exception
        # within the thread manager (including KeyboardInterrupt) by asking
        # the threads to quit and join before re-raising.
        try:
            tm.stop_threads()
        except Exception:
            # Best-effort cleanup; the original exception is re-raised below.
            pass
        raise