def get_messages_from_queue(self):
    """Drain messages from the SQS queue, deleting each received batch.

    Performs up to ``options.get_messages_from_queue`` receive calls;
    stops early when the queue returns no messages.

    Returns:
        list: all received message dicts.

    Raises:
        RuntimeError: when a batch deletion does not succeed for every entry.
    """
    collected = []
    for _ in range(self.options.get_messages_from_queue):
        resp = self.sqs_client.receive_message(
            QueueUrl=self.options.sqs_queue_url, AttributeNames=['All'])
        try:
            batch = resp['Messages']
        except KeyError:
            # No 'Messages' key means the queue is empty.
            break
        collected.extend(batch)
        entries = [{'Id': m['MessageId'], 'ReceiptHandle': m['ReceiptHandle']}
                   for m in batch]
        resp = self.sqs_client.delete_message_batch(
            QueueUrl=self.options.sqs_queue_url, Entries=entries)
        if len(resp['Successful']) != len(entries):
            logger.debug(
                "Failed to delete messages: entries={} resp={}".format(
                    entries, resp))
            raise RuntimeError(
                f"Failed to delete messages: entries={entries!r} resp={resp!r}"
            )
    return collected
def update_deployment(self, deployment):
    """Patch the configured deployment with the given body via apps/v1."""
    result = self.apps_v1.patch_namespaced_deployment(
        name=self.options.kubernetes_deployment,
        namespace=self.options.kubernetes_namespace,
        body=deployment)
    logger.debug("Deployment updated. status='%s'" % str(result.status))
def post(self, msg, audience=None):
    """Publish *msg* as a status update, optionally @-mentioning *audience*.

    Args:
        msg: status text to publish.
        audience: optional iterable of user names; each is appended to the
            message as " @name".

    Raises:
        ValueError: when the server rejects the post.
    """
    # Bug fix: a mutable default ([]) was shared across calls; use None.
    audience = [] if audience is None else audience
    p = {"api": "/statuses/update.json", "_": int(time.time() * 1000)}
    cookie = self.load_cookies()
    url = urljoin(BASE_URL, TOKEN_URL)
    r = self.session.get(url, params=p, cookies=cookie, headers=BASE_HEADER)
    try:
        token = r.json()['token']
    except (IndexError, TypeError, ValueError):
        logger.error("MLGB 出错了!")
        logger.error("\n%s\n", r.text)
        return
    # Bug fix: an empty audience used to append a dangling " @" suffix.
    if audience:
        msg = '%s %s' % (msg, ' @' + ' @'.join(audience).strip())
    logger.info('发送的内容是: %s' % msg)
    # (Removed a no-op `msg.encode().decode()` round-trip.)
    data = {"status": "<p>%s</p>" % msg, "session_token": token}
    url = urljoin(BASE_URL, POST_URL)
    r = self.session.post(url, data=data, cookies=cookie, headers=BASE_HEADER)
    if r.status_code == 200:
        data = r.json()
        # Bug fix: a missing 'error_code' made `None > -1` raise TypeError;
        # default to -1 so an absent error code counts as success.
        if data.get('error_code', -1) <= -1:
            logger.debug("完事儿了.")
            return
    logger.error("MLGB 又出错了!")
    logger.error("\n%s\n", r.text)
    raise ValueError('发广播出错了')
def run(self):
    """Log the polling cadence once, then poll the queue forever."""
    opts = self.options
    logger.debug("Starting poll for {} every {}s".format(
        opts.sqs_queue_url, opts.poll_period))
    while True:
        self.poll()
def run(self):
    """Run the poller forever, invalidating the cached deployment each cycle."""
    opts = self.options
    logger.debug("Starting poll for {} every {}s".format(
        opts.sqs_queue_url, opts.poll_period))
    while True:
        # Drop the cached deployment so each poll re-reads cluster state.
        self._deployment = None
        self.poll()
def deployment(self):
    """Return the first deployment labelled app=<kubernetes_deployment>."""
    ns = self.options.kubernetes_namespace
    name = self.options.kubernetes_deployment
    logger.debug(
        "loading deployment: {} from namespace: {}".format(name, ns))
    matches = self.extensions_v1_beta1.list_namespaced_deployment(
        ns, label_selector="app={}".format(name))
    # NOTE(review): raises IndexError when nothing matches the selector.
    return matches.items[0]
def deployment(self):
    """Return the first deployment matched by app=<label_selector>."""
    matches = self.apps_v1.list_namespaced_deployment(
        self.options.kubernetes_namespace,
        label_selector="app={}".format(self.options.label_selector))
    # Log the name of the deployment that was actually found.
    logger.debug("loading deployment: {} from namespace: {}".format(
        matches.items[0].metadata.name,
        self.options.kubernetes_namespace))
    return matches.items[0]
def update_deployment(self, deployment):
    """Patch *deployment* (by its own metadata name) in the configured namespace."""
    result = self.extensions_v1_beta1.patch_namespaced_deployment(
        name=deployment.metadata.name,
        namespace=self.options.kubernetes_namespace,
        body=deployment,
    )
    logger.debug("Deployment updated. status='%s'" % str(result.status))
def scale_down(self, deployment):
    """Scale *deployment* down by one replica, respecting options.min_pods.

    If replicas are somehow below the minimum, delegate to scale_up()
    so the count drifts back toward the floor.
    """
    current = deployment.spec.replicas
    if current > self.options.min_pods:
        deployment.spec.replicas = current - 1
        logger.info("Scaling down to %d" % deployment.spec.replicas)
        self.update_deployment(deployment)
    elif current < self.options.min_pods:
        # Below the floor: correct upward instead of scaling down further.
        self.scale_up(deployment)
    else:
        logger.debug("Min pods reached")
def run(self):
    """Start the Prometheus exporter, then poll and update metrics forever."""
    opts = self.options
    start_http_server(opts.prometheus_port)
    logger.debug("Starting poll for {} every {}s".format(
        opts.sqs_queue_url, opts.poll_period))
    logger.info("Started metrics exporter at port 9095")
    while True:
        self.poll()
        self.update_metrics()
        sleep(opts.poll_period)
def update_deployment(self, deployment):
    """Patch the configured deployment; exit the process if the API call fails.

    Bug fix: the previous bare ``except:`` swallowed every exception —
    including SystemExit and KeyboardInterrupt — and exited without any
    diagnostic. Catch Exception only and log the failure before exiting.
    """
    try:
        api_response = self.extensions_v1_beta1.patch_namespaced_deployment(
            name=self.options.kubernetes_deployment,
            namespace=self.options.kubernetes_namespace,
            body=deployment)
    except Exception:
        logger.exception("Failed to patch deployment %s",
                         self.options.kubernetes_deployment)
        sys.exit(1)
    logger.debug("Deployment updated. status='%s'" % str(api_response.status))
def deployment(self):
    """Return the watched deployment, caching the first lookup.

    Bug fix: the cache guard used object truthiness (``if self._deployment``);
    compare explicitly against None so any cached object is honored.
    """
    if self._deployment is not None:
        return self._deployment
    logger.debug("loading deployment: {} from namespace: {}".format(
        self.options.kubernetes_deployment,
        self.options.kubernetes_namespace))
    deployments = self.apps_v1.list_namespaced_deployment(
        self.options.kubernetes_namespace,
        label_selector="app={}".format(self.options.kubernetes_deployment))
    self._deployment = deployments.items[0]
    return self._deployment
def poll(self):
    """One scaling cycle: read queue depth, scale the deployment, then sleep.

    Scale-up and scale-down each have their own cooldown timestamp
    (self.last_scale_up_time / self.last_scale_down_time) so the two
    directions cannot starve each other.
    """
    # visible = ready for delivery; invisible = currently being processed.
    message_count, invisible_message_count = self.message_counts()
    deployment = self.deployment()
    logger.debug(
        "Current message counts: %d visible / %d invisible. %d replicas." %
        (message_count, invisible_message_count, deployment.spec.replicas))
    t = time()
    if message_count >= self.options.scale_up_messages:
        if t - self.last_scale_up_time > self.options.scale_up_cool_down:
            self.scale_up(deployment)
            self.last_scale_up_time = t
        else:
            logger.debug("Waiting for scale up cooldown")
    if message_count <= self.options.scale_down_messages:
        # special case - do not scale to zero unless there are no invisible messages
        if invisible_message_count > 0 and deployment.spec.replicas <= invisible_message_count:
            logger.debug(
                "Not scaling down because messages are still in-flight")
        elif t - self.last_scale_down_time > self.options.scale_down_cool_down:
            self.scale_down(deployment)
            self.last_scale_down_time = t
        else:
            # Only mention the cooldown when a scale-down would actually occur.
            if deployment.spec.replicas > self.options.min_pods:
                logger.debug("Waiting for scale down cooldown")
    # code for scale to use msg_count
    sleep(self.options.poll_period)
def update_metrics(self):
    """
    Updates the values of the prometheus metrics with the values of the
    attributes from each SQS queue
    """
    for queue_name in self.options.queues_to_monitor.split(","):
        q = self.sqs_resource.get_queue_by_name(QueueName=queue_name)
        logger.debug(f"Updating metrics for queue [{queue_name}]")
        attrs = q.attributes
        self.approximate_number_of_messages.labels(queue=queue_name).set(
            attrs.get('ApproximateNumberOfMessages'))
        self.approximate_number_of_messages_not_visible.labels(
            queue=queue_name).set(
                attrs.get('ApproximateNumberOfMessagesNotVisible'))
        self.approximate_number_of_messages_delayed.labels(
            queue=queue_name).set(
                attrs.get('ApproximateNumberOfMessagesDelayed'))
def send_chat_msg():
    """Send CHAT_MESSAGE to every person recorded after the last chatted id,
    persisting progress after each successful send."""
    spider = Spider()
    spider.login()
    chat_obj = Chat().get()
    logger.debug('从%s开始' % chat_obj.chatting_id)
    pending = People.select().where(People.id > chat_obj.chatting_id)
    sent = 0
    for person in pending:
        if not spider.chat(person.uid, CHAT_MESSAGE):
            continue
        sent += 1
        logger.debug('第%s消息,发送给’%s‘成功' % (sent, person.user_name))
        # Save progress so a restart resumes after the last success.
        chat_obj.chatting_id = person.uid
        chat_obj.save()
        time.sleep(PERCHAT_INTEVAL)
def poll(self):
    """Check the queue once and scale up/down, honoring per-direction cooldowns."""
    message_count = self.message_count()
    now = time()
    if message_count >= self.options.scale_up_messages:
        if now - self.last_scale_up_time > self.options.scale_up_cool_down:
            self.scale_up()
            self.last_scale_up_time = now
        else:
            logger.debug("Waiting for scale up cooldown")
    if message_count <= self.options.scale_down_messages:
        if now - self.last_scale_down_time > self.options.scale_down_cool_down:
            self.scale_down()
            self.last_scale_down_time = now
        else:
            logger.debug("Waiting for scale down cooldown")
def deployments(self):
    """List namespace deployments matched by the configured label selector.

    An explicit kubernetes_deployment_selector wins; otherwise fall back
    to app=<kubernetes_deployment>.
    """
    logger.debug(
        "loading deployments: %s from namespace: %s",
        self.options.kubernetes_deployment
        or self.options.kubernetes_deployment_selector,
        self.options.kubernetes_namespace,
    )
    selector = (self.options.kubernetes_deployment_selector
                if self.options.kubernetes_deployment_selector
                else "app={}".format(self.options.kubernetes_deployment))
    logger.debug("Selector is %s", selector)
    found = self.extensions_v1_beta1.list_namespaced_deployment(
        self.options.kubernetes_namespace, label_selector=selector)
    return found.items
def crawl_people_info():
    """Crawl each star's followers and bulk-insert them into People.

    Bug fix: two bare ``print followers`` debug leftovers dumped raw
    follower lists to stdout; route them through logger.debug instead.
    """
    spider = Spider()
    spider.visit_index()
    star_people = spider.get_people()
    for item in star_people:
        logger.debug(u'开始抓取’%s‘的粉丝' % item[1])
        uid = item[0]
        # Resolve non-numeric handles to a numeric uid; skip on failure.
        if not if_int(uid):
            uid = spider.get_people_id(uid)
            if not uid:
                continue
        followers = spider.get_followers(uid)
        logger.debug(followers)
        followers = [{'uid': _[0], 'user_name': _[1]} for _ in followers]
        logger.debug(followers)
        with database.atomic():
            # Insert in chunks of 100 to stay under SQL parameter limits.
            for idx in range(0, len(followers), 100):
                People.insert_many(followers[idx:idx + 100]).execute()
        logger.debug('总共抓取了%s个粉丝' % len(followers))
        time.sleep(PER_STAR_FOLLOWER_INTEVAL)
def chat(self, uid, msg):
    """Send chat message *msg* to user *uid*.

    Returns True on success, False when no sequence id is available or
    the HTTP call fails. Raises ValueError when the server reports an
    application-level error.
    """
    # Each conversation carries a monotonically increasing sequence id;
    # without one we cannot post into the thread.
    sequenceId = self.get_chat_sequence_id(uid)
    if not sequenceId:
        return False
    data = {
        'plain': msg,
        'to_group': False,
        'toId': uid,
        'sequenceId': sequenceId + 1
    }
    params = {'user_id': self.uid}
    cookies = self.load_cookies()
    respond = self.session.post(CHAT_URL,
                                headers=CHAT_HEADER,
                                cookies=cookies,
                                params=params,
                                data=json.dumps(data))
    if respond.status_code == 200:
        result = respond.json()
        error = result.get('error')
        if error:
            # HTTP succeeded but the server reported an application error.
            print '发送消息出错了'
            logger.debug(respond.content)
            raise ValueError(error.encode('utf8'))
        return True
    logger.debug(respond.status_code)
    logger.debug(respond.content)
    return False
def poll(self):
    """One scaling pass over every matched deployment, then sleep.

    A single queue reading is shared by all deployments; each deployment
    keeps its own scale-up/scale-down cooldown timestamp, keyed by name
    in the last_scale_up_time / last_scale_down_time dicts.
    """
    # visible = ready for delivery; invisible = currently being processed.
    message_count, invisible_message_count = self.message_counts()
    t = time()
    for deployment in self.deployments():
        name = deployment.metadata.name
        logger.info("Checking deployment %s", name)
        if message_count >= self.options.scale_up_messages:
            if t - self.last_scale_up_time[
                    name] > self.options.scale_up_cool_down:
                self.scale_up(deployment)
                self.last_scale_up_time[name] = t
            else:
                logger.debug("Waiting for scale up cooldown")
        if message_count <= self.options.scale_down_messages:
            # special case - do not scale to zero unless there are no invisible messages
            if (invisible_message_count > 0
                    and deployment.spec.replicas <= invisible_message_count):
                logger.debug(
                    "Not scaling down because messages are still in-flight"
                )
            elif t - self.last_scale_down_time[
                    name] > self.options.scale_down_cool_down:
                self.scale_down(deployment)
                self.last_scale_down_time[name] = t
            else:
                # Only mention the cooldown when a scale-down could occur.
                if deployment.spec.replicas > self.options.min_pods:
                    logger.debug("Waiting for scale down cooldown")
    # code for scale to use msg_count
    sleep(self.options.poll_period)
def login(self):
    """Log in with the configured credentials, saving cookies on success.

    Raises:
        ValueError: when the login attempt is rejected.
    """
    url = urljoin(BASE_URL, LOGIN_URL)
    if self.check_login():
        logger.info('已经登录')
        return
    payload = {
        'areacode': 86,
        'remember_me': 'on',
        'username': self.user_name,
        'password': self.get_hash(self.password),
    }
    # Numeric accounts authenticate via the 'telephone' field instead.
    if if_int(self.user_name):
        payload['telephone'] = payload.pop('username')
    resp = self.session.post(url, headers=BASE_HEADER, data=payload)
    logger.debug(resp.content)
    if self.check_login():
        logger.info('登录成功')
        # NOTE(review): warms up a fixed profile lookup — purpose unclear,
        # preserved as-is; confirm whether this hard-coded id is required.
        self.get_people_id('8276760920')
        self.save_cookies()
        return
    raise ValueError('登录失败')
def poll(self):
    """
    Main polling function

    Scaling should work in the following way:
    a) If zero, assume it's intentional and do nothing
    b) Average messages per pod is used to decide scaling
    c) Scaling up/down should occur based on their own cooldowns
    d) Sleep at the end of every loop
    """
    if self.replicas == 0:
        # We don't want to try to scale up if intentionally
        # scaled to zero
        sleep(self.options.poll_period)
        return
    message_count = self.message_count()
    # Messages per pod drives the decision; counts are compared against
    # the per-pod thresholds rather than the raw queue depth.
    average_messages = int(message_count / self.replicas)
    t = time()
    if average_messages >= self.options.scale_up_messages:
        if t - self.last_scale_up_time > self.options.scale_up_cool_down:
            # Target enough pods that each handles at most scale_up_messages.
            pods = int(ceil(message_count / self.options.scale_up_messages))
            self.scale_up(pods)
            self.last_scale_up_time = t
        else:
            logger.debug("Waiting for scale up cooldown")
    if average_messages <= self.options.scale_down_messages:
        if t - self.last_scale_down_time > self.options.scale_down_cool_down:
            # Shrink to the count where each pod still has work up to
            # scale_down_messages.
            pods = int(
                ceil(message_count / self.options.scale_down_messages))
            self.scale_down(pods)
            self.last_scale_down_time = t
        else:
            logger.debug("Waiting for scale down cooldown")
    # code for scale to use msg_count
    sleep(self.options.poll_period)
def run(self):
    """Poll the queue forever at the configured period."""
    opts = self.options
    logger.debug("Starting poll for %s every %d seconds",
                 opts.sqs_queue_url, opts.poll_period)
    while True:
        self.poll()