def exception_cb(exc_info, private_data):
    """Handle an exception raised while a reader event was being processed.

    Logs the traceback, acknowledges the message so the queue can keep
    making progress, then logs the raw payload for post-mortem debugging.
    """
    event = private_data
    logger.error("Get Some Exception:")
    print_traceback(logger)
    event.msg_finish_ack()
    logger.error("Raw is:\n%s" % event.raw)
def handle_query(params, start_response):
    """WSGI handler: return the hOCR data for (lang, book) as a JSON body.

    params         -- dict-like request parameters with 'lang' and 'book'.
    start_response -- WSGI start_response callable.
    Returns a one-element list holding the JSON text.
    """
    print >> sys.stderr, params
    if params['lang'] and params['book']:
        try:
            ret_code = '200 OK'
            result = hocr.get_hocr(params['lang'], params['book'])
        # was a bare "except:", which also swallowed SystemExit and
        # KeyboardInterrupt; narrowed to real errors.
        except Exception:
            utils.print_traceback()
            ret_code = '500 Internal Server Error'
            result = { 'error' : 1, 'text' : ret_code }
    else:
        ret_code = '400 Bad Request'
        result = { 'error' : 1, 'text' : ret_code }
    try:
        text = json.dumps(result)
    except UnicodeDecodeError:
        # result contained mis-encoded bytes; report it rather than crash
        print >> sys.stderr, result
        ret_code = '400 Bad Request'
        text = json.dumps({ 'error' : 1, 'text' : ret_code })
    # WSGI (PEP 3333) requires header values to be str; len() is an int.
    start_response(ret_code, [('Content-Type',
                               'application/json' + '; charset=UTF-8'),
                              ('Content-Length', str(len(text))),
                              ('Access-Control-Allow-Origin', '*')])
    return [ text ]
def handle_query(params, start_response):
    """WSGI handler: return the hOCR data for (lang, book) as a JSON body.

    params         -- dict-like request parameters with 'lang' and 'book'.
    start_response -- WSGI start_response callable.
    Returns a one-element list holding the JSON text.
    """
    print >> sys.stderr, params
    if params['lang'] and params['book']:
        try:
            ret_code = '200 OK'
            result = hocr.get_hocr(params['lang'], params['book'])
        # narrowed from a bare "except:" that also trapped SystemExit and
        # KeyboardInterrupt.
        except Exception:
            utils.print_traceback()
            ret_code = '500 Internal Server Error'
            result = {'error': 1, 'text': ret_code}
    else:
        ret_code = '400 Bad Request'
        result = {'error': 1, 'text': ret_code}
    try:
        text = json.dumps(result)
    except UnicodeDecodeError:
        # result contained mis-encoded bytes; degrade to an error payload
        print >> sys.stderr, result
        ret_code = '400 Bad Request'
        text = json.dumps({'error': 1, 'text': ret_code})
    # WSGI (PEP 3333) requires header values to be str; len() is an int.
    start_response(ret_code, [('Content-Type',
                               'application/json' + '; charset=UTF-8'),
                              ('Content-Length', str(len(text))),
                              ('Access-Control-Allow-Origin', '*')])
    return [text]
def extract_image(opt, page_nr, filename):
    """Extract one page of a DjVu file to a TIFF image via ddjvu.

    The page is subsampled so the output stays under roughly 50 megapixels.
    Returns the TIFF path, or None when ddjvu exits non-zero.
    """
    try:
        width, height = image_size(page_nr, filename)
        subsample = 1
        # grow the subsample factor until the scaled page fits in ~50 Mpx
        while (width*height) / subsample > (1 << 20) * 50:
            subsample += 1
        # hard cap at 12 — presumably ddjvu's maximum; TODO confirm
        subsample = min(subsample, 12)
    except Exception:
        utils.print_traceback("Unable to get image size, subsample=1", filename)
        subsample = 1
    if subsample != 1:
        print "subsample", subsample
    tiff_name = opt.out_dir + 'page_%04d.tif' % page_nr
    ddjvu = djvulibre_path + 'ddjvu'
    # preexec_fn=setrlimits caps the child's resources (defined elsewhere)
    ls = subprocess.Popen([ ddjvu, "-format=tiff",
                            "-page=%d" % page_nr,
                            "-subsample=%d" % subsample,
                            filename, tiff_name],
                          stdout=subprocess.PIPE,
                          preexec_fn=setrlimits, close_fds = True)
    text = utils.safe_read(ls.stdout)
    if text:
        print text
    ls.wait()
    if ls.returncode != 0:
        print >> sys.stderr, "extract_image fail: ", ls.returncode, filename, page_nr
        return None
    return tiff_name
def input(self, event):
    """Hand *event* to the attached processer.

    Raises PoolNotReady when no processer is attached yet; exceptions
    raised by the processer itself are logged and suppressed.
    """
    worker = self.processer
    if None == worker:
        raise PoolNotReady
    try:
        worker.proc(event)
    except:
        print_traceback(logger)
async def previous_track(query: types.CallbackQuery):
    """Callback handler: skip the user's Spotify player back one track and
    refresh the inline "currently playing" message.
    """
    await query_answer(query)
    token = await spotify_api.get_token(query.from_user.id)
    if not token:
        # no linked Spotify account — send the user to the auth flow
        return SendMessage(query.from_user.id, 'Please authorize',
                           reply_markup=auth_keyboard(query.from_user.id))
    await request_post('https://api.spotify.com/v1/me/player/previous',
                       headers={'Authorization': f'Bearer {token}'})
    # give Spotify a moment to actually switch tracks before re-querying
    await asyncio.sleep(.7)
    req = await request_get(
        'https://api.spotify.com/v1/me/player/currently-playing',
        headers={'Authorization': f'Bearer {token}'})
    try:
        # NOTE(review): local name shadows the json module in this scope
        json = await req.json()
        track = AttrDict(json['item'])
    except Exception as e:
        # a missing 'item' presumably means nothing is playing — verify
        print_traceback(e)
        return SendMessage(query.message.chat.id,
                           f'Play something in Spotify and try again',
                           reply_to_message_id=query.message.message_id)
    await bot.edit_message_text(
        text='Currently playing track:\n' +
        f'{track.artists[0].name} - {track.name}'
        f'<a href="{track.album.images[0].url}">​</a>',
        chat_id=query.message.chat.id,
        message_id=query.message.message_id,
        reply_markup=current_track_keyboard(track),
        parse_mode='HTML')
def run(self):
    """Log-dispatcher thread main loop.

    Drains self.logger_queue and fans each record out to every enabled
    remote logger whose level threshold admits it.  Exits once stop_event
    is set and a poll interval passes with the queue empty, then lets
    every logger flush via finish().
    """
    for logger in self.loggers:
        logger.initialize()
    while True:
        try:
            # block at most 1s so stop_event is noticed promptly
            data = self.logger_queue.get(True, 1)
        except Queue.Empty:
            if self.stop_event.is_set():
                break
            continue
        else:
            ts = data["timestamp"]
            name = data["name"]
            level = data["level"]
            body = data["body"]
            for logger in self.loggers:
                try:
                    if not logger.enable:
                        continue
                    # skip records below this logger's configured level
                    if __log_level__[level] < __log_level__[logger.level]:
                        continue
                    logger.log(ts, name, level, body)
                    g_local_logger.log("debug", "send log `%s` to remote logger %s" % (data, logger.name))
                except:
                    # one broken logger must not stop delivery to the rest
                    print_traceback(g_local_logger)
    for logger in self.loggers:
        logger.finish()
def parse(opt, filename):
    """Parse *filename* via do_parse; return its code, or -1 on any error."""
    try:
        return do_parse(opt, filename)
    except Exception:
        # log and map every failure to the conventional -1 code
        utils.print_traceback(filename)
        return -1
async def userbot_unban_user(chat_id: int, user_id: int) -> bool:
    """Lift all restrictions from *user_id* in *chat_id* via the userbot.

    Returns True on success; on any error prints the traceback and
    returns False.
    """
    try:
        chat = await client.get_input_entity(chat_id)
        target = await get_input_entity(user_id, chat_id)
        await client.edit_permissions(chat, target, until_date=0)
    except Exception:
        print_traceback(debug=DEBUG)
        return False
    return True
async def userbot_delete_message(chat_id: int, message_id: int) -> bool:
    """Delete one message from *chat_id* for everyone via the userbot.

    Returns True on success; on any error prints the traceback and
    returns False.
    """
    try:
        chat = await client.get_input_entity(chat_id)
        await client.delete_messages(chat, [message_id], revoke=True)
    except Exception:
        print_traceback(debug=DEBUG)
        return False
    return True
def do_file(job_queue, opt, filename): while True: page_nr = job_queue.get() if page_nr == None: print "Stopping thread" return try: do_one_page(opt, page_nr, filename) except Exception: utils.print_traceback(filename)
def msg_finish_ack(self, private_data):
    """Run the caller-supplied finish callback, then bump the finished
    message counter.

    Callback errors are printed and swallowed so the counter always
    advances.
    """
    finish_ack_cb = private_data['finish_ack_cb']
    cb_data = private_data['private_data']
    try:
        if finish_ack_cb:
            finish_ack_cb(cb_data)
    except:
        print_traceback()
    self._inc_finished_msg_count()
def run(self):
    """Worker-thread main loop.

    Builds the worker object, then consumes requests from the queue until
    dismissed *and* every accepted message has been acknowledged.  Two
    worker flavours exist:
      * "_sync"  — proc() returns the result and the callback fires here;
      * "_async" — proc() gets an AsyncWorkerContext and reports later.
    """
    try:
        worker = self._cls(self._args)
    except:
        print_traceback(logger)
        # cannot construct the worker: abandon this thread entirely
        exit()
    _type = worker._type
    if callable(getattr(worker, 'initialize', None)):
        worker.initialize()
    # signal the pool that this thread is up
    self._start_success.set()
    while True:
        try:
            request = self._requests_queue.get(True, self._poll_timeout)
            self._inc_total_msg_count()
        except Queue.Empty:
            if self._dismissed.isSet():
                # only exit once every accepted message was finished
                self_unfinished = self.get_total_msg_count(
                ) - self.get_finished_msg_count()
                if self_unfinished == 0:
                    break
            continue
        else:
            if "_sync" == _type:
                try:
                    result = worker.proc(*request.args, **request.kwds)
                    self._inc_finished_msg_count()
                    try:
                        request.callback(request.private_data, *result)
                    except:
                        # callback errors are logged, never propagated
                        print_traceback(logger)
                except:
                    # proc() failed: still count it, report via exc_callback
                    self._inc_finished_msg_count()
                    request.exc_callback(request.private_data, sys.exc_info())
            elif "_async" == _type:
                try:
                    async_context = AsyncWorkerContext(
                        request, self.async_callback_hook,
                        self.async_exc_callback_hook)
                    worker.proc(async_context, *request.args, **request.kwds)
                    self._inc_finished_msg_count()
                except:
                    self._inc_finished_msg_count()
                    request.exc_callback(request.private_data, sys.exc_info())
    if callable(getattr(worker, 'finish', None)):
        worker.finish()
def msg_recv_callback(self, raw, finish_ack_cb, private_data):
    """Wrap an incoming raw message in a ReaderEvent and hand it to the
    registered read callback.

    The total-message counter is bumped for every delivery; the original
    private_data and the finish callback travel with the event so
    msg_finish_ack can run them later.  Errors are printed and swallowed.
    """
    try:
        self._inc_total_msg_count()
        private_data = {
            "finish_ack_cb": finish_ack_cb,
            "private_data": private_data
        }
        event = ReaderEvent(raw, self.msg_finish_ack, private_data)
        self._read_callback(raw, event)
    except Exception, e:
        print_traceback()
def do_garbage_collection(context: CallbackContext) -> None:
    """Periodic job: prune stale per-chat state from the dispatcher.

    Three passes over chat_data:
      1. drop legacy keys ('my_msg', 'rest_users') left by old versions,
      2. drop UserManager instances pickled with an outdated version,
      3. expire tracked users and stored messages older than two hours.

    NOTE(review): the *_checked / *_freed counters are tallied but never
    reported within this block.
    """
    u_freed: int = 0
    m_freed: int = 0
    u_checked: int = 0
    m_checked: int = 0
    all_chat_data = updater.dispatcher.chat_data
    logger.debug('gc: check for abandoned keys')
    for chat_id in all_chat_data:
        # snapshot the keys so we can pop while looping
        for key in [k for k in all_chat_data[chat_id]]:
            if key in ('my_msg', 'rest_users'):
                d = all_chat_data[chat_id].pop(key, None)
                logger.warning(
                    f'Update pickle: Removed {{{key}: {d}}} for {chat_id}')
    logger.debug('gc: reinit old versions of u_mgr')
    for chat_id in all_chat_data:
        u_mgr: UserManager = all_chat_data[chat_id].get('u_mgr', None)
        # _cver is the version the object was created with; drop on mismatch
        if u_mgr and u_mgr._cver != u_mgr.ver:
            all_chat_data[chat_id].pop('u_mgr', None)
            logger.warning(
                f'Update u_mgr: reinit {u_mgr._cver} to {u_mgr.ver} for {chat_id}'
            )
            del u_mgr
    logger.debug('gc: check for outdated rest_users and sto_msgs')
    for chat_id in all_chat_data:
        u_mgr: UserManager = all_chat_data[chat_id].get('u_mgr', None)
        if u_mgr:
            for _ulist in (u_mgr._nfusers, u_mgr._fldusers):
                # iterate a copy (walrus) while mutating the live dict
                for k in (_culist := _ulist.copy()):
                    u_checked += 1
                    _u = _culist.get(k, None)
                    t = _u.time if _u else 0
                    # 7200 s = two-hour retention window
                    if int(time()) - t > 7200:
                        _ulist.pop(k, None)
                        u_freed += 1
        sto_msgs: list = all_chat_data[chat_id].get('stored_messages', None)
        if type(sto_msgs) is list:
            to_rm = list()
            try:
                for item in sto_msgs:
                    m_checked += 1
                    # 3-tuples carry (msg, ?, timestamp); others are kept
                    if len(item) == 3:
                        stime = item[2]
                        if int(time()) - stime > 7200:
                            to_rm.append(item)
            except Exception:
                print_traceback(debug=DEBUG)
            for item in to_rm:
                m_freed += 1
                try:
                    sto_msgs.remove(item)
                except Exception:
                    print_traceback(debug=DEBUG)
def kick_user(context: CallbackContext, chat_id: int, kick_id: int,
              reason: str = '') -> bool:
    """Kick (ban for ~1 year) *kick_id* from *chat_id*.

    Returns True on success, False when Telegram rejects the request or
    an unexpected error occurs.
    """
    bot: Bot = context.bot
    try:
        ok = bot.kick_chat_member(
            chat_id=chat_id, user_id=kick_id,
            until_date=datetime.utcnow() + timedelta(days=367))
        if not ok:
            raise TelegramError('kick_chat_member returned bad status')
        logger.info(f"Kicked {kick_id} in the group {chat_id}{', reason: ' if reason else ''}{reason}")
    except TelegramError as err:
        logger.error(f"Cannot kick {kick_id} in the group {chat_id}, {err}")
        return False
    except Exception:
        print_traceback(DEBUG)
        return False
    return True
def delete_message(context: CallbackContext, chat_id: int,
                   message_id: int) -> bool:
    """Delete one message from *chat_id*.

    NetworkError is re-raised so the caller may retry; any other failure
    is logged and reported as False.
    """
    try:
        ok = context.bot.delete_message(chat_id=chat_id,
                                        message_id=message_id)
        if not ok:
            raise TelegramError('delete_message returned bad status')
        logger.debug(f"Deleted message {message_id} in the group {chat_id}")
    except NetworkError:
        raise
    except TelegramError as err:
        logger.error(f"Cannot delete message {message_id} in the group {chat_id}, {err}")
        return False
    except Exception:
        print_traceback(DEBUG)
        return False
    return True
def handle_suggest_query(params, start_response):
    """Return modernization suggestions for (lang, title) as JSON.

    params         -- dict-like with 'lang' and 'title' keys.
    start_response -- WSGI start_response callable, forwarded to
                      return_response.
    """
    if params['lang'] and params['title']:
        try:
            modernize = modernization.Modernization(params['lang'])
            result = modernize.suggest_dict(params['title'])
            ret_code = '200 OK'
        # narrowed from a bare "except:", which also trapped SystemExit
        # and KeyboardInterrupt.
        except Exception:
            utils.print_traceback()
            ret_code = '500 Internal Server Error'
            result = { 'error' : 1, 'text' : ret_code }
    else:
        ret_code = '400 Bad Request'
        result = { 'error' : 1, 'text' : ret_code }
    return return_response(start_response, result, True, ret_code, 'application/json')
def handle_suggest_query(params, start_response):
    """Return modernization suggestions for (lang, title) as JSON.

    params         -- dict-like with 'lang' and 'title' keys.
    start_response -- WSGI start_response callable, forwarded to
                      return_response.
    """
    if params['lang'] and params['title']:
        try:
            modernize = modernization.Modernization(params['lang'])
            result = modernize.suggest_dict(params['title'])
            ret_code = '200 OK'
        # narrowed from a bare "except:", which also trapped SystemExit
        # and KeyboardInterrupt.
        except Exception:
            utils.print_traceback()
            ret_code = '500 Internal Server Error'
            result = {'error': 1, 'text': ret_code}
    else:
        ret_code = '400 Bad Request'
        result = {'error': 1, 'text': ret_code}
    return return_response(start_response, result, True, ret_code,
                           'application/json')
def safe_put(page, text, comment):
    """Save *text* to a wiki page, retrying transient save failures.

    Whitespace-only text is silently skipped (never blank a page).
    Permanent failures (locked page, missing page, no username, unknown
    exceptions) abort after logging; PageNotSaved and OtherPageSaveError
    are retried with a growing back-off, up to max_retry attempts.
    """
    if re.match("^[\s\n]*$", text):
        return
    max_retry = 5
    retry_count = 0
    while retry_count < max_retry:
        retry_count += 1
        try:
            page.put(text, comment = comment)
            break
        except pywikibot.LockedPage:
            print >> sys.stderr, "put error : Page %s is locked?!" % page.title(asUrl=True).encode("utf8")
            utils.print_traceback()
            break
        except pywikibot.NoPage:
            print >> sys.stderr, "put error : Page does not exist %s" % page.title(asUrl=True).encode("utf8")
            utils.print_traceback()
            break
        except pywikibot.NoUsername:
            print >> sys.stderr, "put error : No user name on wiki %s" % page.title(asUrl=True).encode("utf8")
            utils.print_traceback()
            break
        except pywikibot.PageNotSaved:
            print >> sys.stderr, "put error : Page not saved %s" % page.title(asUrl=True).encode("utf8")
            print >> sys.stderr, "text len: ", len(text)
            utils.print_traceback()
            print >> sys.stderr, "sleeping for:", 10 * retry_count
            # linear back-off: 10 s, 20 s, 30 s, ...
            time.sleep(10 * retry_count)
            continue
        except pywikibot.OtherPageSaveError:
            # this can occur for read-only DB because slave lag, so retry
            # a few time
            print >> sys.stderr, "put error : Page not saved %s" % page.title(asUrl=True).encode("utf8")
            print >> sys.stderr, "retrying in", retry_count, "minute(s)"
            time.sleep(retry_count * 60)
            continue
        except:
            print >> sys.stderr, "put error: unknown exception"
            utils.print_traceback()
            time.sleep(10)
            break
    # NOTE(review): this also fires when the final (5th) attempt succeeded,
    # because retry_count reaches max_retry before the successful break.
    if retry_count >= max_retry:
        print >> sys.stderr, "unable to save page after", max_retry, "try, bailing out"
        pass
def unban_user(context: CallbackContext, chat_id: int, user_id: int,
               reason: str = '') -> bool:
    """Restore read/write permissions for *user_id* in *chat_id*.

    NetworkError is re-raised so the caller may retry; other failures are
    logged and reported as False.
    """
    try:
        ok = context.bot.restrict_chat_member(
            chat_id=chat_id, user_id=user_id,
            permissions=CHAT_PERMISSION_RW,
            until_date=datetime.utcnow() + timedelta(days=367))
        if not ok:
            raise TelegramError('restrict_chat_member returned bad status')
        logger.info(f"Unbanned {user_id} in the group {chat_id}{', reason: ' if reason else ''}{reason}")
    except NetworkError:
        raise
    except TelegramError as err:
        logger.error(f"Cannot unban {user_id} in the group {chat_id}, {err}")
        return False
    except Exception:
        print_traceback(DEBUG)
        return False
    return True
def handle_blacklist_query(params, start_response):
    """Persist a modernization blacklist for a language; respond as JSON.

    params         -- dict-like with 'lang' and 'blacklist' (JSON string).
    start_response -- WSGI start_response callable, forwarded on.
    """
    if params['lang'] and params['blacklist']:
        try:
            modernize = modernization.Modernization(params['lang'])
            blacklist = json.loads(params['blacklist'])
            modernize.save_blacklist(blacklist)
            ret_code = '200 OK'
            result = { 'error' : 0, 'text' :'OK' }
        # narrowed from a bare "except:", which also trapped SystemExit
        # and KeyboardInterrupt.
        except Exception:
            utils.print_traceback()
            ret_code = '500 Internal Server Error'
            result = { 'error' : 1, 'text' : ret_code }
    else:
        ret_code = '400 Bad Request'
        result = { 'error' : 1, 'text' : ret_code }
    return return_response(start_response, result, True, ret_code, 'application/json')
def add_flac_tags(fileobj, tags, image, lyrics=None, image_mimetype='image/jpg'):
    """Embed cover art and key/value tags into a FLAC file object.

    fileobj        -- seekable file-like object holding FLAC audio.
    tags           -- mapping of tag name -> value (stringified on write).
    image          -- raw bytes of the cover picture.
    lyrics         -- accepted for interface compatibility; currently unused.
    image_mimetype -- NOTE(review): default 'image/jpg' is not a standard
                      MIME type ('image/jpeg' is); kept for compatibility.

    Per-tag write failures are printed and skipped; the file position is
    rewound to the start afterwards.
    """
    handle = FLAC(fileobj)
    cover = Picture()
    cover.data = image
    # picture type 3 = "Cover (front)" in the FLAC picture taxonomy
    cover.type = 3
    cover.mime = image_mimetype
    handle.add_picture(cover)
    for key, val in tags.items():
        try:
            handle[key] = str(val)
        except Exception as e:
            print_traceback(e)
    handle.save(fileobj)
    fileobj.seek(0)
def handle_blacklist_query(params, start_response):
    """Persist a modernization blacklist for a language; respond as JSON.

    params         -- dict-like with 'lang' and 'blacklist' (JSON string).
    start_response -- WSGI start_response callable, forwarded on.
    """
    if params['lang'] and params['blacklist']:
        try:
            modernize = modernization.Modernization(params['lang'])
            blacklist = json.loads(params['blacklist'])
            modernize.save_blacklist(blacklist)
            ret_code = '200 OK'
            result = {'error': 0, 'text': 'OK'}
        # narrowed from a bare "except:", which also trapped SystemExit
        # and KeyboardInterrupt.
        except Exception:
            utils.print_traceback()
            ret_code = '500 Internal Server Error'
            result = {'error': 1, 'text': ret_code}
    else:
        ret_code = '400 Bad Request'
        result = {'error': 1, 'text': ret_code}
    return return_response(start_response, result, True, ret_code,
                           'application/json')
def handle_scan_query(params, start_response):
    """Render an HTML page listing wiki pages that have no scan.

    Supports offset/limit paging (limit capped at 500); requires a 'lang'
    parameter naming the wikisource subdomain.
    """
    text = common_html.get_head(
        'pages without scan', css='shared.css').encode('utf-8') + '\n <body>\n'
    if params['lang']:
        try:
            offset = int(params.get('offset', 0))
            limit = min(500, int(params.get('limit', 500)))
            lang = params['lang']
            conn = db.create_conn(domain=lang, family='wikisource')
            cursor = db.use_db(conn, domain=lang, family='wikisource')
            ns = ws_category.domain_urls[lang][0]
            # pages that are disambiguations or already have a scan
            page_ids = disamb_page(cursor) | page_with_scan(ns, cursor)
            all_p = all_pages(cursor)
            result = [(unicode(x[0], 'utf-8'), x[1]) for x in all_p
                      if x[2] not in page_ids]
            text += 'Total: ' + str(len(result)) + '<br />'
            next_link = prev_next_link(False, len(result), lang, limit, offset)
            prev_link = prev_next_link(True, len(result), lang, limit, offset)
            text += prev_link + ' ' + next_link + '<br /><br />'
            result = result[offset:offset + limit]
            for x in result:
                text += u'<a href="//%s.wikisource.org/wiki/%s">' % (
                    lang, x[0]) + x[0].replace('_', ' ') + u'</a>, ' + str(
                        x[1]) + u'<br />'
            text += u'<br />' + prev_link + ' ' + next_link
            cursor.close()
            conn.close()
            ret_code = '200 OK'
        # narrowed from a bare "except:" (also caught SystemExit /
        # KeyboardInterrupt).  NOTE(review): cursor/conn are not closed on
        # this path — consider a finally block.
        except Exception:
            utils.print_traceback()
            ret_code = '500 Internal Server Error'
            text = '<h1>' + ret_code + '</h1>'
    else:
        ret_code = '400 Bad Request'
        text = '<h1>' + ret_code + '</h1>'
    text += ' </body>\n</html>'
    return return_response(start_response, text.encode('utf-8'), False,
                           ret_code, 'text/html')
def handle_scan_query(params, start_response):
    """Render an HTML page listing wiki pages that have no scan.

    Supports offset/limit paging (limit capped at 500); requires a "lang"
    parameter naming the wikisource subdomain.
    """
    text = common_html.get_head("pages without scan", css="shared.css").encode("utf-8") + "\n <body>\n"
    if params["lang"]:
        try:
            offset = int(params.get("offset", 0))
            limit = min(500, int(params.get("limit", 500)))
            lang = params["lang"]
            conn = db.create_conn(domain=lang, family="wikisource")
            cursor = db.use_db(conn, domain=lang, family="wikisource")
            ns = ws_category.domain_urls[lang][0]
            # pages that are disambiguations or already have a scan
            page_ids = disamb_page(cursor) | page_with_scan(ns, cursor)
            all_p = all_pages(cursor)
            result = [(unicode(x[0], "utf-8"), x[1]) for x in all_p if x[2] not in page_ids]
            text += "Total: " + str(len(result)) + "<br />"
            next_link = prev_next_link(False, len(result), lang, limit, offset)
            prev_link = prev_next_link(True, len(result), lang, limit, offset)
            text += prev_link + " " + next_link + "<br /><br />"
            result = result[offset : offset + limit]
            for x in result:
                text += (
                    u'<a href="//%s.wikisource.org/wiki/%s">' % (lang, x[0])
                    + x[0].replace("_", " ")
                    + u"</a>, "
                    + str(x[1])
                    + u"<br />"
                )
            text += u"<br />" + prev_link + " " + next_link
            cursor.close()
            conn.close()
            ret_code = "200 OK"
        # narrowed from a bare "except:" (also caught SystemExit /
        # KeyboardInterrupt).  NOTE(review): cursor/conn are not closed on
        # this path — consider a finally block.
        except Exception:
            utils.print_traceback()
            ret_code = "500 Internal Server Error"
            text = "<h1>" + ret_code + "</h1>"
    else:
        ret_code = "400 Bad Request"
        text = "<h1>" + ret_code + "</h1>"
    text += " </body>\n</html>"
    return return_response(start_response, text.encode("utf-8"), False, ret_code, "text/html")
async def now_playing(message: types.Message):
    """Reply with the user's currently playing Spotify track.

    Admins and donors get unrestricted access; everyone else gets a
    limited trial counted via trial_mode_times.
    """
    if message.from_user.id not in admins \
            and message.from_user.id not in donated_users:
        trial_mode = True
        times = int(await trial_mode_times(message.from_user.id, 'spotify'))
        if times > 10:
            return SendMessage(
                message.chat.id,
                'This feature works only for donated users\n'
                'please /donate and help developer')
    else:
        trial_mode = False
    token = await get_token(message.from_user.id)
    if not token:
        return SendMessage(message.chat.id, 'Please authorize',
                           reply_markup=auth_keyboard(message.from_user.id))
    req = await request_get(
        'https://api.spotify.com/v1/me/player/currently-playing',
        headers={'Authorization': f'Bearer {token}'})
    try:
        # renamed from "json" which shadowed the json module
        payload = await req.json()
        track = AttrDict(payload['item'])
    except Exception as e:
        print_traceback(e)
        return SendMessage(message.chat.id,
                           'Play something in Spotify and try again',
                           reply_to_message_id=message.message_id)
    if trial_mode:
        # fixed missing space: previously rendered "...more timesto enable..."
        await bot.send_message(
            message.chat.id,
            f'You can use this feature {10-times} more times '
            'to enable this and many other features '
            'permanently, please /donate')
    return SendMessage(message.chat.id,
                       'Currently playing track:\n'
                       f'{track.artists[0].name} - {track.name}'
                       f'<a href="{track.album.images[0].url}">\u200b</a>',
                       reply_markup=current_track_keyboard(track),
                       parse_mode='HTML',
                       reply_to_message_id=message.message_id)
def thread_run(self, processer, dismissed, start_success):
    """Event-pool worker loop: process queued events until *dismissed*.

    initialize() failures are logged but the loop still starts —
    presumably deliberate best-effort behaviour; TODO confirm.
    """
    try:
        processer.initialize()
    except:
        print_traceback(logger)
    # signal the pool that this thread reached its main loop
    start_success.set()
    while True:
        try:
            event = self._event_queue.get(True, self._poll_timeout)
        except Queue.Empty:
            # only stop once dismissed and a poll interval passed empty
            if dismissed.is_set():
                break
            continue
        else:
            try:
                processer.proc(event)
            except:
                # one bad event must not kill the worker thread
                print_traceback(logger)
    processer.finish()
async def userbot_restrict_user(chat_id: int, user_id: int) -> bool:
    """Mute *user_id* in *chat_id*: viewing stays allowed, everything
    else is revoked.

    Returns True on success; prints the traceback and returns False on
    any error.
    """
    # every grantable right except viewing messages is switched off
    revoked = dict(send_messages=False, send_media=False,
                   send_stickers=False, send_gifs=False, send_games=False,
                   send_inline=False, send_polls=False, change_info=False,
                   invite_users=False, pin_messages=False)
    try:
        await client.edit_permissions(
            await client.get_input_entity(chat_id),
            await get_input_entity(user_id, chat_id),
            until_date=0, view_messages=True, **revoked)
        return True
    except Exception:
        print_traceback(debug=DEBUG)
        return False
def get_djvu(cache, mysite, djvuname, check_timestamp = False):
    """Fetch the text layer of a DjVu file, through a local cache.

    Cache entries are (sha1, text_layer) tuples keyed by '<name>.dat'.
    With check_timestamp=True a cached entry is re-validated against the
    file's current SHA1 and refreshed if the upload changed.  Returns the
    text layer, or None when the File: page is missing or extraction
    fails.
    """
    print "get_djvu", repr(djvuname)
    djvuname = djvuname.replace(" ", "_")
    cache_filename = djvuname + '.dat'
    obj = cache.get(cache_filename)
    if not obj:
        print "CACHE MISS"
        filepage = get_filepage(mysite, djvuname)
        if not filepage:
            # can occur if File: has been deleted
            return None
        try:
            url = filepage.fileUrl()
            obj = extract_djvu_text(url, djvuname, filepage.getFileSHA1Sum())
        except:
            utils.print_traceback("extract_djvu_text() fail")
            obj = None
        if obj:
            cache.set(cache_filename, obj)
        else:
            return None
    else:
        if check_timestamp:
            filepage = get_filepage(mysite, djvuname)
            if not filepage:
                # can occur if File: has been deleted
                return None
            sha1 = filepage.getFileSHA1Sum()
            # obj[0] holds the SHA1 recorded when the entry was cached
            if sha1 != obj[0]:
                print "OUTDATED FILE"
                url = filepage.fileUrl()
                try:
                    obj = extract_djvu_text(url, djvuname, sha1)
                    cache.set(cache_filename, obj)
                except:
                    return None
    return obj[1]
def exec_request(self, r):
    """Submit job *r* to Sun Grid Engine and record its state in the DB.

    Parses the SGE job number from qsub's "Your job NNN ..." banner; when
    parsing fails the job is marked 'sge_fail' with job number 0.
    """
    sge_job_nr = 0
    cmdline_arg = job_cmdline_arg(r, 'job_run_cmd')
    sge_cmdline = sge_cmdline_arg(r)
    ls = subprocess.Popen(sge_cmdline + cmdline_arg, stdin=None,
                          stdout=subprocess.PIPE, close_fds = True)
    text = ls.stdout.read()
    ls.wait()
    try:
        # qsub prints e.g. "Your job 12345 (...) has been submitted"
        sge_job_nr = int(re.search('Your job (\d+) ', text).group(1))
        new_state = 'running'
    except:
        utils.print_traceback("sge failure to exec job: %d" % r['job_id'], text)
        new_state = 'sge_fail'
    with db.connection(self):
        q = 'UPDATE job SET job_state=%s, sge_jobnumber=%s WHERE job_id=%s'
        self.cursor.execute(q, [ new_state, sge_job_nr, r['job_id'] ])
async def authorize(code=None, user_id=None, refresh=False):
    """Obtain a Spotify OAuth token.

    With code+user_id: exchange an authorization code (or, if refresh is
    True, a refresh token passed via *code*) for a per-user access token
    and persist it.  Without them: fetch an app-level client-credentials
    token and cache it on `var` along with its absolute expiry time.
    Returns the access token, or False when the token request fails.
    """
    if code and user_id:
        data = {}
        if refresh:
            grant_type = 'refresh_token'
            data['refresh_token'] = code
        else:
            grant_type = 'authorization_code'
            data['code'] = code
        data.update({
            'grant_type': grant_type,
            'redirect_uri': REDIRECT_URL,
            'state': user_id})
    else:
        # app-level token, not tied to any user
        data = {'grant_type': 'client_credentials'}
    try:
        req = await request_post(
            'https://accounts.spotify.com/api/token',
            data=data,
            headers={
                **AUTH_HEADER,
                'Content-Type': 'application/x-www-form-urlencoded'})
    except ValueError as exc:
        print_traceback(exc)
        return False
    resp = await req.json()
    if code and user_id:
        access_token = resp.get('access_token')
        refresh_token = resp.get('refresh_token')
        await set_spotify_token(
            user_id, access_token, refresh_token)
        return access_token
    else:
        var.spotify_token = resp.get('access_token')
        # store the absolute expiry timestamp, not the relative lifetime
        var.spotify_token_expires = int(resp.get('expires_in')) + time()
        return var.spotify_token
def safe_put(page, text, comment):
    """Save *text* to a wiki page, retrying transient PageNotSaved errors.

    Whitespace-only text is skipped (never blank a page).  Permanent
    errors (locked/missing page, no username, unknown exceptions) give up
    after one attempt; only PageNotSaved counts as a retry, with a linear
    back-off, up to max_retry times.
    """
    if re.match("^[\s\n]*$", text):
        return
    max_retry = 3
    retry_count = 0
    while retry_count < max_retry:
        try:
            page.put(text, comment=comment)
            break
        except pywikibot.LockedPage:
            print >>sys.stderr, "put error : Page %s is locked?!" % page.title(asUrl=True).encode("utf8")
            utils.print_traceback()
            break
        except pywikibot.NoPage:
            print >>sys.stderr, "put error : Page does not exist %s" % page.title(asUrl=True).encode("utf8")
            utils.print_traceback()
            break
        except pywikibot.NoUsername:
            print >>sys.stderr, "put error : No user name on wiki %s" % page.title(asUrl=True).encode("utf8")
            utils.print_traceback()
            break
        except pywikibot.PageNotSaved:
            print >>sys.stderr, "put error : Page not saved %s" % page.title(asUrl=True).encode("utf8")
            print >>sys.stderr, "text len: ", len(text)
            utils.print_traceback()
            print >>sys.stderr, "sleeping for:", 10 * (retry_count + 1)
            # back-off grows 10 s, 20 s, 30 s; only this branch increments
            # the retry counter
            time.sleep(10 * (retry_count + 1))
            retry_count += 1
            continue
        except:
            print >>sys.stderr, "put error: unknown exception"
            utils.print_traceback()
            time.sleep(10)
            break
    if retry_count == max_retry:
        print >>sys.stderr, "unable to save page after", max_retry, "try, bailing out"
        pass
def main():
    """Glance monitoring probe: create, then delete, a tiny throwaway image.

    Authenticates via Keystone, uploads a 1 KiB qcow2 image named
    '__nvs_monitor__', deletes it, and prints 'success'/'failed' for the
    monitoring harness via utils.print_result.
    """
    try:
        username, password, tenant_name, keystone_url = \
            utils.get_token_config()
        glance_url = utils.get_glance_url()
        if DEBUG:
            print username, password, tenant_name, keystone_url, glance_url
        user = keystone.Keystone(username, password, tenant_name,
                                 keystone_url)
        token = user.get_token()
        # glance_url = 'http://localhost:9292'
        glance = glanceclient.Client(endpoint=glance_url, token=token)
        image = glance.images.create(name='__nvs_monitor__',
                                     data='a' * 1024,
                                     disk_format='qcow2',
                                     container_format='ovf',
                                     is_public=False)
        if image.status != 'active':
            print 'create image error. %s' % image
        image.delete()
        try:
            # a 404 after deletion is the expected outcome
            image.get()
        except glanceexc.HTTPNotFound:
            pass
    except Exception:
        if DEBUG:
            utils.print_traceback()
        result = 'failed'
    else:
        result = 'success'
    utils.print_result(result)
def exec_request(self, r): sge_job_nr = 0 # This is a bit convoluted but we need it to avoid a race condition: # we set the job as running before starting it so on if this script # run twice in parallel we don't try to start the same job twice. Then # when the job really started or fail to start we update its state # again. As we don't know yet the sge job number, we setup it as zero. # Note this could be done in pending_request() but I prefer to protect # it locally. really_pending = False with db.connection(self): q = 'UPDATE job SET job_state=%s, sge_jobnumber=%s WHERE job_id=%s AND job_state="pending"' if self.cursor.execute(q, [ 'running', 0, r['job_id'] ]): really_pending = True if not really_pending: print >> sys.stderr, "run request for job_id %s cancelled, as it's no longer pending" % r['job_id'] return cmdline_arg = job_cmdline_arg(r, 'job_run_cmd') sge_cmdline = sge_cmdline_arg(r) ls = subprocess.Popen(sge_cmdline + cmdline_arg, stdin=None, stdout=subprocess.PIPE, close_fds = True) text = ls.stdout.read() ls.wait() try: sge_job_nr = int(re.search('Your job (\d+) ', text).group(1)) new_state = 'running' except: utils.print_traceback("sge failure to exec job: %d" % r['job_id'], text) new_state = 'sge_fail' # Now we can really update the job state, see comment above. with db.connection(self): q = 'UPDATE job SET job_state=%s, sge_jobnumber=%s WHERE job_id=%s' self.cursor.execute(q, [ new_state, sge_job_nr, r['job_id'] ])
def running_jobs(job_base_name):
    """Return the set of SGE job ids whose name starts with *job_base_name*.

    Stream-parses `qstat -xml`; on any qstat or parse failure the set is
    emptied and 'RECOVER' is printed so the caller re-syncs its state.
    """
    jobs = set()
    try:
        ls = subprocess.Popen([ qstat, '-xml' ], stdout=subprocess.PIPE, close_fds = True)
        # incremental parse keeps memory flat on large queues
        for event, elem in etree.iterparse(ls.stdout):
            if event == 'end' and elem.tag == 'job_list':
                job_id = elem.find('JB_job_number').text
                job_name = elem.find('JB_name').text
                if job_name.startswith(job_base_name):
                    jobs.add(int(job_id))
                # free the finished element so iterparse does not retain it
                elem.clear()
        ls.wait()
        if ls.returncode:
            print >> sys.stderr, 'qstat failed', ls.returncode
            print >> sys.stderr, 'RECOVER'
            jobs = set()
    except:
        utils.print_traceback()
        print >> sys.stderr, 'RECOVER'
        jobs = set()
    return jobs
#!/usr/bin/env python #-*- encoding: utf-8 -*- import nova_utils import utils try: nova_utils.init_nova() host = nova_utils.get_random_host() utils.log(host) info = nova_utils.rpccall_scheduler('show_host_resources', host=host) assert info utils.log(info) except Exception: utils.print_traceback() result = 'failed' else: result = 'success' utils.print_result(result)