def fetch(self, startTime, endTime):
    data = whisper.fetch(self.fs_path, startTime, endTime)
    if not data:
        return None

    time_info, values = data
    (start, end, step) = time_info

    # Merge in data from carbon's cache
    try:
        cached_datapoints = CarbonLink.query(self.real_metric_path)
    except:  # FIXME
        log.exception("Failed CarbonLink query '%s'" % self.real_metric_path)
        cached_datapoints = []

    for (timestamp, value) in cached_datapoints:
        interval = timestamp - (timestamp % step)
        try:
            i = int(interval - start) / step
            values[i] = value
        except:
            pass

    return (time_info, values)
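To make the cache-merge arithmetic concrete, here is a worked example with assumed values (start=1000, step=60):

# timestamp = 1130 snaps to its interval: 1130 - (1130 % 60) = 1080
# slot index: i = (1080 - 1000) / 60 = 1 (integer division in this Python 2 code)
# so the cached value overwrites values[1]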
def _get_domain_ips_and_macs(domain: libvirt.virDomain) -> Tuple[List[str], List[str]]:
    interfaces_sources = [
        # getting all DHCP leases IPs
        libvirt.VIR_DOMAIN_INTERFACE_ADDRESSES_SRC_LEASE,
        # getting static IPs via ARP
        libvirt.VIR_DOMAIN_INTERFACE_ADDRESSES_SRC_ARP,
    ]

    interfaces = {}
    for addresses_source in interfaces_sources:
        try:
            interfaces.update(**domain.interfaceAddresses(addresses_source))
        except libvirt.libvirtError:
            log.exception("Got an error while updating domain's network addresses")

    ips = []
    macs = []
    logging.debug(f"Host {domain.name()} interfaces are {interfaces}")
    if interfaces:
        for (_, val) in interfaces.items():
            if val["addrs"]:
                for addr in val["addrs"]:
                    ips.append(addr["addr"])
                    macs.append(val["hwaddr"])
    if ips:
        logging.info("Host %s ips are %s", domain.name(), ips)
    if macs:
        logging.info("Host %s macs are %s", domain.name(), macs)
    return ips, macs
def request(self, request):
    if self._socket is None:
        if not self._connect():
            return None
    packet = struct.pack('<I%ds' % len(request), len(request), request)
    try:
        self._socket.sendall(packet)
        length_data = self._recv(4)
    except Exception as ex:
        self.close()
        if not self._retry:
            log.exception('tcp_request_fail|error=recv_length_fail,address=%s,port=%u,request=%s',
                          self._address, self._port, request.encode('hex'))
            return None
        if isinstance(ex, socket.timeout):
            log.exception('tcp_request_fail|error=recv_length_timeout,address=%s,port=%u,retry=0,request=%s',
                          self._address, self._port, request.encode('hex'))
            return None
        log.warn('tcp_request_fail|error=recv_length_fail_will_retry,address=%s,port=%u,retry=0,request=%s,ex=%s',
                 self._address, self._port, request.encode('hex'), ex)
        if not self._connect():
            log.exception('tcp_request_fail|error=retry_reconnect,address=%s,port=%u,retry=0,request=%s',
                          self._address, self._port, request.encode('hex'))
            return None
        try:
            self._socket.sendall(packet)
            length_data = self._recv(4)
        except Exception as ex:
            log.exception('tcp_request_fail|error=retry_recv_length_fail,address=%s,port=%u,retry=1,request=%s',
                          self._address, self._port, request.encode('hex'))
            self.close()
            return None
    try:
        length = struct.unpack('<I', length_data)[0]
        return self._recv(length)
    except Exception as ex:
        log.exception('tcp_request_fail|error=recv_data_fail,address=%s,port=%u,request=%s',
                      self._address, self._port, request.encode('hex'))
        self.close()
        return None
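The wire format used here is a 4-byte little-endian length prefix followed by the payload. A minimal decoding sketch for the same framing (the function name is illustrative, not part of the original client):

import struct

def decode_frame(buf):
    # Frame layout: 4-byte little-endian unsigned length, then `length` payload bytes.
    if len(buf) < 4:
        return None, buf  # need more bytes for the header
    (length,) = struct.unpack('<I', buf[:4])
    if len(buf) < 4 + length:
        return None, buf  # payload not fully received yet
    return buf[4:4 + length], buf[4 + length:]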
def __init__(self):
    try:
        self.connection = cx_Oracle.connect(connection_string)
        self.cursor = self.connection.cursor()
        log.info("Connected to Oracle Host: %s" % config.Oracle_Host)
    except cx_Oracle.DatabaseError, e:
        log.exception(e)
def eval(self, func, args=None):
    '''Evaluate an uristscript or other callable object.'''
    # If the function is actually an urist, make sure we know that
    uristinstance = None
    if isinstance(func, uristscript):
        uristinstance = func
        func = uristinstance.func
        name = uristinstance.getname()
    else:
        name = func.__name__

    # Actually execute the script
    log.info('Running script %s%s.' % (name, (' with args %s' % args) if args else ''))
    try:
        response = func(self.df, **args) if args else func(self.df)  # Call the function
        if response is not None:
            # Handle success/failure response
            log.info(str(response))
            (self.successes if response.success else self.failures).append(uristinstance if uristinstance else func)
        else:
            log.error('Received no response from script %s.' % name)
            self.noresponse.append(uristinstance if uristinstance else func)
    except Exception:
        log.exception('Unhandled exception while running script %s.' % name)
        return False
    else:
        return True
def gTrans(query, destlanguage='en', prompterror=True):
    log.info("Using Google translate to determine the unknown translation of %s", query)

    # No meanings if we don't have a query
    if query is None:
        return None

    # No meanings if our query is blank after stripping HTML out
    query = utils.striphtml(query)
    if query.strip() == u"":
        return None

    try:
        # Return the meanings (or lack of them) directly
        return lookup(query, destlanguage)
    except urllib2.URLError, e:
        # The only 'meaning' should be an error telling the user that there was some problem
        log.exception("Error while trying to obtain Google response")
        if prompterror:
            return [[Word(Text('<span style="color:gray">[Internet Error]</span>'))]]
        else:
            return None
def add_request(cls, request):
    """ Add website and user in db to process """
    try:
        user = None
        if not Database.user_exists(email_id=request['email_id']):
            user_id = get_new_id(request['email_id'])
            # create user identity object and save to db
            user = User(id=user_id, email_id=request['email_id'])
            s.add(user)
            s.commit()
        else:
            user = Database.fetch_user(email_id=request['email_id'], serialize=False)

        # create website identity object and save to db
        website_id = get_new_id(request['url'])
        website = Website(id=website_id, url=request['url'])
        website.keywords = ','.join(request['keywords'])
        website.status = Status.PENDING
        website.user = user
        s.add(website)
        s.commit()
        log.info('Add Request :: {0} :: {1}'.format(website.url, user.email_id))
        return WebsiteJsonSerializer().serialize(website)
    except Exception:
        log.exception('Error in add request')
def parse_radius_log(radius_log_file, out_dir):
    log_file = open(radius_log_file, 'r')
    outfile_name = "RADIUS_MAC2NSP_%s.txt" % (DATE.strftime('%Y%m%d'))
    output_file = open(os.path.join(out_dir, outfile_name), 'w')
    existing = set()
    try:
        for record in rad_recv_record_extractor(log_file):
            for converter_func, check_func in CONVERTER_CHAIN:
                if check_func(record):
                    try:
                        mac, nssp = converter_func(record)
                    except Exception, e:
                        # logging.error("Error passing radius record: " + repr(e))
                        log.exception('Error parsing radius record')
                        continue
                    mac2nsp = "%s %s" % (mac, nssp)
                    if mac2nsp in existing:
                        log.warn("Duplicate record found: %s, ignoring..." % mac2nsp)
                    else:
                        log.info("Matched: %s %s" % (mac, nssp))
                        output_file.write("%s %s\n" % (mac, nssp))
                        existing.add(mac2nsp)
                    break
            else:
                log.info("Radius record not matching rule: " + repr(record))
    finally:
        log_file.close()
        output_file.close()
def run(self):
    random.seed()
    self.on_init()
    from gtcpclient import GTcpClient
    log.info('tcp_worker_start|id=%d', self._id)
    self._client = GTcpClient(self._config.WORK_ENDPOINT['address'], self._config.WORK_ENDPOINT['port'], 0)
    while True:
        try:
            request = self._client.receive()
            if request is None:
                log.warn('tcp_worker_lost_connection|client_id=%s,client=%s',
                         self._client.id.encode('hex'), self._client.remote_address)
                self._client.close()
            elif len(request) < GTCP_HEADER_SIZE:
                log.error('tcp_worker_request_packet_error|client_id=%s,client=%s,request=%s',
                          self._client.id.encode('hex'), self._client.remote_address, request.encode('hex'))
                self._client.close()
            else:
                request_cmd = request[:GTCP_CMD_SIZE]
                request_client = TcpEndpoint(request[GTCP_CMD_SIZE:GTCP_HEADER_SIZE])
                reply_body = None
                if request_cmd == GTCP_CMD_RELAY:
                    request_body = request[GTCP_HEADER_SIZE:]
                    reply_body = self.on_packet(request_client, request_body)
                elif request_cmd == GTCP_CMD_CONNECT:
                    reply_body = self.on_client_connect(request_client)
                elif request_cmd == GTCP_CMD_DISCONNECT:
                    self.on_client_disconnect(request_client)
                if reply_body is None:
                    self._client.send(GTCP_CMD_NONE + request_client.client_id)
                else:
                    self._client.send(GTCP_CMD_RELAY + request_client.client_id + reply_body)
        except Exception as ex:
            log.exception('tcp_worker_exception|id=%u,exception=%s', self._id, ex, exc_info=True)
            self._client.close()
def _sysctl(self, cmd):
    """execute() doesn't return the exit status of the command it runs,
    it returns stdout and stderr.

    Setting check_exit_code=True will cause it to raise a RuntimeError if
    the exit status of the command is non-zero, which in sysctl's case is
    an error. So we're normalizing that into zero (success) and one
    (failure) here to mimic what "echo $?" in a shell would be.

    This is all because sysctl is too verbose and prints the value you
    just set on success, unlike most other utilities that print nothing.

    execute() will have dumped a message to the logs with the actual output
    on failure, so it's not lost, and we don't need to print it here.
    """
    cmd = ['sysctl', '-w'] + cmd
    ip_wrapper = IPWrapper(self.namespace)
    try:
        ip_wrapper.netns.execute(cmd, run_as_root=True, check_exit_code=True)
    except RuntimeError:
        LOG.exception("Failed running %s", cmd)
        return 1
    return 0
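Since the return value mimics a shell exit status, a caller can branch on truthiness; a minimal sketch, with a hypothetical driver object and sysctl key:

# Hypothetical caller of _sysctl(); `driver` and the key are illustrative.
if driver._sysctl(['net.ipv4.ip_forward=1']):
    LOG.error('enabling IP forwarding failed')  # returned 1, i.e. a non-zero "echo $?"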
def get_results(self):
    if self.failed:
        return

    if self.cachedResult is not None:
        results = self.cachedResult
    else:
        if self.connection is None:
            self.send()
        try:
            response = self.connection.getresponse()
            assert response.status == 200, "received error response %s - %s" % (response.status, response.reason)
            result_data = response.read()
            results = unpickle.loads(result_data)
        except:
            log.exception("FindRequest.get_results(host=%s, query=%s) exception processing response" % (self.store.host, self.query))
            self.store.fail()
            return
        cache.set(self.cacheKey, results, settings.FIND_CACHE_DURATION)

    for node_info in results:
        if node_info.get('is_leaf'):
            reader = RemoteReader(self.store, node_info, bulk_query=self.query.pattern)
            node = LeafNode(node_info['path'], reader)
        else:
            node = BranchNode(node_info['path'])
        node.local = False
        yield node
def getStatus():
    try:
        with open(ROOM_STATUS_FILE, 'r') as f:
            roomInternal = simplejson.loads(f.read())
        if determineStatus(roomInternal['lastOpenSignal'], roomInternal['people']):
            status = {'roomStatus': 'open', 'since': int(roomInternal["lastStatusSignal"])}
        else:
            since = datetime.fromtimestamp(roomInternal["lastOpenSignal"]) + timedelta(minutes=ROOM_TIMEOUT)
            sinceInSecs = time.mktime(since.timetuple())
            status = {'roomStatus': 'closed', 'since': int(sinceInSecs)}
        return status
    except IOError:
        log.exception("Could not read %s." % ROOM_STATUS_FILE)
        message = {'success': False, 'status': 'Room status record unreadable.'}
        resp = jsonify(message)
        resp.status_code = 500
        return resp
def submitStatus(people):
    if not determineStatus(datetime.now(), people):
        # people limit not reached, but thx anyway
        return jsonify({'success': True})
    try:
        with open(ROOM_STATUS_FILE, 'r') as f:
            roomInternal = simplejson.loads(f.read())
        with open(ROOM_STATUS_FILE, 'w') as f:
            if determineStatus(roomInternal['lastOpenSignal'], roomInternal['people']) != \
                    determineStatus(datetime.now(), people):
                newStatus = {
                    'lastOpenSignal': time.time(),
                    'lastStatusSignal': time.time(),
                    'people': people,
                }
            else:
                newStatus = {
                    'lastOpenSignal': time.time(),
                    'lastStatusSignal': roomInternal['lastStatusSignal'],
                    'people': people,
                }
            f.write(simplejson.dumps(newStatus))
        return jsonify({'success': True})
    except IOError:
        log.exception("Could not read/write %s." % ROOM_STATUS_FILE)
        message = {'success': False, 'status': 'Room status record unwriteable/unreadable.'}
        resp = jsonify(message)
        resp.status_code = 500
        return resp
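For reference, the record that getStatus and submitStatus exchange through ROOM_STATUS_FILE is a flat JSON object; a representative instance (values illustrative):

{"lastOpenSignal": 1358629487.0, "lastStatusSignal": 1358629487.0, "people": 3}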
def send(self):
    log.info("FindRequest.send(host=%s, query=%s) called" % (self.store.host, self.query))

    self.cachedResult = cache.get(self.cacheKey)
    if self.cachedResult is not None:
        log.info("FindRequest(host=%s, query=%s) using cached result" % (self.store.host, self.query))
        return

    self.connection = HTTPConnectionWithTimeout(self.store.host)
    self.connection.timeout = settings.REMOTE_FIND_TIMEOUT

    query_params = [
        ('local', '1'),
        ('format', 'pickle'),
        ('query', self.query.pattern),
    ]
    if self.query.startTime:
        query_params.append(('from', self.query.startTime))
    if self.query.endTime:
        query_params.append(('until', self.query.endTime))
    query_string = urlencode(query_params)

    try:
        self.connection.request('GET', '/metrics/find/?' + query_string)
    except:
        log.exception("FindRequest.send(host=%s, query=%s) exception during request" % (self.store.host, self.query))
        self.store.fail()
        self.failed = True
def make_grequest(urls, content=False, size=5):
    """ Return the dict of (url, status_code, content_type Or content)
    of each list of url in urls """
    try:
        reqs = set()
        ret = dict()
        if content:
            reqs = (grequests.get(url) for url in urls)
        else:
            reqs = (grequests.head(url) for url in urls)
        res = grequests.map(reqs, stream=False, size=size)
        for url, r in zip(urls, res):
            log.info('Made Request %s :: %d ' % (url, r.status_code))
            if content:
                ret[url] = {'status_code': r.status_code, 'content': r.text}
            else:
                ret[url] = {'status_code': r.status_code}
        if ret:
            return ret
        raise Exception
    except Exception as e:
        log.exception('Error in make_grequest')
def delete_conntrack_state(self, namespace, ip):
    """Delete conntrack state associated with an IP address.

    This terminates any active connections through an IP. Call this soon
    after removing the IP address from an interface so that new connections
    cannot be created before the IP address is gone.

    namespace: the name of the namespace where the IP has been configured
    ip: the IP address for which state should be removed. This can be
        passed as a string with or without /NN. A netaddr.IPAddress or
        netaddr.Network representing the IP address can also be passed.
    """
    ip_str = str(netaddr.IPNetwork(ip).ip)
    ip_wrapper = ip_lib.IPWrapper(namespace=namespace)

    # Delete conntrack state for ingress traffic
    # If 0 flow entries have been deleted
    # conntrack -D will return 1
    try:
        ip_wrapper.netns.execute(["conntrack", "-D", "-d", ip_str],
                                 check_exit_code=True,
                                 extra_ok_codes=[1])
    except RuntimeError:
        LOG.exception("Failed deleting ingress connection state of"
                      " floatingip %s", ip_str)

    # Delete conntrack state for egress traffic
    try:
        ip_wrapper.netns.execute(["conntrack", "-D", "-q", ip_str],
                                 check_exit_code=True,
                                 extra_ok_codes=[1])
    except RuntimeError:
        LOG.exception("Failed deleting egress connection state of"
                      " floatingip %s", ip_str)
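The docstring's note about exit codes is worth making concrete: conntrack exits with 1 when zero entries matched, which is why 1 appears in extra_ok_codes. The shell equivalent of the two calls, with an illustrative namespace and address, would be:

# ip netns exec qrouter-1234 conntrack -D -d 203.0.113.10   # ingress entries
# ip netns exec qrouter-1234 conntrack -D -q 203.0.113.10   # egress (reply) entries
# An exit status of 1 (nothing deleted) is treated as success via extra_ok_codes=[1].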
def wait_for_results():
    if wait_lock.acquire(False):  # the FetchInProgress that gets waited on waits for the actual completion
        try:
            response = connection.getresponse()
            if response.status != 200:
                raise Exception("Error response %d %s from %s" % (response.status, response.reason, url))
            pickled_response = response.read()
            results = unpickle.loads(pickled_response)
            self.cache_lock.acquire()
            self.request_cache[url] = results
            self.cache_lock.release()
            completion_event.set()
            return results
        except:
            completion_event.set()
            self.store.fail()
            log.exception("Error requesting %s" % url)
            raise
    else:  # otherwise we just wait on the completion_event
        completion_event.wait(settings.REMOTE_FETCH_TIMEOUT)
        cached_results = self.request_cache.get(url)
        if cached_results is None:
            raise Exception("Passive remote fetch failed to find cached results")
        else:
            return cached_results
def add_request(cls, request):
    """ Add website and user in db to process """
    try:
        user = None
        if not Database.user_exists(email_id=request['email_id']):
            log.debug('User Not exists')
            user_id = get_new_id(request['email_id'])
            # create user identity object and save to db
            user = User(id=user_id, email_id=request['email_id'])
            s.add(user)
            s.commit()
        else:
            log.debug('User exists')
            user = Database.fetch_user(email_id=request['email_id'], serialize=False)

        # create website identity object and save to db
        website_id = get_new_id(request['url'])
        website = Website(id=website_id, url=request['url'])
        website.keywords = ','.join(request['keywords'])
        website.status = Status.PENDING
        website.user = user
        s.add(website)
        s.commit()
        log.info('Add Request :: {0} :: {1}'.format(website.url, user.email_id))
        return WebsiteJsonSerializer().serialize(website)
    except Exception:
        log.exception('Error in add request')
def _forward_client_answer(self, msg, addr):
    try:
        # ?todo?: should copy call_ctx_data from the other party?
        buf = msg.serialize()
        yield CommMessage(addr, ClientAnswer, buf)
    except:
        log.exception('exception')
def submit(self, flags):
    """ this function will submit the flags to the scoreboard"""
    status = []
    try:
        with self.remote("flags.e.ructf.org", 31337) as r:
            r.read()
            for flag in flags:
                r.send(flag + "\n")
                output = r.recv()
                if "Accepted" in output:
                    s = STATUS["accepted"]
                elif "Old" in output:
                    s = STATUS["old"]
                else:
                    s = STATUS["rejected"]
                status.append(s)
    except Exception as e:
        log.exception(e)
    return status
def login_handler(request):
    def verify_login(username, password):
        '''match supplied credentials with the database'''
        try:
            dbuser = users.get(unicode(username))
            if dbuser and str(dbuser.password) == str(password):
                log.info('login succeeded')
                return dbuser
            else:
                log.info('login failed')
                return None
        except:
            log.exception('exception')

    def reply_login(ctx_id, ctx_data):
        '''returns a login reply'''
        try:
            lr = LoginReply()
            ip, port = ctx_data.addr
            codecs = sorted(Codecs.values())
            lr.set_values(client_ctx=ctx_id,
                          client_public_ip=ip,
                          client_public_port=port,
                          ctx_expire=ctx_table[ctx_id].expire - time.time(),
                          num_of_codecs=len(codecs),
                          codec_list=''.join((c for c in codecs)))
            buf = lr.serialize()
            yield CommMessage(request.addr, LoginReply, buf)
        except:
            log.exception('exception')

    def deny_login():
        '''returns login-denied reply'''
        try:
            ld = ShortResponse()
            ld.set_values(client_ctx=0, result=struct.unpack('!h', Errors.LoginFailure))
            buf = ld.serialize()
            log.info('login error')
            yield CommMessage(request.addr, ShortResponse, buf)
        except:
            log.exception('exception')

    try:
        username, password = (request.msg.username.value, request.msg.password.value)
        dbuser = verify_login(username, password)
        if dbuser:
            # creates new client context and register it
            ctx_id, ctx_data = create_client_context(request, status=dbuser.login_status)
            ctx_table.add_client((ctx_id, ctx_data))
            return reply_login(ctx_id, ctx_data)
        else:
            return deny_login()
    except:
        log.exception('exception')
def _forward_invite(self, call_ctx, matched_codecs):
    try:
        caller_ctx = call_ctx.caller_ctx
        callee_ctx = call_ctx.callee_ctx
        caller_name = ctx_table[caller_ctx].client_name
        caller_ip, caller_port = ctx_table.get_addr(caller_ctx)
        codec_list = ''.join(matched_codecs)
        sfi = ServerForwardInvite()
        sfi.set_values(
            client_ctx=callee_ctx,
            call_ctx=call_ctx.ctx_id,
            call_type=config.CallTypes.ViaProxy,
            client_name_length=len(caller_name),
            client_name=caller_name,
            client_public_ip=caller_ip,
            client_public_port=caller_port,
            num_of_codecs=len(matched_codecs),
            codec_list=codec_list
        )
        sfi_buffer = sfi.serialize()
        yield CommMessage(ctx_table.get_addr(callee_ctx), ServerForwardInvite, sfi_buffer)
    except:
        log.exception('exception')
def remove_old_clients():
    try:
        while thread_loop_active:
            now = time.time()
            expired_clients = [client.ctx_id for client in ctx_table.clients() if client.expire < now]
            for ctx_id in expired_clients:
                log.info('removing inactive client ' + repr(ctx_id))
                ctx_table.remove_client(ctx_id)
            for i in xrange(CLIENT_EXPIRE):
                if thread_loop_active:
                    time.sleep(1)
                else:
                    break
            log.info('%d old clients have been removed' % len(expired_clients))
            ctx_table.clear_orphan_calls()
            ctx_table.pprint()
        log.info('terminating thread: remove_old_clients')
    except:
        log.exception('exception')
def handle_outbound_queue():
    while thread_loop_active:
        try:
            reply = outbound_messages.get(block=0)
            if reply and getattr(reply, 'msg') and getattr(reply, 'addr'):
                if reply.msg_type != ClientRTP:
                    log.info('server sends %s to %s' % (reply.msg_type, repr(reply.addr)))
                else:
                    log.debug('server sends %s to %s' % (reply.msg_type, repr(reply.addr)))
                try:
                    data = reply.msg.pack()
                    reactor.callFromThread(servers_pool.send_to, reply.addr, data)
                except Exception, inst:
                    log.exception('exception')
        except Queue.Empty:
            time.sleep(0.010)
        except:
            log.exception('exception')
    log.info('terminating thread: handle_outbound_queue')
def create_client_context(comm_msg, status=ClientStatus.Unknown):
    '''creates the client context for each new logged in client
    returns a tuple (ctx_id, client_ctx_data)
    client_ctx_data.keys() => addr, status, expire, last_keep_alive,
    ctx_id, call_ctx, client_name
    '''
    try:
        ctx_id = comm_msg.client_ctx
        addr = comm_msg.addr
        if servers_pool.known_address(addr):
            now = time.time()
            ctx = Storage(
                addr=addr,
                status=status,
                expire=now + CLIENT_EXPIRE,
                last_keep_alive=now,
                ctx_id=ctx_id,
                current_call=None,
                client_name=comm_msg.msg.username.value
            )
            return (ctx_id, ctx)
        else:
            return None
    except:
        log.exception('exception')
def get_fileHanzi(file):
    try:
        f = codecs.open(file, "r", "utf8")
        return set(utils.concat([[c for c in line if utils.isHanzi(c)] for line in f.readlines()]))
    except IOError, e:
        log.exception("Error reading hanzi statistics character file " + file)
        return set()
def create_new_users(mycursor, usertype=None):
    """Takes a cursor to the Bixby DB and Staff or Student as the usertype.
    I'd like to fix this so that by default it creates all types. BEH
    The usertype thing is BROKEN BEH"""
    log.info('######## Creating New Users ########')
    mycursor.execute(queries.get_new_users, (usertype,))
    new_users = mycursor.fetchall()
    ac = google.appsclient.AppsClient()
    for user in new_users:
        uid, user_domain, user_type, first_name, last_name, external_usernumber = user
        new_username = unique_username(mycursor, uid)
        if user_type == 'Student':
            new_user_password = config.STUDENT_PASSWORD_PREFIX + external_usernumber
        else:
            new_user_password = config.STAFF_PASSWORD_PREFIX
        log.info('Creating %s User: %s (UID: %d)' % (user_type, new_username + '@' + user_domain, uid))
        try:
            ac.create_user(user_type, new_username, last_name, first_name, new_user_password)
        except gdata.apps.service.AppsForYourDomainException, e:
            if e.error_code == 1300:  # Entity Exists
                mycursor.execute(queries.update_username, (new_username, new_user_password, 0, uid))
            log.exception('Error: %d Group: %s Reason: %s User: %s' % (e.error_code, e.invalidInput, e.reason, new_username))
        mycursor.execute(queries.update_username, (new_username, new_user_password, 2, uid))  # Mark Created and Insert Username
        time.sleep(config.SLEEP_TIME_SECONDS)
def _set_values(self, items, values_dict=None):
    try:
        start = 0  # first field position
        for params in items:
            key, ctr = params[0], params[1]
            format = len(params) == 3 and params[2]
            args = [start]
            if format:
                if hasattr(format, "__call__"):
                    format = format()
                args.append(format)
            # key -> field.name
            args.append(key)
            # set a property as field-name and its value as Field instance.
            self.__dict__[key] = ctr(*args)
            # set the value either to a supplied argument
            # or extract from the buffer
            if values_dict:
                self.__dict__[key].value = values_dict[key]
            else:
                self.__dict__[key].unpack_from(self.buf)
            # next field starting point
            start = self.__dict__[key].end
    except:
        log.exception("exception")
def dict_fields(self):
    """all fields as dict {name: value, ...}"""
    try:
        x = dict(((self.__dict__[field[0]].name, self.__dict__[field[0]].value) for field in self.seq))
        return x
    except:
        log.exception("exception")
def submit(self, flags):
    """ this function will submit the flags to the scoreboard"""
    status = []
    try:
        for flag in flags:
            params = {'csrfmiddlewaretoken': '<csrf>', 'flag_input': flag}
            cookies = {'sessionid': '<session_id>', 'csrftoken': '<csrf>'}
            r = requests.post("http://10.100.50.10/competition/submit/", data=params, cookies=cookies)
            output = r.content.decode('utf-8')
            if "Thank you" in output:
                s = STATUS["accepted"]
            elif "once" in output:
                s = STATUS["old"]
            else:
                s = STATUS["rejected"]
            status.append(s)
    except Exception as e:
        log.exception(e)
    return status
def _on_recv_body(self, data):
    if self._on_receive_packet_callback is not None:
        try:
            self._on_receive_packet_callback(self, data)
        except:
            log.exception('tcp_async_client_on_receive_exception|id=%s,packet=%s', self._id, data.encode('hex'))
    self._recv_header()
def serialize(self):
    """packs all values into the buffer and returns the buffer"""
    try:
        self._pack_values()
        return self.buf.raw
    except:
        log.exception("exception")
def my_excepthook(_type, value, tback):
    '''
    Can be used as an override to the default excepthook by running
    "sys.excepthook = my_excepthook"
    Will log exceptions and then call the default excepthook.
    '''
    log.exception("".join(traceback.format_exception(_type, value, tback)))
    # Defer to the default hook, as the docstring promises.
    sys.__excepthook__(_type, value, tback)
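Installing the hook is a one-liner, as the docstring says:

import sys
sys.excepthook = my_excepthook  # uncaught exceptions are now logged before the process dies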
def _on_close(self):
    log.info('tcp_async_client_disconnect|id=%s,address=%s,port=%s', self._id, self._address, self._port)
    if self._on_disconnect_callback is not None:
        try:
            self._on_disconnect_callback(self)
        except:
            log.exception('tcp_async_client_on_disconnect_exception|id=%s,address=%s,port=%s', self._id, self._address, self._port)
    self._async_connect()
def _pack_values(self):
    """packs all the values into the buffer"""
    try:
        self._init_buffer()
        for v in self.seq:
            self.__dict__[v[0]].pack_into(self.buf)
    except:
        log.exception("exception")
def deserialize(self, buf=None):
    try:
        if buf:
            self._init_buffer(buf)
        self._set_values(self.seq)
    except:
        log.exception("exception")
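serialize, deserialize, _pack_values, and _set_values together imply a round trip over the field sequence in self.seq; a hypothetical sketch of that round trip for some concrete message subclass Msg (the class name is an assumption, not from the original framework):

# Hypothetical round trip; `Msg` stands in for any concrete message subclass.
wire = msg.serialize()    # packs every field in msg.seq into msg.buf, returns raw bytes
clone = Msg()
clone.deserialize(wire)   # re-populates clone's fields from the same bytes
assert clone.dict_fields() == msg.dict_fields()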
def _fork(self):
    try:
        pid = os.fork()
        if pid > 0:
            sys.exit(0)
    except OSError:
        LOG.exception('Fork failed')
        sys.exit(1)
def safe_commit(session):
    try:
        session.commit()
    except SQLAlchemyError:
        log.exception("DB committing failed")
        return
    except Exception:
        log.error("Something unexpected happened")
        raise
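A minimal usage sketch (the session and row are illustrative):

# Hypothetical caller; `session` is any active SQLAlchemy session.
session.add(website)
safe_commit(session)  # a failed commit is logged and swallowed; non-DB errors re-raise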
def make_directory(self):
    created = False
    try:
        os.makedirs(self.target_dir_path())
        created = True
    except Exception as e:
        log.exception(e.message)
    finally:
        return created
def load(root=None, json='config.json', yaml='config.yaml', override='config.py', logoverridefailure=False, args=None):
    '''Load a config object given default configuration file paths and command line arguments.'''

    conf = config()

    # Load default config files with the precedence: python override > json > yaml
    if yaml:
        if root:
            yaml = os.path.join(root, yaml)
        if os.path.isfile(yaml):
            conf.yaml(yaml)
    if json:
        if root:
            json = os.path.join(root, json)
        if os.path.isfile(json):
            conf.json(json)
    if override:
        if root and override.endswith('.py'):
            override = os.path.join(root, override)
        try:
            conf.override(override)
        except:
            if logoverridefailure:
                log.debug('Tried and failed to apply default override from module %s. (But that\'s okay! It\'s just a default.)' % override)
                log.debug(traceback.format_exc())

    # Handle --config argument
    if args and args.get('config'):
        argsconf = args['config']
        if isinstance(argsconf, basestring):
            argsconf = (argsconf,)
        for applyconf in argsconf:
            try:
                if applyconf.endswith('.json'):
                    conf.json(applyconf)
                elif applyconf.endswith('.yaml'):
                    conf.yaml(applyconf)
                else:
                    conf.override(applyconf)
            except:
                log.exception('Failed to load configuration from %s.' % applyconf)

    # Apply other command line arguments
    if args:
        conf.apply(args)

    # Handle things like automatic version detection, package importing
    conf.setup()

    # All done!
    return conf
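A minimal sketch of calling the loader (the paths and args mapping are illustrative):

# Hypothetical call: yaml defaults, then json, then config.py overrides,
# then the explicit --config file, then remaining command line arguments.
conf = load(root='.', args={'config': 'local.yaml', 'verbose': True})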
def gather_sosreport_from_node(node: Node, destination_dir: str):
    try:
        node.upload_file(SOSREPORT_SCRIPT, "/tmp/man_sosreport.sh")
        node.run_command("chmod a+x /tmp/man_sosreport.sh")
        node.run_command("sudo /tmp/man_sosreport.sh")
        node.download_file("/tmp/sosreport.tar.bz2",
                           os.path.join(destination_dir, f"sosreport-{node.name}.tar.bz2"))
    except (TimeoutError, RuntimeError, SSHException):
        log.exception("Failed accessing node %s for sosreport data gathering", node)
def __init__(self, pidfile, procname, uuid=None):
    self.pidfile = pidfile
    self.procname = procname
    self.uuid = uuid
    try:
        self.fd = os.open(pidfile, os.O_CREAT | os.O_RDWR)
        fcntl.flock(self.fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError:
        LOG.exception("Error while handling pidfile: %s", pidfile)
        sys.exit(1)
def fetches(cls, limit=3):
    """ Returns a list of websites having status pending """
    try:
        websites = s.query(Website).filter(Website.status == Status.PENDING).limit(limit).all()
        return websites
    except Exception:
        log.exception('Error in fetches')
def setuppackages(self):
    '''Internal: Import packages.'''
    self.importedpackages = []
    if isinstance(self.packages, basestring):
        self.packages = (self.packages,)
    for package in self.packages:
        try:
            self.importedpackages.append(importlib.import_module(package))
        except:
            log.exception('Failed to import package %s.' % package)
def try_to_delete_cluster(tfvars):
    try:
        cluster_id = tfvars.get("cluster_inventory_id")
        if cluster_id:
            client = bm_inventory_api.create_client(args.inventory_url, wait_for_url=False)
            client.delete_cluster(cluster_id=cluster_id)
    # TODO add different exception validations
    except:
        log.exception("Failed to delete cluster")
def create(cls):
    """ Create all tables in the engine.
    This is equivalent to "Create Table" statements in raw SQL. """
    try:
        log.info('DB_URI :: {0} created'.format(DATABASE_URI))
        Base.metadata.create_all(engine)
    except Exception:
        log.exception('Error in creating database')
def save_result_to_database(website):
    """ Saves result to database """
    try:
        Database.set_website_status(id=website.id, status='finished', result=website.result_to_json())
        log.info('Result saved successfully :: {0}'.format(website.url))
    except Exception as e:
        log.exception('Error in saving result in database')
def __interactive_choose(self, key, elems, elem_name, elems_name=None, force_match=True):
    canceled = False
    if not elem_name:
        log.debug("invalid elem name")
        return None, canceled
    if not elems_name:
        elems_name = elem_name + "s"
    if not isinstance(elems, list) or len(elems) and not isinstance(elems[0], str):
        log.debug("invalid {} list type", elem_name)
        return key, canceled
    if not elems:
        if not key:
            log.debug("invalid {} key", elem_name)
        return key, canceled
    else:
        if not key:
            if force_match:
                log.debug("invalid {} key", elem_name)
                return None, canceled
        else:
            # match elem
            elems = filter(lambda h: h.find(key) != -1 if key else False, elems)

    chose_elem = None
    if elems and len(elems) > 1:
        log.info("Following {} were found:", elems_name)
        for index, item in enumerate(elems):
            log.tips("\t ({}): {}", index + 1, item)
        while True:
            try:
                number = int(raw_input("Choose one to continue: ").strip())
                if 1 <= number <= len(elems):
                    chose_elem = elems[number - 1]
                else:
                    log.debug("input num is out of index of matched {}", elems_name)
            except KeyboardInterrupt:
                log.exception("\nNo {} was chose", elem_name)
                canceled = True
                break
            except:
                log.exception()
            if chose_elem:
                break
    else:
        chose_elem = elems[0] if elems else key
    return chose_elem, canceled
def move_downloaded_files(ftp, directory, files):
    try:
        ftp.mkd(directory)
    except:
        pass  # Ignore
    try:
        for f in files:
            ftp.rename(f, '%s/%s' % (directory, f))
    except:
        log.exception("Error moving files %s to history dir %s" % (repr(files), directory))
def wrapped(*args, **kwargs):
    try:
        return fn(*args, **kwargs)
    except errors as e:
        if message:
            log.exception(message)
        if callback:
            callback(e)
        if silent:
            return
        raise
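wrapped reads like the inner closure of an error-trapping decorator factory; a self-contained sketch of what the enclosing factory presumably looks like (the name catch and its defaults are assumptions, not taken from the original):

import functools
import logging

log = logging.getLogger(__name__)

def catch(errors=(Exception,), message=None, callback=None, silent=False):
    # Hypothetical outer factory around the `wrapped` closure shown above.
    def decorator(fn):
        @functools.wraps(fn)
        def wrapped(*args, **kwargs):
            try:
                return fn(*args, **kwargs)
            except errors as e:
                if message:
                    log.exception(message)
                if callback:
                    callback(e)
                if silent:
                    return
                raise
        return wrapped
    return decorator

# Usage sketch:
# @catch(errors=(IOError,), message='read failed', silent=True)
# def read_config(path): ...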
def main():
    if args.delete_all:
        delete_all()
    else:
        try:
            tfvars = utils.get_tfvars()
            if not args.only_nodes:
                try_to_delete_cluster(tfvars)
            delete_nodes(tfvars)
        except:
            log.exception("Failed to delete nodes")