def test_sub_update(self):
    self.test_sub_add()  # sub1 has 5k credit
    subscriber.subtract_credit(self.sub1, 1000)  # sub1 spends 1k
    # Do another checkin
    self.checkin_response['response']['subscribers'] = self.sub_section
    CheckinHandler(json.dumps(self.checkin_response))
    subs_post = subscriber.get_subscriber_states()
    bal = crdt.PNCounter.from_state(
        json.loads(subs_post[self.sub1]['balance'])).value()
    self.assertEqual(4000, bal)

    # simulate cloud adds 11k credits, total should be 15k next checkin
    self.sub_section[self.sub1]['balance']['p']['3c470c85'] += 11000
    # Do another checkin
    self.checkin_response['response']['subscribers'] = self.sub_section
    CheckinHandler(json.dumps(self.checkin_response))
    subs_post = subscriber.get_subscriber_states()
    bal = crdt.PNCounter.from_state(
        json.loads(subs_post[self.sub1]['balance'])).value()
    self.assertEqual(15000, bal)

    # simulate cloud adds 10k credits, client spends 5k,
    # total should be 20k next checkin
    self.sub_section[self.sub1]['balance']['p']['3c470c85'] += 10000
    subscriber.subtract_credit(self.sub1, 5000)
    # Do another checkin
    self.checkin_response['response']['subscribers'] = self.sub_section
    CheckinHandler(json.dumps(self.checkin_response))
    subs_post = subscriber.get_subscriber_states()
    bal = crdt.PNCounter.from_state(
        json.loads(subs_post[self.sub1]['balance'])).value()
    self.assertEqual(20000, bal)
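# A minimal sketch (not part of the project code) of the PN-counter
# semantics the balance test above relies on, assuming the standard CRDT
# definition that crdt.PNCounter appears to implement: the state is two
# maps of per-replica counts, 'p' (credit added) and 'n' (credit spent);
# the value is sum(p) - sum(n); merging takes the per-replica maximum so
# cloud and client updates combine without double counting. The helper
# names below are hypothetical and only illustrate the arithmetic the
# assertions expect.

def _pn_value_sketch(state):
    """Value of a PN-counter state shaped like {'p': {...}, 'n': {...}}."""
    return sum(state['p'].values()) - sum(state['n'].values())


def _pn_merge_sketch(a, b):
    """Merge two PN-counter states by keeping each replica's maximum."""
    merged = {'p': {}, 'n': {}}
    for part in ('p', 'n'):
        for replica in set(a[part]) | set(b[part]):
            merged[part][replica] = max(a[part].get(replica, 0),
                                        b[part].get(replica, 0))
    return merged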
def test_sub_remove(self):
    self.checkin_response['response']['subscribers'] = self.sub_section
    CheckinHandler(json.dumps(self.checkin_response))
    subs_pre = subscriber.get_subscribers()
    self.assertTrue(len(subs_pre) == 2)
    sub_section = copy.deepcopy(self.sub_section)
    del sub_section[self.sub2]
    self.checkin_response['response']['subscribers'] = sub_section
    CheckinHandler(json.dumps(self.checkin_response))
    subs_post = subscriber.get_subscriber_states()
    self.assertTrue(len(subs_post) == 1)
def test_get_one_subscriber(self):
    """We can get a list of specific subscribers."""
    imsi0 = 'IMSI90155%010d' % (randrange(100, 1e10))
    imsi1 = 'IMSI90156%010d' % (randrange(100, 1e10))
    imsi2 = 'IMSI90157%010d' % (randrange(100, 1e10))
    subscriber.create_subscriber(imsi0, '')  # MSISDN unused
    subscriber.create_subscriber(imsi1, '')  # MSISDN unused
    subscriber.create_subscriber(imsi2, '')  # MSISDN unused
    subs = subscriber.get_subscriber_states([imsi0, imsi1])
    self.assertEqual(len(subs), 2)
    self.assertTrue(imsi0 in subs)
    self.assertTrue(imsi1 in subs)
def test_sub_add(self):
    subs_pre = subscriber.get_subscribers()
    self.checkin_response['response']['subscribers'] = self.sub_section
    CheckinHandler(json.dumps(self.checkin_response))
    subs_post = subscriber.get_subscriber_states()
    self.assertTrue(len(subs_pre) == 0)
    self.assertTrue(len(subs_post) == 2)
    for sub in self.sub_section:
        e_bal = crdt.PNCounter.from_state(
            self.sub_section[sub]['balance']).value()
        actual_bal = crdt.PNCounter.from_state(
            json.loads(subs_post[sub]['balance'])).value()
        self.assertEqual(e_bal, actual_bal)
def test_get_all_subscribers(self):
    """We can get a list of all registered subscribers."""
    imsi0 = 'IMSI90155%010d' % (randrange(100, 1e10))
    imsi1 = 'IMSI90156%010d' % (randrange(100, 1e10))
    imsi2 = 'IMSI90157%010d' % (randrange(100, 1e10))
    subscriber.create_subscriber(imsi0, '')  # MSISDN unused
    subscriber.create_subscriber(imsi1, '')  # MSISDN unused
    subscriber.create_subscriber(imsi2, '')  # MSISDN unused
    subs = subscriber.get_subscriber_states()
    expected = {
        imsi0: '',  # we don't get the MSISDN back
        imsi1: '',
        imsi2: '',
        self.TEST_IMSI: '',
    }
    self.assertEqual(len(expected), len(subs))
    for imsi in list(subs.keys()):
        self.assertTrue(imsi in expected)
def checkin(self, timeout=11):
    """Gather system status and send a checkin request to the cloud."""
    # Compile checkin data.
    checkin_start = time.time()
    status = {
        'usage': events.usage(),
        'uptime': system_utilities.uptime(),
        'system_utilization': self.utilization_tracker.get_data(),
    }
    # Append additional status if we can.
    try:
        # Get the software versions.
        status['versions'] = bts.get_versions()
    except BSSError as e:
        logger.error("bts get_versions error: %s" % e)
    try:
        # Gather the camped subscriber list.
        status['camped_subscribers'] = bts.active_subscribers()
    except BSSError as e:
        logger.error("bts get active_subscribers error: %s" % e)
    # Gather tower load and noise data.
    # NOTE(matt): these values can vary quite a bit over a minute. It
    # might be worth capturing data more frequently and sending
    # something like average or median values.
    status['openbts_load'] = {}
    try:
        status['openbts_load'] = bts.get_load()
    except BSSError as e:
        logger.error("bts get_load error: %s" % e)
    for key, val in list(self._checkin_load_stats.items()):
        status['openbts_load']['checkin.' + key] = val
    self._checkin_load_stats.clear()
    try:
        status['openbts_noise'] = bts.get_noise()
    except BSSError as e:
        logger.error("bts get_noise error: %s" % e)
    status['radio'] = {}
    try:
        status['radio']['band'] = bts.get_band()
        # Eventually we also need to grab all used channels, not just c0.
        # TODO: (kheimerl) T13270338 Add multiband support
        status['radio']['c0'] = bts.get_arfcn_c0()
        # Also add power here eventually.
        # TODO: (kheimerl) T13270365 Add power level support
    except BSSError as e:
        # Delete the key if this failed.
        del status['radio']
        logger.error("bts radio error: %s" % e)
    # Add balance sync data.
    status['subscribers'] = subscriber.get_subscriber_states(
        imsis=events.EventStore().modified_subs())
    # Add delta protocol context (if available) to let the server know the
    # client supports delta optimization and has a prior delta state.
    if delta.DeltaProtocol.CTX_KEY not in status:  # just a precaution
        sections_ctx = {}
        for section, ctx in list(CheckinHandler.section_ctx.items()):
            if ctx:
                sections_ctx[section] = ctx.to_proto_dict()
        if sections_ctx:
            status[delta.DeltaProtocol.CTX_KEY] = {
                delta.DeltaProtocolOptimizer.SECTIONS_CTX_KEY: sections_ctx
            }
    # Send the checkin request.
    uuid = snowflake.snowflake()
    data = {
        'status': status,
        'bts_uuid': uuid,
    }
    headers = dict(self.auth_header)
    # Set the content type to application/json & utf-8, compressed or not;
    # JSON should be more efficient than a URL-encoded JSON form payload.
    headers['Content-Type'] = 'application/json; charset=utf-8'
    data_json = json.dumps(data)
    decompressed_status_len = len(data_json)
    status_len = decompressed_status_len
    if status_len > endaga_ic.MIN_COMPRESSIBLE_REQUEST_SZ:
        # Try to gzip the payload; send it uncompressed if compression fails.
        try:
            gzbuf = BytesIO()
            with GzipFile(mode='wb', fileobj=gzbuf) as gzfile:
                gzfile.write(bytes(data_json, encoding='UTF-8'))
            data_json = gzbuf.getvalue()
            # Use the Content-Encoding header since AWS cannot handle the
            # Transfer-Encoding header, which would be more appropriate here.
            headers['Content-Encoding'] = 'gzip'
            status_len = len(data_json)  # set len to reflect compression
        except BaseException as e:
            logger.error("Checkin request Gzip error: %s" % e)
    headers['Content-Length'] = str(status_len)
    post_start = time.time()
    try:
        r = self.session.post(
            self.conf['registry'] + "/checkin?id=" +
            # Add part of the uuid to the query; it helps with debugging
            # & server-side logging and can be used by LBs.
            uuid[:8],
            headers=headers,
            data=data_json,
            timeout=timeout,
            cookies=self._session_cookies)
    except BaseException as e:
        logger.error("Endaga: checkin failed, network error: %s." % e)
        self._cleanup_session()
        self._checkin_load_stats['req_sz'] = status_len
        self._checkin_load_stats['raw_req_sz'] = decompressed_status_len
        self._checkin_load_stats['post_lat'] = time.time() - post_start
        raise
    post_end = time.time()
    # Make sure either the server sent a charset or we set it to utf-8
    # (the JSON default).
    if not r.encoding:
        r.encoding = 'utf-8'
    text = r.text
    decompressed_response_len = len(text)
    response_len = decompressed_response_len
    # Try to get the correct content length from the HTTP headers; it should
    # reflect the compressed length. If that fails, fall back to the length
    # of the returned text.
    cont_len = r.headers.get('Content-Length')
    if cont_len:
        try:
            response_len = int(cont_len)
        except BaseException:
            pass
    if r.status_code == 200:
        try:
            CheckinHandler(text)
            logger.info("Endaga: checkin success.")
            if r.cookies is not None:
                if self._session_cookies is None:
                    # First time cookies are seen from the server:
                    # initialize the cookies dict.
                    self._session_cookies = dict(r.cookies)
                else:
                    for key, value in r.cookies.items():
                        # If the server sent new/updated cookies, update
                        # them, but keep previously set cookies as well.
                        # ELBs do not send AWSELB cookies on every request
                        # and expect clients to 'remember' them.
                        self._session_cookies[key] = value
        except BaseException:
            self._cleanup_session()
            raise
    else:
        logger.error("Endaga: checkin failed (%d), reason: %s, body: %s" %
                     (r.status_code, r.reason, r.text))
        # Clean up the session on any error.
        if r.status_code >= 300:
            self._cleanup_session()
    checkin_end = time.time()
    self._checkin_load_stats['req_sz'] = status_len  # request payload size
    self._checkin_load_stats['raw_req_sz'] = decompressed_status_len
    self._checkin_load_stats['rsp_sz'] = response_len  # response payload size
    self._checkin_load_stats['raw_rsp_sz'] = decompressed_response_len
    # Checkin latencies.
    self._checkin_load_stats['post_lat'] = post_end - post_start
    self._checkin_load_stats['process_lat'] = checkin_end - post_end
    self._checkin_load_stats['lat'] = checkin_end - checkin_start
    data['response'] = {'status': r.status_code, 'text': r.text}
    return data
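# A standalone sketch of the request-compression path used in checkin()
# above: gzip the JSON body in memory and advertise it via Content-Encoding
# (rather than Transfer-Encoding, which the source notes AWS cannot handle).
# The function name and default threshold below are illustrative; the real
# threshold comes from endaga_ic.MIN_COMPRESSIBLE_REQUEST_SZ.

def _maybe_gzip_sketch(payload, min_compressible_size=1024):
    """Return (body_bytes, headers) for a JSON payload, gzipped if large."""
    import json
    from gzip import GzipFile
    from io import BytesIO

    body = json.dumps(payload)
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if len(body) > min_compressible_size:
        buf = BytesIO()
        with GzipFile(mode='wb', fileobj=buf) as gz:
            gz.write(body.encode('utf-8'))
        headers['Content-Encoding'] = 'gzip'
        return buf.getvalue(), headers
    return body.encode('utf-8'), headers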