def test_hashring(self):
    """
    Check that hashring.get_responsible_hsdirs() picks the right HSDir
    fingerprints for both the first and the second service descriptor,
    given a mocked consensus of 255 fake HSDir nodes.
    """
    current_time = datetime.datetime.fromtimestamp(10101010101)
    current_srv = bytes([41]) * 32
    previous_srv = bytes([42]) * 32

    # Create 255 fake Tor nodes that will be used as part of the unittest
    network_nodes = []
    for i in range(1, 256):
        microdescriptor = mock.Mock()
        routerstatus = mock.Mock()
        routerstatus.fingerprint = (bytes([i]) * 20).hex()
        routerstatus.protocols = {'HSDir': [2]}
        routerstatus.flags = ['HSDir']
        node_ed25519_id_b64 = base64.b64encode(bytes([i]) * 32).decode('utf-8')
        microdescriptor.identifiers = {'ed25519': node_ed25519_id_b64}
        node = tor_node.Node(microdescriptor, routerstatus)
        network_nodes.append(node)

    # Mock a fake consensus
    consensus = DummyConsensus()
    consensus.consensus = mock.Mock()
    consensus.consensus.valid_after = current_time
    consensus.get_current_srv = mock.Mock()
    consensus.get_current_srv.return_value = current_srv
    consensus.get_previous_srv = mock.Mock()
    consensus.get_previous_srv.return_value = previous_srv
    consensus.is_live = mock.Mock()
    consensus.is_live.return_value = True
    consensus.nodes = network_nodes

    # Mock a fake Tor network
    from onionbalance.hs_v3.onionbalance import my_onionbalance
    my_onionbalance.consensus = consensus

    # First descriptor uses the *previous* blinded key / time period.
    previous_blinded_pubkey_hex = "063AEC5E1FD3025098F2DF71EF570B28D94B463FFCCB5EC6A9C061E38F551C6A"
    previous_blinded_pubkey_bytes = base64.b16decode(previous_blinded_pubkey_hex)

    responsible_hsdirs = hashring.get_responsible_hsdirs(previous_blinded_pubkey_bytes, True)
    # Assert the length explicitly: the old manual-index loop silently
    # passed when fewer HSDirs than expected were returned.
    self.assertEqual(len(responsible_hsdirs), len(CORRECT_HSDIR_FPRS_FIRST_DESCRIPTOR))
    for hsdir_fpr, expected_fpr in zip(responsible_hsdirs, CORRECT_HSDIR_FPRS_FIRST_DESCRIPTOR):
        self.assertEqual(hsdir_fpr.upper(), expected_fpr)

    # Second descriptor uses the *current* blinded key since it
    # corresponds to the next time period.
    current_blinded_pubkey_hex = "5DB624F2D74F103E6E8C6FBCCD074586EF5A5572F90673C00B77DEF94EC11499"
    current_blinded_pubkey_bytes = base64.b16decode(current_blinded_pubkey_hex)

    responsible_hsdirs = hashring.get_responsible_hsdirs(current_blinded_pubkey_bytes, False)
    self.assertEqual(len(responsible_hsdirs), len(CORRECT_HSDIR_FPRS_SECOND_DESCRIPTOR))
    for hsdir_fpr, expected_fpr in zip(responsible_hsdirs, CORRECT_HSDIR_FPRS_SECOND_DESCRIPTOR):
        self.assertEqual(hsdir_fpr.upper(), expected_fpr)
def _hsdir_set_changed(self, is_first_desc):
    """
    Return True if the HSDir has changed between the last upload of this
    descriptor and the current state of things
    """
    from onionbalance.hs_v3.onionbalance import my_onionbalance

    # Derive the blinding parameter for the relevant time period.
    _, time_period_number = hashring.get_srv_and_time_period(is_first_desc)
    blinded_param = my_onionbalance.consensus.get_blinding_param(
        self._get_identity_pubkey_bytes(), time_period_number)

    # Get blinded key
    # TODO: hoho! this is dirty we are poking into internal stem API. We
    # should ask atagar to make it public for us! :)
    blinded_key = stem.descriptor.hidden_service._blinded_pubkey(
        self._get_identity_pubkey_bytes(), blinded_param)

    # Without a hash ring there is nothing to compare against.
    try:
        current_set = set(hashring.get_responsible_hsdirs(blinded_key, is_first_desc))
    except hashring.EmptyHashRing:
        return False

    # Pick the descriptor whose previous HSDir set we recorded last time.
    last_descriptor = self.first_descriptor if is_first_desc else self.second_descriptor
    previous_set = set(last_descriptor.responsible_hsdirs)

    if current_set == previous_set:
        logger.info("\t HSDir set remained the same")
        return False

    logger.info("\t HSDir set changed (%s vs %s)", current_set, previous_set)
    return True
def _publish_descriptor(self, is_first_desc):
    """
    Attempt to publish descriptor if needed.

    If 'is_first_desc' is set then attempt to upload the first descriptor
    of the service, otherwise the second.
    """
    from onionbalance.hs_v3.onionbalance import my_onionbalance

    # Human-readable label used in the log lines below.
    desc_label = "first" if is_first_desc else "second"

    if not self._should_publish_descriptor_now(is_first_desc):
        logger.info("No reason to publish %s descriptor for %s",
                    desc_label, self.onion_address)
        return

    try:
        intro_points = self._get_intros_for_desc()
    except NotEnoughIntros:
        return

    # Derive blinding parameter for the current time period.
    _, time_period_number = hashring.get_srv_and_time_period(is_first_desc)
    blinding_param = my_onionbalance.consensus.get_blinding_param(
        self._get_identity_pubkey_bytes(), time_period_number)

    try:
        desc = descriptor.OBDescriptor(self.onion_address, self.identity_priv_key,
                                       blinding_param, intro_points, is_first_desc)
    except descriptor.BadDescriptor:
        return

    logger.info(
        "Service %s created %s descriptor (%s intro points) (blinding param: %s) (size: %s bytes). About to publish:",
        self.onion_address, desc_label,
        len(desc.intro_set), blinding_param.hex(), len(str(desc.v3_desc)))

    # When we do a v3 HSPOST on the control port, Tor decodes the
    # descriptor and extracts the blinded pubkey to be used when uploading
    # the descriptor. So let's do the same to compute the responsible
    # HSDirs:
    blinded_key = desc.get_blinded_key()

    # Calculate responsible HSDirs for our service
    try:
        responsible_hsdirs = hashring.get_responsible_hsdirs(
            blinded_key, is_first_desc)
    except hashring.EmptyHashRing:
        logger.warning("Can't publish desc with no hash ring. Delaying...")
        return

    logger.info("Uploading %s descriptor for %s to %s",
                desc_label, self.onion_address, responsible_hsdirs)

    # Upload descriptor
    self._upload_descriptor(my_onionbalance.controller.controller,
                            desc, responsible_hsdirs)

    # It would be better to set last_upload_ts when an upload succeeds and
    # not when an upload is just attempted. Unfortunately the HS_DESC
    # UPLOADED event does not provide information about the service and so
    # it can't be used to determine when descriptor upload succeeds.
    desc.set_last_upload_ts(datetime.datetime.utcnow())
    desc.set_responsible_hsdirs(responsible_hsdirs)

    # Remember the descriptor we just pushed out.
    if is_first_desc:
        self.first_descriptor = desc
    else:
        self.second_descriptor = desc