async def serve(self):
    """
    Subscribe to the pubsub topic, read and filter incoming messages
    (dropping the ones emitted by us). Selected messages are put on the
    async queue (inQueue) to be later treated in the processMessages()
    task
    """

    try:
        async for message in self.client.pubsub.sub(self.topic):
            sender = message['from'] if isinstance(
                message['from'], str) else message['from'].decode()

            await self.rawMessageReceived.emit(
                self.topic, sender, message['data'])

            if await self.filtered(message):
                continue

            self.ipfsCtx.pubsub.psMessageRx.emit()

            await self.inQueue.put(message)
            self._receivedCount += 1
            await asyncio.sleep(0)
    except asyncio.CancelledError:
        self.debug('Cancelled, queue size was {0}'.format(
            self.inQueue.qsize()))
        return
    except Exception as e:
        traceback.print_exc()
        self.debug('Serve interrupted by unknown exception {}'.format(
            str(e)))
        ensureLater(10, self.serve)
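
# The retry above relies on galacteek's ensureLater() helper to reschedule
# serve() after a delay. A minimal sketch of what such a delayed-scheduling
# helper could look like with plain asyncio follows; this is an illustrative
# assumption, not the actual galacteek implementation.

def ensureLaterSketch(delay, coroFn, *args, **kwargs):
    """
    Hypothetical helper: run coroFn(*args, **kwargs) as a task after
    `delay` seconds (the real ensureLater may differ).
    """
    import asyncio

    async def runner():
        await asyncio.sleep(delay)
        await coroFn(*args, **kwargs)

    return asyncio.ensure_future(runner())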
async def publishDag(self, op, allowOffline=False, reschedule=False):
    if not self.dagUser.dagCid:
        self.debug('DAG CID not set yet ?')
        return

    self.debug('Publishing profile DAG with CID {}'.format(
        self.dagUser.dagCid))

    result = await op.publish(self.dagUser.dagCid,
                              key=self.keyRootId,
                              allow_offline=allowOffline,
                              cache='always',
                              cacheOrigin='profile',
                              resolve=True,
                              lifetime='48h')

    if result is None:
        self.debug('DAG publish failed')
    else:
        self.debug('DAG publish success: {}'.format(result))

    if reschedule is True:
        self.debug('Rescheduling user DAG publish')
        ensureLater(60 * 10, self.publishDag,
                    reschedule=reschedule,
                    allowOffline=allowOffline)
async def watch(self):
    await self.sJobActive.emit(self, True)

    try:
        await self._watch()
    except asyncio.CancelledError:
        self.treeWidget().setItemWidget(self, self.COL_STATUS, None)
        self.status('Cancelled')
    except Exception:
        pass

    await self.sJobActive.emit(self, False)
    ensureLater(1, self.updateControls)
async def publish(self, ipfsop, timeout=None):
    """
    Publish the DID document to the IPNS key

    We always cache the record so that your DID is always resolvable
    whatever the connection state.

    :rtype: bool
    """

    # Get config settings
    timeout = timeout if timeout else cGet('publish.ipns.timeout')
    autoRepublish = cGet('publish.autoRepublish')
    republishDelay = cGet('publish.autoRepublishDelay')

    ipnsLifetime = cGet('publish.ipns.lifetime')
    ipnsTtl = cGet('publish.ipns.ttl')

    if not self.docCid:
        return False

    if autoRepublish is True:
        # Auto republish for local IPIDs
        ensureLater(
            republishDelay,
            self.publish
        )

    try:
        if await ipfsop.publish(self.docCid,
                                key=self.ipnsKey,
                                lifetime=ipnsLifetime,
                                ttl=ipnsTtl,
                                cache='always',
                                cacheOrigin='ipidmanager',
                                timeout=timeout):
            self.message('Published !')
            self.message(
                'Published IPID {did} with docCid: {docCid}'.format(
                    did=self.did, docCid=self.docCid))
            return True
        else:
            self.message('Error publishing IPID with DID: {did}'.format(
                did=self.did))
            return False
    except Exception as e:
        self.message(str(e))
        return False
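
# The auto-republish path above reschedules publish() every republishDelay
# seconds, while the IPNS record itself carries a lifetime (e.g. '48h').
# A small illustrative check, with assumed names and an assumed go-style
# duration format, showing the invariant one would want between the two:

def republishSettingsSaneSketch(republishDelay, ipnsLifetime='48h'):
    """
    Hypothetical sketch: the republish delay (in seconds) should stay well
    below the IPNS record lifetime, so the record cannot expire between
    two republish runs. Assumes lifetimes expressed in hours ('48h').
    """
    lifetimeSecs = int(ipnsLifetime.rstrip('h')) * 3600
    return republishDelay < lifetimeSecs / 2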
async def joinChannel(self, ipfsop, channel, chanSticky=False):
    if channel in self.channelWidgets:
        return self.channelWidgets[channel]

    cAgent = ipfsop.curve25519Agent
    pubKeyCid = await cAgent.pubKeyCid()

    chatToken = await ChatToken.make(ipfsop, channel, pubKeyCid,
                                     encType='curve25519')
    log.debug(chatToken.pretty())

    # Create the JWS and import it without pinning
    jwsToken = await ipfsop.rsaAgent.jwsTokenObj(
        orjson.dumps(chatToken.data).decode())
    jwsTokenEntry = await ipfsop.addJson(jwsToken, pin=False)

    # Create an aiopubsub key and start the pubsub service
    key = makeKeyChatChannel(channel)

    service = PSEncryptedChatChannelService(
        ipfsop.ctx,
        channel,
        chatToken.psTopic,
        jwsTokenEntry['Hash'],
        cAgent.privKey,
        key,
        scheduler=self.app.scheduler)

    ipfsop.ctx.pubsub.reg(service)
    await service.startListening()

    self.userListRev = uid4()

    # Create the chatroom widget
    self.channelWidgets[channel] = w = ChatRoomWidget(
        channel, service, self.app.mainWindow, sticky=chanSticky)

    ensure(w.startHeartbeatTask())
    ensureLater(3, w.sendStatusJoin)

    return self.channelWidgets[channel]
async def sendChannelsStatus(self):
    msgConfig = cGet('services.chat.messages.UserChannelsListMessage',
                     mod='galacteek.ipfs.pubsub.srvs')
    delay = msgConfig.publishTimer

    pubChannels = []

    for chan, _w in self.channelWidgets.items():
        pubChannels.append({
            'tokenSigMethod': 'rsa',
            'sessionJwsCid': _w.psService.jwsTokenCid
        })

    userChannelsMsg = UserChannelsListMessage.make(self.userListRev,
                                                   pubChannels)

    # Publish on the keyChatChanList PS key, the PS service catches it
    # and sends the message
    gHub.publish(keyChatChanList, userChannelsMsg)

    ensureLater(delay, self.sendChannelsStatus)
async def serve(self, ipfsop):
    """
    Subscribe to the pubsub topic, read and filter incoming messages
    (dropping the ones emitted by us). Selected messages are put on the
    async queue (inQueue) to be later treated in the processMessages()
    task
    """

    topic = self.topic()

    # We're only in the bytopic list when listening
    self.ipfsCtx.pubsub.reg(self)

    try:
        async for message in ipfsop.client.pubsub.sub(topic):
            if await self.filtered(message):
                continue

            self.ipfsCtx.pubsub.psMessageRx.emit()

            if self._shuttingDown:
                return

            await self.inQueue.put(message)
            self._receivedCount += 1
            await asyncio.sleep(0)
    except asyncio.CancelledError:
        self.ipfsCtx.pubsub.unreg(self)

        self.debug('Cancelled, queue size was {0}'.format(
            self.inQueue.qsize()))
        return
    except Exception as err:
        traceback.print_exc()

        # Unregister, then retry serving in a few seconds
        self.ipfsCtx.pubsub.unreg(self)

        self.debug(f'Serve interrupted by unknown exception {err}')
        ensureLater(5, self.serve, ipfsop)
async def joinChannel(self, ipfsop, channel):
    topic = chatChannelTopic(channel)
    service = ipfsop.ctx.pubsub.byTopic(topic)

    if not service:
        key = makeKeyChatChannel(channel)

        service = PSChatChannelService(
            ipfsop.ctx,
            self.app.ipfsClient,
            channel,
            key,
            scheduler=self.app.scheduler)

        ipfsop.ctx.pubsub.reg(service)
        await service.start()

    w = self.channelWidgets.get(channel)
    if not w:
        self.channelWidgets[channel] = w = ChatRoomWidget(
            channel, service, self.app.mainWindow)

        ensure(w.startHeartbeatTask())
        ensureLater(1, w.sendStatusJoin)

    return self.channelWidgets[channel]
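
# Both joinChannel() variants look services up and register them through
# ipfsop.ctx.pubsub (byTopic(), reg(), unreg()). A minimal sketch of what
# such a topic-keyed service registry could look like; the class name and
# structure are assumptions, not the actual galacteek registry.

class PubsubServiceRegistrySketch:
    def __init__(self):
        self._byTopic = {}

    def reg(self, service):
        # Index the service by its pubsub topic (topic() assumed callable)
        self._byTopic[service.topic()] = service

    def unreg(self, service):
        self._byTopic.pop(service.topic(), None)

    def byTopic(self, topic):
        return self._byTopic.get(topic)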
async def registerFromIdent(self, op, iMsg):
    # iMsg is a PeerIdentMessage

    if iMsg.peer not in self.byPeerId:
        peerValidated = False
        now = int(time.time())
        avgPing = await op.waitFor(op.pingAvg(iMsg.peer, count=2), 5)

        personDid = iMsg.personDid

        if not ipidFormatValid(personDid):
            log.debug('Invalid DID: {}'.format(personDid))
            return

        try:
            mType, stat = await self.app.rscAnalyzer(iMsg.iphandleqrpngcid)
        except Exception:
            log.debug('Invalid QR: {}'.format(iMsg.iphandleqrpngcid))
        else:
            statInfo = StatInfo(stat)

            if not statInfo.valid or statInfo.dataLargerThan(
                    kilobytes(512)) or not mType or not mType.isImage:
                log.debug('Invalid stat for QR: {}'.format(
                    iMsg.iphandleqrpngcid))
                return

            if await self.validateQr(
                    iMsg.iphandleqrpngcid, iMsg) is not True:
                log.debug('Invalid QR: {}'.format(iMsg.iphandleqrpngcid))
                peerValidated = False
            else:
                log.debug('Ident QR {qr} for {peer} seems valid'.format(
                    qr=iMsg.iphandleqrpngcid, peer=iMsg.peer))
                peerValidated = True

            ensure(op.ctx.pin(iMsg.iphandleqrpngcid))

        # Load the IPID
        ipid = await self.app.ipidManager.load(
            personDid,
            initialCid=iMsg.personDidCurCid,
            track=True
        )

        if not ipid:
            log.debug('Cannot load DID: {}'.format(personDid))
            return

        async with self.lock:
            pCtx = PeerCtx(self.ctx, iMsg.peer, iMsg, ipid,
                           pingavg=avgPing if avgPing else 0,
                           pinglast=now if avgPing else 0,
                           validated=peerValidated)

            ipid.sChanged.connectTo(partialEnsure(
                self.onPeerDidModified, pCtx))
            pCtx.sInactive.connectTo(self.onUnresponsivePeer)

            ensure(self.didPerformAuth(pCtx))

            self._byPeerId[iMsg.peer] = pCtx
            ensureLater(60, pCtx.watch)

        await self.peerAdded.emit(iMsg.peer)
    else:
        # This peer is already registered
        # What we ought to do here is just to refresh the DID document

        async with self.lock:
            pCtx = self.getByPeerId(iMsg.peer)
            if pCtx:
                log.debug('Updating ident for peer {}'.format(iMsg.peer))
                pCtx.ident = iMsg

                log.debug('Refreshing DID: {}'.format(pCtx.ipid))
                await pCtx.ipid.refresh()

        await self.peerModified.emit(iMsg.peer)

    await self.changed.emit()
async def watch(self):
    if self.peerUnresponsive:
        await self.sInactive.emit(self.peerId)
    else:
        ensureLater(120, self.watch)
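
# The watch() above forms a polling loop by rescheduling itself with
# ensureLater() until the peer becomes unresponsive. For comparison, a
# sketch of the same behaviour written as a single long-lived asyncio
# task; peerCtx is assumed to expose the same peerUnresponsive,
# sInactive and peerId attributes used above.

async def watchLoopSketch(peerCtx):
    import asyncio

    while not peerCtx.peerUnresponsive:
        await asyncio.sleep(120)

    await peerCtx.sInactive.emit(peerCtx.peerId)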