def _cb_bodyready(self, txresponse, request):
    """Start reading the response body once the headers have arrived.

    Returns a ``(txresponse, body, flags)`` tuple straight away for
    body-less responses, otherwise a Deferred fired by the
    ``_ResponseReader`` when the body has been fully received.
    """
    # deliverBody hangs for responses without body
    if txresponse.length == 0:
        return txresponse, b'', None

    # Per-request meta values override the handler-wide defaults.
    maxsize = request.meta.get('download_maxsize', self._maxsize)
    warnsize = request.meta.get('download_warnsize', self._warnsize)
    if txresponse.length == UNKNOWN_LENGTH:
        expected_size = -1
    else:
        expected_size = txresponse.length

    # A falsy maxsize disables the limit entirely.
    if maxsize and expected_size > maxsize:
        error_message = ("Cancelling download of {url}: expected response "
                         "size ({size}) larger than "
                         "download max size ({maxsize}).").format(
            url=request.url, size=expected_size, maxsize=maxsize)
        logger.error(error_message)
        txresponse._transport._producer.loseConnection()
        raise defer.CancelledError(error_message)

    if warnsize and expected_size > warnsize:
        logger.warning("Expected response size (%(size)s) larger than "
                       "download warn size (%(warnsize)s).",
                       {'size': expected_size, 'warnsize': warnsize})

    def _drop_connection(_):
        # Canceller: close the connection if the download is cancelled.
        txresponse._transport._producer.loseConnection()

    finished = defer.Deferred(_drop_connection)
    txresponse.deliverBody(
        _ResponseReader(finished, txresponse, request, maxsize, warnsize))

    # save response for timeouts
    self._txresponse = txresponse
    return finished
def _cb_bodyready(self, txresponse, request):
    """Wire up delivery of the response body and return the result.

    FIX: the previous version began with a debug ``print`` followed by
    an unconditional ``return``, which made the entire method dead code
    (it always returned ``None``); the empty-body branch also only
    printed and never returned a result. Both are repaired here.
    """
    # deliverBody hangs for responses without body, so finish
    # immediately with an empty body.
    # NOTE(review): dict shape matches the crawler-era handler that
    # this _ResponseReader signature (crawler kwarg) belongs to —
    # confirm against the download-handler callback consuming this.
    if txresponse.length == 0:
        return {
            "txresponse": txresponse,
            "body": b"",
            "flags": None,
            "certificate": None,
            "ip_address": None,
        }

    # Per-request meta values override the handler-wide defaults.
    maxsize = request.meta.get('download_maxsize', self._maxsize)
    warnsize = request.meta.get('download_warnsize', self._warnsize)
    expected_size = txresponse.length if txresponse.length != UNKNOWN_LENGTH else -1
    fail_on_dataloss = request.meta.get('download_fail_on_dataloss', self._fail_on_dataloss)

    # Abort before reading a body we already know is too large.
    if maxsize and expected_size > maxsize:
        err_msg = 'maxsize reached'
        txresponse._transport._producer.loseConnection()
        raise defer.CancelledError(err_msg)

    def _cancel(_):
        # Forcefully drop the connection if the download is cancelled.
        txresponse._transport._producer.abortConnection()

    d = defer.Deferred(_cancel)
    txresponse.deliverBody(
        _ResponseReader(
            finished=d,
            txresponse=txresponse,
            request=request,
            maxsize=maxsize,
            warnsize=warnsize,
            fail_on_dataloss=fail_on_dataloss,
            crawler=self._crawler,
        )
    )
    # save response for timeouts
    self._txresponse = txresponse
    return d
def _cb_body_get(self, transferdata, request):
    """Begin delivery of the response body.

    ``transferdata.length`` is the concrete Content-Length value when
    the response carries that header, otherwise ``UNKNOWN_LENGTH``.

    FIXES: (1) the empty-body case only logged and fell through, which
    left ``deliverBody`` hanging on responses without a body — it now
    returns immediately; (2) ``logger.debug("length: ", x)`` passed a
    %-format argument with no placeholder, which raises a formatting
    error inside the logging machinery.
    """
    # deliverBody hangs for responses without a body, so finish at once.
    if transferdata.length == 0:
        logger.debug("length: %s", transferdata.length)
        # NOTE(review): (response, body, flags) mirrors the standard
        # Scrapy empty-body result — confirm against the callback
        # consuming this handler's output.
        return transferdata, b'', None

    # Per-request meta values override the handler-wide defaults.
    maxsize = request.meta.get('download_maxsize', self._maxsize)
    warnsize = request.meta.get('download_warnsize', self._warnsize)
    expected_size = transferdata.length if transferdata.length is not UNKNOWN_LENGTH else -1
    fail_on_dataloss = request.meta.get('download_fail_on_dataloss', self._fail_on_dataloss)

    # A falsy maxsize (0/None) disables the size limit entirely.
    if maxsize and expected_size > maxsize:
        error_msg = ("%(url)s 网页的大小(%(size)s)已经超过可容许下载的最大值(%(maxsize)s).")
        error_args = {'url': request.url, "size": expected_size, 'maxsize': maxsize}
        logger.error(error_msg, error_args)
        # method of twisted.protocols.tls.TLSMemoryBIOProtocol
        transferdata._transport._producer.loseConnection()
        raise defer.CancelledError(error_msg % error_args)

    def _cancel(_):
        # Abort (not close) so a cancelled download drops the socket at once.
        transferdata._transport._producer.abortConnection()

    finished = defer.Deferred(_cancel)
    transferdata.deliverBody(_ResponseReader(
        finished, transferdata, request, maxsize, warnsize, fail_on_dataloss, self.lfm
    ))
    # Kept so timeout handling can see data has started arriving.
    self._transferdata = transferdata
    return finished
def _cb_bodyready(self, txresponse, request):
    """Attach a body reader to *txresponse*, enforcing size limits.

    Body-less responses produce a result dict immediately; otherwise a
    Deferred is returned which the ``_ResponseReader`` fires once the
    body has been fully received.
    """
    # deliverBody hangs for responses without body
    if txresponse.length == 0:
        return {
            "txresponse": txresponse,
            "body": b"",
            "flags": None,
            "certificate": None,
            "ip_address": None,
        }

    # Per-request meta overrides fall back to the handler-wide defaults.
    maxsize = request.meta.get('download_maxsize', self._maxsize)
    warnsize = request.meta.get('download_warnsize', self._warnsize)
    fail_on_dataloss = request.meta.get('download_fail_on_dataloss', self._fail_on_dataloss)
    if txresponse.length == UNKNOWN_LENGTH:
        expected_size = -1
    else:
        expected_size = txresponse.length

    if maxsize and expected_size > maxsize:
        error_msg = (
            "Cancelling download of %(url)s: expected response "
            "size (%(size)s) larger than download max size (%(maxsize)s).")
        error_args = {'url': request.url, 'size': expected_size, 'maxsize': maxsize}
        logger.error(error_msg, error_args)
        txresponse._transport._producer.loseConnection()
        raise defer.CancelledError(error_msg % error_args)

    if warnsize and expected_size > warnsize:
        logger.warning(
            "Expected response size (%(size)s) larger than "
            "download warn size (%(warnsize)s) in request %(request)s.",
            {'size': expected_size, 'warnsize': warnsize, 'request': request})

    def _abort(_):
        # Abort connection immediately.
        txresponse._transport._producer.abortConnection()

    finished = defer.Deferred(_abort)
    reader = _ResponseReader(
        finished=finished,
        txresponse=txresponse,
        request=request,
        maxsize=maxsize,
        warnsize=warnsize,
        fail_on_dataloss=fail_on_dataloss,
        crawler=self._crawler,
    )
    txresponse.deliverBody(reader)

    # save response for timeouts
    self._txresponse = txresponse
    return finished
def _cb_bodyready(self, txresponse, request):
    """Start reading the response body, enforcing size limits.

    FIX: the empty-body fast path returned a ``str`` body (``''``)
    while the ``_ResponseReader`` path produces bytes; it now returns
    ``b''`` so callers always receive a bytes body (``b'' == ''`` on
    Python 2, so this is backward-compatible).
    """
    # deliverBody hangs for responses without body
    if txresponse.length == 0:
        return txresponse, b'', None

    # Per-request meta values override the handler-wide defaults.
    maxsize = request.meta.get('download_maxsize', self._maxsize)
    warnsize = request.meta.get('download_warnsize', self._warnsize)
    expected_size = txresponse.length if txresponse.length != UNKNOWN_LENGTH else -1

    if maxsize and expected_size > maxsize:
        logger.error(
            "Expected response size (%(size)s) larger than "
            "download max size (%(maxsize)s).",
            {'size': expected_size, 'maxsize': maxsize})
        txresponse._transport._producer.loseConnection()
        raise defer.CancelledError()

    if warnsize and expected_size > warnsize:
        logger.warning(
            "Expected response size (%(size)s) larger than "
            "download warn size (%(warnsize)s).",
            {'size': expected_size, 'warnsize': warnsize})

    def _cancel(_):
        # Close the connection if the download Deferred is cancelled.
        txresponse._transport._producer.loseConnection()

    d = defer.Deferred(_cancel)
    txresponse.deliverBody(
        _ResponseReader(d, txresponse, request, maxsize, warnsize))
    return d
def _cb_bodyready(self, txresponse, request):
    """Start reading the response body, enforcing size limits.

    FIXES: typo in the warn-size log message ("downlod" -> "download");
    the empty-body fast path returned a ``str`` body while the reader
    path produces bytes — ``b''`` is now returned (``b'' == ''`` on
    Python 2, so this is backward-compatible).
    """
    # deliverBody hangs for responses without body
    if txresponse.length == 0:
        return txresponse, b'', None

    # Per-request meta values override the handler-wide defaults.
    maxsize = request.meta.get('download_maxsize', self._maxsize)
    warnsize = request.meta.get('download_warnsize', self._warnsize)
    expected_size = txresponse.length if txresponse.length != UNKNOWN_LENGTH else -1

    if maxsize and expected_size > maxsize:
        log.msg("Expected response size (%s) larger than download max size (%s)." % (expected_size, maxsize),
                logLevel=log.ERROR)
        txresponse._transport._producer.loseConnection()
        raise defer.CancelledError()

    if warnsize and expected_size > warnsize:
        log.msg("Expected response size (%s) larger than download warn size (%s)." % (expected_size, warnsize),
                logLevel=log.WARNING)

    def _cancel(_):
        # Close the connection if the download Deferred is cancelled.
        txresponse._transport._producer.loseConnection()

    d = defer.Deferred(_cancel)
    txresponse.deliverBody(_ResponseReader(d, txresponse, request, maxsize, warnsize))
    return d
def cancel(self):
    """Cancel this deferred.

    If it has not fired yet, errback with CancelledError and invoke
    the canceller. If it is currently waiting on another Deferred,
    forward the cancellation to that Deferred instead.
    """
    if self.called:
        if isinstance(self.result, defer.Deferred):
            # Waiting for another deferred -- cancel it instead.
            return self.result.cancel()
        # Already completed with a final result: nothing to cancel.
        return None
    # Not fired yet: fail with CancelledError, then let the
    # canceller perform any extra cleanup.
    self.errback(failure.Failure(defer.CancelledError()))
    return self._canceller(self)
def _cancel(self, d):
    """Canceller for the negotiation Deferred: hang up the connection."""
    # Record why we're going away, and stop reacting to further events.
    self._error = defer.CancelledError()
    self.state = "hung up"
    self.transport.loseConnection()
    # If connectionLost wasn't invoked synchronously by loseConnection,
    # Deferred.cancel (our caller) has already errbacked
    # self._negotiation_d; drop any stale reference so we don't fire
    # it a second time.
    if self._negotiation_d:
        self._negotiation_d = None
def chainError(err):
    """Errback for the CF commit chain.

    Logs unexpected failures and normalises them during shutdown.
    NOTE(review): relies on ``self``, ``d`` and ``_log`` from the
    enclosing scope (closure) — confirm against the surrounding method.
    """
    # Cancellations are expected; only log genuine failures.
    if not err.check(defer.CancelledError):
        _log.error("CF_COMMIT FAILURE: %s", err)
    if self.cancelled:
        # Shutting down: convert any non-cancellation failure into a
        # CancelledError; pass a real cancellation through unchanged.
        if not err.check(defer.CancelledError):
            raise defer.CancelledError()
        return err
    else:
        # Not cancelled: swallow the failure and fire the chained
        # deferred successfully so processing continues.
        d.callback(None)
def test_rejoin_after_error(self):
    """
    try out all the rejoin_after_error scenarios
    """
    client = self.mock_client([])
    coord = self.make_coordinator(client)
    coord.on_group_leave = Mock()

    def check(rejoin_needed, exc):
        # Reset coordinator/mock state, feed the failure to
        # rejoin_after_error, and verify whether a delayed rejoin
        # attempt was scheduled.
        coord._rejoin_needed = False
        coord._rejoin_wait_dc = None
        for call in client.reactor.getDelayedCalls():
            call.cancel()
        client.reset_consumer_group_metadata.reset_mock()
        coord.on_group_leave.reset_mock()
        coord.rejoin_after_error(Failure(exc))
        if rejoin_needed:
            self.assertEqual(coord._rejoin_needed, True)
            assert_delayed_calls(1, client)
        else:
            self.assertEqual(coord._rejoin_needed, False)
            assert_delayed_calls(0, client)
            self.assertEqual(coord._rejoin_wait_dc, None)

    # Recoverable errors: a rejoin must be scheduled.
    check(True, RebalanceInProgress())
    check(True, CoordinatorNotAvailable())
    # Coordinator errors should also refresh the group metadata.
    client.reset_consumer_group_metadata.assert_any_call(coord.group_id)
    check(True, IllegalGeneration())
    coord.on_group_leave.assert_any_call()
    check(True, InvalidGroupId())
    coord.on_group_leave.assert_any_call()
    check(True, InconsistentGroupProtocol())
    check(True, RequestTimedOutError())
    coord.on_group_leave.assert_any_call()
    check(True, UnknownError())
    # While stopping, a cancellation must NOT trigger a rejoin.
    coord._stopping = True
    check(False, defer.CancelledError())
    coord._stopping = False
    # A non-Kafka error propagates out through the start() deferred.
    start_d = coord.start()
    start_d.addErrback(lambda f: None)
    check(False, ValueError())
    coord.on_group_leave.assert_any_call()
    self.successResultOf(start_d)
def _cb_bodyready(self, txresponse, request):
    """Start reading the response body once headers have arrived.

    Returns a ``(txresponse, body, flags)`` tuple straight away for
    body-less responses, otherwise a Deferred fired by the
    ``_ResponseReader`` when the body has been fully received.
    """
    # deliverBody hangs for responses without body
    if txresponse.length == 0:
        return txresponse, b'', None

    # Per-request meta values override the handler-wide defaults.
    maxsize = request.meta.get('download_maxsize', self._maxsize)
    warnsize = request.meta.get('download_warnsize', self._warnsize)
    fail_on_dataloss = request.meta.get('download_fail_on_dataloss', self._fail_on_dataloss)
    if txresponse.length == UNKNOWN_LENGTH:
        expected_size = -1
    else:
        expected_size = txresponse.length

    if maxsize and expected_size > maxsize:
        error_msg = (
            "Cancelling download of %(url)s: expected response "
            "size (%(size)s) larger than download max size (%(maxsize)s).")
        error_args = {'url': request.url, 'size': expected_size, 'maxsize': maxsize}
        logger.error(error_msg, error_args)
        txresponse._transport._producer.loseConnection()
        raise defer.CancelledError(error_msg % error_args)

    if warnsize and expected_size > warnsize:
        logger.warning(
            "Expected response size (%(size)s) larger than "
            "download warn size (%(warnsize)s).",
            {'size': expected_size, 'warnsize': warnsize})

    def _drop_connection(_):
        # Canceller: close the connection if the download is cancelled.
        txresponse._transport._producer.loseConnection()

    finished = defer.Deferred(_drop_connection)
    txresponse.deliverBody(
        _ResponseReader(finished, txresponse, request, maxsize,
                        warnsize, fail_on_dataloss))

    # save response for timeouts
    self._txresponse = txresponse
    return finished
def _cb_bodyready(self, txresponse, request):
    """Handle received headers: fire the headers_received signal, then
    attach a body reader (or return an empty-body result immediately).
    """
    # Let signal handlers inspect the headers; a handler raising
    # StopDownload aborts the download before any body is read.
    headers_received_result = self._crawler.signals.send_catch_log(
        signal=signals.headers_received,
        headers=self._headers_from_twisted_response(txresponse),
        body_length=txresponse.length,
        request=request,
        spider=self._crawler.spider,
    )
    for handler, result in headers_received_result:
        if isinstance(result, Failure) and isinstance(result.value, StopDownload):
            logger.debug(
                "Download stopped for %(request)s from signal handler %(handler)s",
                {"request": request, "handler": handler.__qualname__})
            txresponse._transport.stopProducing()
            # Some transports have no _producer attribute; best-effort close.
            with suppress(AttributeError):
                txresponse._transport._producer.loseConnection()
            return {
                "txresponse": txresponse,
                "body": b"",
                "flags": ["download_stopped"],
                "certificate": None,
                "ip_address": None,
                # StopDownload(fail=False) yields a successful empty response.
                "failure": result if result.value.fail else None,
            }

    # deliverBody hangs for responses without body
    if txresponse.length == 0:
        return {
            "txresponse": txresponse,
            "body": b"",
            "flags": None,
            "certificate": None,
            "ip_address": None,
        }

    # Per-request meta values override the handler-wide defaults.
    maxsize = request.meta.get('download_maxsize', self._maxsize)
    warnsize = request.meta.get('download_warnsize', self._warnsize)
    expected_size = txresponse.length if txresponse.length != UNKNOWN_LENGTH else -1
    fail_on_dataloss = request.meta.get('download_fail_on_dataloss', self._fail_on_dataloss)

    # Abort before reading a body we already know exceeds maxsize.
    if maxsize and expected_size > maxsize:
        warning_msg = (
            "Cancelling download of %(url)s: expected response "
            "size (%(size)s) larger than download max size (%(maxsize)s).")
        warning_args = {
            'url': request.url,
            'size': expected_size,
            'maxsize': maxsize
        }
        logger.warning(warning_msg, warning_args)
        txresponse._transport._producer.loseConnection()
        raise defer.CancelledError(warning_msg % warning_args)

    if warnsize and expected_size > warnsize:
        logger.warning(
            "Expected response size (%(size)s) larger than "
            "download warn size (%(warnsize)s) in request %(request)s.",
            {
                'size': expected_size,
                'warnsize': warnsize,
                'request': request
            })

    def _cancel(_):
        # Abort connection immediately. 
        txresponse._transport._producer.abortConnection()

    d = defer.Deferred(_cancel)
    txresponse.deliverBody(
        _ResponseReader(
            finished=d,
            txresponse=txresponse,
            request=request,
            maxsize=maxsize,
            warnsize=warnsize,
            fail_on_dataloss=fail_on_dataloss,
            crawler=self._crawler,
        ))
    # save response for timeouts
    self._txresponse = txresponse
    return d
def __updateCF__(proc, pvInfoByName, delrec, hostName, iocName, iocid, owner, iocTime):
    """Synchronise ChannelFinder with the records reported by one IOC.

    ``proc`` carries the processor state (client, caches, config).
    Raises defer.CancelledError at several checkpoints when
    ``proc.cancelled`` is set.
    NOTE(review): runs outside the reactor thread via the caller's
    poll() helper — confirm threading assumptions against the caller.
    """
    # Consider making this function a class methed then 'proc' simply becomes 'self'
    client = proc.client
    channels_dict = proc.channel_dict
    iocs = proc.iocs
    conf = proc.conf
    # PV names reported by this commit; matched names are removed from
    # this set below, leaving only genuinely new channels at the end.
    new = set(pvInfoByName.keys())

    # Prefer the cached IOC metadata over the caller-supplied values.
    if iocid in iocs:
        hostName = iocs[iocid]["hostname"]
        iocName = iocs[iocid]["iocname"]
        owner = iocs[iocid]["owner"]
        iocTime = iocs[iocid]["time"]
    else:
        _log.warn('IOC Env Info not found: %s', iocid)

    if hostName is None or iocName is None:
        raise Exception('missing hostName or iocName')

    if proc.cancelled:
        raise defer.CancelledError()

    channels = []
    """A list of channels in channelfinder with the associated hostName and iocName"""
    _log.debug('Find existing channels by IOCID: %s', iocid)
    old = client.findByArgs(prepareFindArgs(conf, [('iocid', iocid)]))
    if proc.cancelled:
        raise defer.CancelledError()

    if old is not None:
        for ch in old:
            if len(new) == 0 or ch[u'name'] in delrec:  # case: empty commit/del, remove all reference to ioc
                if ch[u'name'] in channels_dict:
                    # Channel is still claimed by an earlier IOC: point it back there.
                    ch[u'owner'] = iocs[channels_dict[ch[u'name']][-1]]["owner"]
                    ch[u'properties'] = __merge_property_lists(
                        [{u'name': 'hostName', u'owner': owner, u'value': iocs[channels_dict[ch[u'name']][-1]]["hostname"]},
                         {u'name': 'iocName', u'owner': owner, u'value': iocs[channels_dict[ch[u'name']][-1]]["iocname"]},
                         {u'name': 'iocid', u'owner': owner, u'value': channels_dict[ch[u'name']][-1]},
                         {u'name': 'pvStatus', u'owner': owner, u'value': 'Active'},
                         {u'name': 'time', u'owner': owner, u'value': iocTime}],
                        ch[u'properties'])
                    channels.append(ch)
                    _log.debug("Add existing channel to previous IOC: %s", channels[-1])
                    """In case alias exist, also delete them"""
                    if (conf.get('alias', 'default') == 'on'):
                        if ch[u'name'] in pvInfoByName and "aliases" in pvInfoByName[ch[u'name']]:
                            for a in pvInfoByName[ch[u'name']]["aliases"]:
                                if a[u'name'] in channels_dict:
                                    a[u'owner'] = iocs[channels_dict[a[u'name']][-1]]["owner"]
                                    a[u'properties'] = __merge_property_lists(
                                        [{u'name': 'hostName', u'owner': owner, u'value': iocs[channels_dict[a[u'name']][-1]]["hostname"]},
                                         {u'name': 'iocName', u'owner': owner, u'value': iocs[channels_dict[a[u'name']][-1]]["iocname"]},
                                         {u'name': 'iocid', u'owner': owner, u'value': channels_dict[a[u'name']][-1]},
                                         {u'name': 'pvStatus', u'owner': owner, u'value': 'Active'},
                                         {u'name': 'time', u'owner': owner, u'value': iocTime}],
                                        a[u'properties'])
                                    channels.append(a)
                                    _log.debug("Add existing alias to previous IOC: %s", channels[-1])
                else:
                    """Orphan the channel : mark as inactive, keep the old hostName and iocName"""
                    ch[u'properties'] = __merge_property_lists(
                        [{u'name': 'pvStatus', u'owner': owner, u'value': 'Inactive'},
                         {u'name': 'time', u'owner': owner, u'value': iocTime}],
                        ch[u'properties'])
                    channels.append(ch)
                    _log.debug("Add orphaned channel with no IOC: %s", channels[-1])
                    """Also orphan any alias"""
                    if (conf.get('alias', 'default') == 'on'):
                        if ch[u'name'] in pvInfoByName and "aliases" in pvInfoByName[ch[u'name']]:
                            for a in pvInfoByName[ch[u'name']]["aliases"]:
                                a[u'properties'] = __merge_property_lists(
                                    [{u'name': 'pvStatus', u'owner': owner, u'value': 'Inactive'},
                                     {u'name': 'time', u'owner': owner, u'value': iocTime}],
                                    a[u'properties'])
                                channels.append(a)
                                _log.debug("Add orphaned alias with no IOC: %s", channels[-1])
            else:
                if ch[u'name'] in new:  # case: channel in old and new
                    """
                    Channel exists in Channelfinder with same hostname and iocname.
                    Update the status to ensure it is marked active and update the time.
                    """
                    ch[u'properties'] = __merge_property_lists(
                        [{u'name': 'pvStatus', u'owner': owner, u'value': 'Active'},
                         {u'name': 'time', u'owner': owner, u'value': iocTime}],
                        ch[u'properties'])
                    channels.append(ch)
                    _log.debug("Add existing channel with same IOC: %s", channels[-1])
                    new.remove(ch[u'name'])

                    """In case, alias exist"""
                    if (conf.get('alias', 'default') == 'on'):
                        if ch[u'name'] in pvInfoByName and "aliases" in pvInfoByName[ch[u'name']]:
                            for a in pvInfoByName[ch[u'name']]["aliases"]:
                                if a in old:
                                    """alias exists in old list"""
                                    a[u'properties'] = __merge_property_lists(
                                        [{u'name': 'pvStatus', u'owner': owner, u'value': 'Active'},
                                         {u'name': 'time', u'owner': owner, u'value': iocTime}],
                                        a[u'properties'])
                                    channels.append(a)
                                    new.remove(a[u'name'])
                                else:
                                    """alias exists but not part of old list"""
                                    aprops = __merge_property_lists(
                                        [{u'name': 'pvStatus', u'owner': owner, u'value': 'Active'},
                                         {u'name': 'time', u'owner': owner, u'value': iocTime},
                                         {u'name': 'alias', u'owner': owner, u'value': ch[u'name']}],
                                        ch[u'properties'])
                                    channels.append({u'name': a[u'name'], u'owner': owner, u'properties': aprops})
                                    new.remove(a[u'name'])
                                _log.debug("Add existing alias with same IOC: %s", channels[-1])

    # now pvNames contains a list of pv's new on this host/ioc
    """A dictionary representing the current channelfinder information associated with the pvNames"""
    existingChannels = {}

    """
    The list of pv's is searched keeping in mind the limitations on the URL length
    The search is split into groups to ensure that the size does not exceed 600 characters
    """
    searchStrings = []
    searchString = ''
    for pv in new:
        if not searchString:
            searchString = pv
        elif (len(searchString) + len(pv) < 600):
            searchString = searchString + '|' + pv
        else:
            searchStrings.append(searchString)
            searchString = pv
    if searchString:
        searchStrings.append(searchString)

    for eachSearchString in searchStrings:
        _log.debug('Find existing channels by name: %s', eachSearchString)
        for ch in client.findByArgs(prepareFindArgs(conf, [('~name', eachSearchString)])):
            existingChannels[ch["name"]] = ch
        if proc.cancelled:
            raise defer.CancelledError()

    for pv in new:
        # Base property set for a channel owned by this IOC.
        newProps = [{u'name': 'hostName', u'owner': owner, u'value': hostName},
                    {u'name': 'iocName', u'owner': owner, u'value': iocName},
                    {u'name': 'iocid', u'owner': owner, u'value': iocid},
                    {u'name': 'pvStatus', u'owner': owner, u'value': "Active"},
                    {u'name': 'time', u'owner': owner, u'value': iocTime}]
        if pv in pvInfoByName and "infoProperties" in pvInfoByName[pv]:
            newProps = newProps + pvInfoByName[pv]["infoProperties"]

        if pv in existingChannels:
            """update existing channel: exists but with a different hostName and/or iocName"""
            existingChannel = existingChannels[pv]
            existingChannel["properties"] = __merge_property_lists(newProps, existingChannel["properties"])
            channels.append(existingChannel)
            _log.debug("Add existing channel with different IOC: %s", channels[-1])
            """in case, alias exists, update their properties too"""
            if (conf.get('alias', 'default') == 'on'):
                if pv in pvInfoByName and "aliases" in pvInfoByName[pv]:
                    alProps = [{u'name': 'alias', u'owner': owner, u'value': pv}]
                    for p in newProps:
                        alProps.append(p)
                    for a in pvInfoByName[pv]["aliases"]:
                        if a in existingChannels:
                            ach = existingChannels[a]
                            ach["properties"] = __merge_property_lists(alProps, ach["properties"])
                            channels.append(ach)
                        else:
                            channels.append({u'name': a, u'owner': owner, u'properties': alProps})
                        _log.debug("Add existing alias with different IOC: %s", channels[-1])
        else:
            """New channel"""
            channels.append({u'name': pv, u'owner': owner, u'properties': newProps})
            _log.debug("Add new channel: %s", channels[-1])
            if (conf.get('alias', 'default') == 'on'):
                if pv in pvInfoByName and "aliases" in pvInfoByName[pv]:
                    alProps = [{u'name': 'alias', u'owner': owner, u'value': pv}]
                    for p in newProps:
                        alProps.append(p)
                    for a in pvInfoByName[pv]["aliases"]:
                        channels.append({u'name': a, u'owner': owner, u'properties': alProps})
                        _log.debug("Add new alias: %s", channels[-1])

    _log.info("Total channels to update: %s", len(channels))
    if len(channels) != 0:
        client.set(channels=channels)
    else:
        # Empty update, but the IOC previously owned channels: push the
        # empty set so its old claims are cleared server-side.
        if old and len(old) != 0:
            client.set(channels=channels)
    if proc.cancelled:
        raise defer.CancelledError()
def _commitWithThread(self, TR):
    """Apply one IOC transaction (TR) to the local caches and queue
    the ChannelFinder update.

    Raises defer.CancelledError when the processor is not running.

    FIXES: (1) the CancelledError message args were never interpolated;
    (2) the duplicate-record warning referenced an undefined name ``pv``
    (NameError when hit); (3) ``self.conf.get('alias', 'default' == 'on')``
    had a misplaced parenthesis, evaluating ``get('alias', False)``
    instead of comparing the configured value to ``'on'`` (two places);
    (4) a local named ``property`` shadowed the builtin.
    """
    if not self.running:
        raise defer.CancelledError(
            'CF Processor is not running (TR: %s:%s)' % (TR.src.host, TR.src.port))

    _log.info("CF_COMMIT: %s", TR)
    # pvInfo: {rid: {"pvName": "recordName",
    #                "infoProperties": {propName: value, ...}}}
    host = TR.src.host
    port = TR.src.port
    iocName = TR.infos.get('IOCNAME') or TR.src.port
    hostName = TR.infos.get('HOSTNAME') or TR.src.host
    owner = TR.infos.get('ENGINEER') or TR.infos.get('CF_USERNAME') or self.conf.get('username', 'cfstore')
    time = self.currentTime()
    # The unique identifier for a particular IOC
    iocid = host + ":" + str(port)

    pvInfo = {}
    for rid, (rname, rtype) in TR.addrec.items():
        pvInfo[rid] = {"pvName": rname}
    for rid, (recinfos) in TR.recinfos.items():
        # find intersection of these sets
        if rid not in pvInfo:
            _log.warn('IOC: %s: PV not found for recinfo with RID: %s', iocid, rid)
            continue
        recinfo_wl = [p for p in self.whitelist if p in recinfos.keys()]
        if recinfo_wl:
            pvInfo[rid]['infoProperties'] = list()
            for infotag in recinfo_wl:
                prop = {u'name': infotag, u'owner': owner,
                        u'value': recinfos[infotag]}
                pvInfo[rid]['infoProperties'].append(prop)
    for rid, alias in TR.aliases.items():
        if rid not in pvInfo:
            _log.warn('IOC: %s: PV not found for alias with RID: %s', iocid, rid)
            continue
        pvInfo[rid]['aliases'] = alias

    delrec = list(TR.delrec)
    _log.debug("Delete records: %s", delrec)

    # Re-key by PV name; later duplicates are dropped with a warning.
    pvInfoByName = {}
    for rid, (info) in pvInfo.items():
        if info["pvName"] in pvInfoByName:
            _log.warn("Commit contains multiple records with PV name: %s (%s)",
                      info["pvName"], iocid)
            continue
        pvInfoByName[info["pvName"]] = info
        _log.debug("Add record: %s: %s", rid, info)

    if TR.initial:
        # Add IOC to source list
        self.iocs[iocid] = {"iocname": iocName, "hostname": hostName,
                            "owner": owner, "time": time, "channelcount": 0}
    if not TR.connected:
        # Disconnected IOC: treat every known channel as deleted.
        delrec.extend(self.channel_dict.keys())

    for pv in pvInfoByName.keys():
        self.channel_dict[pv].append(iocid)  # add iocname to pvName in dict
        self.iocs[iocid]["channelcount"] += 1
        # In case, alias exists
        if self.conf.get('alias', 'default') == 'on':
            if pv in pvInfoByName and "aliases" in pvInfoByName[pv]:
                for a in pvInfoByName[pv]["aliases"]:
                    self.channel_dict[a].append(iocid)  # add iocname to pvName in dict
                    self.iocs[iocid]["channelcount"] += 1

    for pv in delrec:
        if iocid in self.channel_dict[pv]:
            self.channel_dict[pv].remove(iocid)
            if iocid in self.iocs:
                self.iocs[iocid]["channelcount"] -= 1
                if self.iocs[iocid]['channelcount'] == 0:
                    self.iocs.pop(iocid, None)
                elif self.iocs[iocid]['channelcount'] < 0:
                    _log.error("Channel count negative: %s", iocid)
            if len(self.channel_dict[pv]) <= 0:  # case: channel has no more iocs
                del self.channel_dict[pv]
            # In case, alias exists
            if self.conf.get('alias', 'default') == 'on':
                if pv in pvInfoByName and "aliases" in pvInfoByName[pv]:
                    for a in pvInfoByName[pv]["aliases"]:
                        self.channel_dict[a].remove(iocid)
                        if iocid in self.iocs:
                            self.iocs[iocid]["channelcount"] -= 1
                            if self.iocs[iocid]['channelcount'] == 0:
                                self.iocs.pop(iocid, None)
                            elif self.iocs[iocid]['channelcount'] < 0:
                                _log.error("Channel count negative: %s", iocid)
                        if len(self.channel_dict[a]) <= 0:  # case: channel has no more iocs
                            del self.channel_dict[a]

    poll(__updateCF__, self, pvInfoByName, delrec, hostName, iocName, iocid, owner, time)
    dict_to_file(self.channel_dict, self.iocs, self.conf)
def chainResult(_ignored):
    """Fire the chained deferred unless the processor was cancelled.

    Closes over ``self`` and ``d`` from the enclosing scope.
    """
    if not self.cancelled:
        d.callback(None)
        return
    # Cancelled in the meantime: propagate cancellation instead.
    raise defer.CancelledError()
def _cancelWaitForEmptyPool(self, deferred): self._waitingForEmptyPool.discard(deferred) deferred.errback(defer.CancelledError())
def testFetchFailedCancelled(self):
    """A cancelled fetch must not be reported as the service being down."""
    testFailure = failure.Failure(defer.CancelledError())
    with mock.patch.object(self.monitor, '_resultDown') as mock_resultDown:
        r = self.monitor._fetchFailed(testFailure)
        # CancelledError is swallowed: no result and no down-report.
        self.assertIsNone(r)
        mock_resultDown.assert_not_called()
def stop():
    # Abort the pending consumer start by failing its Deferred with
    # CancelledError. Relies on consumer_start_d from the enclosing scope.
    consumer_start_d.errback(defer.CancelledError())
def _handleCancellation(self, f):
    """Translate a psycopg2 query cancellation into Twisted's CancelledError.

    trap() re-raises any other failure type unchanged.
    """
    f.trap(psycopg2.extensions.QueryCanceledError)
    return failure.Failure(defer.CancelledError())
def stopToCancelled(err):
    """Errback: convert a TaskStopped failure into CancelledError.

    Any other failure is passed through unchanged.
    """
    if err.check(task.TaskStopped):
        raise defer.CancelledError()
    return err