def queryNodes(self, indexes=None, apimodule=False, options=False):
    """Fetch data for the selected tree nodes via a thread pool.

    Feeds node indexes into an ApiThreadPool in batches of 100, then drains
    the pool's output queue, appending returned data to the tree model while
    tracking per-status counts, consecutive errors and rate limits. Suspends
    the pool (showing a retryable error) when the configured error threshold
    is reached or a rate limit is detected.

    :param indexes: iterable/list of QModelIndex to fetch, or None to use the
        current tree selection (plus all nodes if the checkbox is set).
    :param apimodule: API module to query; False selects the current tab.
    :param options: query options dict; False asks the module for defaults.
    :return: False when nothing is selected, otherwise None.
    """
    if not (self.mainWindow.tree.selectedCount() or
            self.mainWindow.allnodesCheckbox.isChecked() or
            (indexes is not None)):
        return False

    # Show progress window
    progress = ProgressBar("Fetching Data", parent=self.mainWindow)
    try:
        # Get global options from the main window widgets
        globaloptions = {}
        globaloptions['threads'] = self.mainWindow.threadsEdit.value()
        globaloptions['speed'] = self.mainWindow.speedEdit.value()
        globaloptions['errors'] = self.mainWindow.errorEdit.value()
        globaloptions['expand'] = self.mainWindow.autoexpandCheckbox.isChecked()
        globaloptions['logrequests'] = self.mainWindow.logCheckbox.isChecked()
        globaloptions['saveheaders'] = self.mainWindow.headersCheckbox.isChecked()
        globaloptions['allnodes'] = self.mainWindow.allnodesCheckbox.isChecked()
        objecttypes = self.mainWindow.typesEdit.text().replace(' ', '').split(',')
        level = self.mainWindow.levelEdit.value() - 1

        # Get selected nodes; normalize to an iterator so batches can be
        # pulled lazily with next() below.
        if indexes is None:
            select_all = globaloptions['allnodes']
            select_filter = {'level': level, 'objecttype': objecttypes}
            indexes = self.mainWindow.tree.selectedIndexesAndChildren(
                False, select_filter, select_all)
        elif isinstance(indexes, list):
            indexes = iter(indexes)

        # Update progress window
        self.mainWindow.logmessage("Start fetching data.")
        totalnodes = 0
        hasindexes = True
        progress.setMaximum(totalnodes)
        self.mainWindow.tree.treemodel.nodecounter = 0

        # Init status messages
        statuscount = {}
        errorcount = 0
        ratelimitcount = 0
        allowedstatus = ['fetched (200)', 'downloaded (200)', 'fetched (202)', 'stream']

        if apimodule == False:
            apimodule = self.mainWindow.RequestTabs.currentWidget()
        if options == False:
            options = apimodule.getOptions()
        options.update(globaloptions)

        try:
            # Spawn thread pool
            threadpool = ApiThreadPool(apimodule)
            threadpool.spawnThreads(options.get("threads", 1))

            # Process logging/input/output queues
            while True:
                try:
                    # Logging (sync logs in threads with main thread)
                    msg = threadpool.getLogMessage()
                    if msg is not None:
                        self.mainWindow.logmessage(msg)

                    # Jobs in: packages of 100 at a time so the GUI stays
                    # responsive while large selections are queued.
                    jobsin = 0
                    while hasindexes and (jobsin < 100):
                        index = next(indexes, False)
                        if index:
                            jobsin += 1
                            totalnodes += 1
                            if index.isValid():
                                treenode = index.internalPointer()
                                job = {'nodeindex': index,
                                       'nodedata': deepcopy(treenode.data),
                                       'options': deepcopy(options)}
                                threadpool.addJob(job)
                        else:
                            # Input exhausted: seal the queue and fix totals.
                            threadpool.applyJobs()
                            progress.setRemaining(threadpool.getJobCount())
                            progress.resetRate()
                            hasindexes = False
                            self.mainWindow.logmessage(
                                "Added {} node(s) to queue.".format(totalnodes))
                    if jobsin > 0:
                        progress.setMaximum(totalnodes)

                    # Jobs out
                    job = threadpool.getJob()

                    # -Finished all nodes (sentinel)...
                    if job is None:
                        break

                    # -Progress report from a worker thread...
                    elif 'progress' in job:
                        progresskey = 'nodeprogress' + str(job.get('threadnumber', ''))
                        # Update single-node progress
                        if 'current' in job:
                            percent = int((job.get('current', 0) * 100.0 / job.get('total', 1)))
                            progress.showInfo(
                                progresskey,
                                "{}% of current node processed.".format(percent))
                        elif 'page' in job:
                            if job.get('page', 0) > 1:
                                progress.showInfo(
                                    progresskey,
                                    "{} page(s) of current node processed.".format(job.get('page', 0)))
                        # Update total progress
                        else:
                            progress.removeInfo(progresskey)
                            if not threadpool.suspended:
                                progress.step()

                    # -Add data...
                    elif 'data' in job and (not progress.wasCanceled):
                        if not job['nodeindex'].isValid():
                            continue

                        # Add data to the tree model
                        treeindex = job['nodeindex']
                        treenode = treeindex.internalPointer()
                        treenode.appendNodes(job['data'], job['options'], job['headers'], True)
                        if options.get('expand', False):
                            self.mainWindow.tree.setExpanded(treeindex, True)

                        # Count status
                        status = job['options'].get('querystatus', 'empty')
                        if not status in statuscount:
                            statuscount[status] = 1
                        else:
                            statuscount[status] = statuscount[status] + 1

                        # Collect errors for automatic retry
                        if not status in allowedstatus:
                            threadpool.addError(job)
                            errorcount += 1

                        # Detect rate limit
                        ratelimit = job['options'].get('ratelimit', False)
                        if ratelimit:
                            ratelimitcount += 1

                        # Clear errors after a clean response
                        if not threadpool.suspended and (status in allowedstatus) and not ratelimit:
                            threadpool.clearRetry()
                            errorcount = 0
                            ratelimitcount = 0
                        # Suspend on error threshold or rate limit
                        elif (errorcount > (globaloptions['errors'] - 1)) or (ratelimitcount > 0):
                            threadpool.suspendJobs()
                            if ratelimit:
                                msg = "You reached the rate limit of the API."
                            else:
                                msg = "{} consecutive errors occurred.\nPlease check your settings.".format(errorcount)
                            timeout = 60 * 5  # 5 minutes

                            # Adjust progress
                            progress.setRemaining(threadpool.getJobCount() + threadpool.getRetryCount())
                            progress.showError(msg, timeout, ratelimitcount > 0)
                            self.mainWindow.tree.treemodel.commitNewNodes()

                        # Show info
                        progress.showInfo(
                            status,
                            "{} response(s) with status: {}".format(statuscount[status], status))
                        progress.showInfo(
                            'newnodes',
                            "{} new node(s) created".format(self.mainWindow.tree.treemodel.nodecounter))
                        progress.showInfo(
                            'threads',
                            "{} active thread(s)".format(threadpool.getThreadCount()))
                        progress.setRemaining(threadpool.getJobCount())

                        # Custom info from modules
                        info = job['options'].get('info', {})
                        for name, value in info.items():
                            progress.showInfo(name, value)

                    # -Abort...
                    elif progress.wasCanceled:
                        progress.showInfo(
                            'cancel',
                            "Disconnecting from stream, may take some time.")
                        threadpool.stopJobs()

                    # -Retry after suspension...
                    elif progress.wasResumed:
                        if progress.wasRetried:
                            threadpool.retryJobs()
                        else:
                            threadpool.clearRetry()
                            errorcount = 0
                            ratelimitcount = 0
                        threadpool.resumeJobs()
                        progress.setRemaining(threadpool.getJobCount())
                        progress.hideError()

                    # -Continue...
                    elif not threadpool.suspended:
                        threadpool.resumeJobs()

                    # Finished
                    if not threadpool.hasJobs():
                        progress.showInfo('cancel', "Work finished, shutting down threads.")
                        threadpool.stopJobs()

                    # -Waiting...
                    progress.computeRate()
                    time.sleep(1.0 / 1000.0)
                finally:
                    QApplication.processEvents()
        finally:
            # Summarize responses in the log, whether finished or cancelled.
            request_summary = [str(val) + " x " + key for key, val in statuscount.items()]
            request_summary = ", ".join(request_summary)
            request_end = "Fetching completed" if not progress.wasCanceled else 'Fetching cancelled by user'
            self.mainWindow.logmessage(
                "{}, {} new node(s) created. Summary of responses: {}.".format(
                    request_end, self.mainWindow.tree.treemodel.nodecounter, request_summary))
            self.mainWindow.tree.treemodel.commitNewNodes()
    finally:
        progress.close()
def queryNodes(self, indexes=False, apimodule=False, options=False):
    """Fetch data for the selected tree nodes (single-pass queue variant).

    Queues every valid selected node up front, processes the thread pool's
    output queue, and auto-cancels after three consecutive error responses.

    :param indexes: list of QModelIndex, or False to use the tree selection.
    :param apimodule: API module to query; False selects the current tab.
    :param options: query options dict; False asks the module for defaults.
    """
    # Show progress window
    progress = ProgressBar(u"Fetching Data", parent=self.mainWindow)

    # Get global options
    globaloptions = {}
    globaloptions['threads'] = self.mainWindow.threadsEdit.value()
    globaloptions['speed'] = self.mainWindow.speedEdit.value()
    objecttypes = self.mainWindow.typesEdit.text().replace(' ', '').split(',')

    # Get selected nodes
    if indexes == False:
        level = self.mainWindow.levelEdit.value() - 1
        indexes = self.mainWindow.tree.selectedIndexesAndChildren(
            False, {'level': level, 'objecttype': objecttypes})

    # Update progress window
    progress.setMaximum(len(indexes))
    self.mainWindow.tree.treemodel.nodecounter = 0

    if apimodule == False:
        apimodule = self.mainWindow.RequestTabs.currentWidget()
    if options == False:
        options = apimodule.getOptions()
    options.update(globaloptions)

    try:
        # Spawn thread pool; worker threads log through the main window.
        threadpool = ApiThreadPool(apimodule, self.mainWindow.logmessage)

        # Init status messages
        statuscount = {}
        errorcount = 0

        # Fill input queue with one job per valid node
        number = 0
        for index in indexes:
            number += 1
            if not index.isValid():
                continue
            treenode = index.internalPointer()
            job = {'number': number,
                   'nodeindex': index,
                   'data': deepcopy(treenode.data),
                   'options': deepcopy(options)}
            threadpool.addJob(job)
        threadpool.processJobs(options.get("threads", None))

        # Process output queue
        while True:
            try:
                job = threadpool.getJob()

                # -Finished all nodes...
                if job is None:
                    break

                # -Waiting...
                elif 'waiting' in job:
                    time.sleep(0)

                # -Finished one node...
                elif 'progress' in job:
                    # Update progress
                    progress.step()

                # -Add data...
                else:
                    if not job['nodeindex'].isValid():
                        continue

                    # Add data to the tree model
                    treenode = job['nodeindex'].internalPointer()
                    treenode.appendNodes(job['data'], job['options'], job['headers'], True)

                    # Show status
                    status = job['options'].get('querystatus', 'empty')
                    count = 1 if not status in statuscount else statuscount[status] + 1
                    statuscount[status] = count
                    progress.showInfo(
                        status,
                        u"{} response(s) with status: {}".format(count, status))
                    progress.showInfo(
                        'newnodes',
                        u"{} new node(s) created".format(self.mainWindow.tree.treemodel.nodecounter))
                    progress.showInfo(
                        'threads',
                        u"{} active thread(s)".format(threadpool.getThreadCount()))

                    # Auto cancel after three consecutive errors; successful
                    # statuses (including streaming) reset the counter.
                    if (status == 'fetched (200)') or (status == 'stream') or (status == 'downloaded (200)'):
                        errorcount = 0
                    else:
                        errorcount += 1
                        if errorcount > 2:
                            self.mainWindow.logmessage(
                                u"Automatically canceled because of three consecutive errors.")
                            progress.cancel()

                # Abort
                if progress.wasCanceled:
                    progress.showInfo(
                        'cancel',
                        u"Disconnecting from stream may take up to one minute.")
                    threadpool.stopJobs()
            finally:
                QApplication.processEvents()
    finally:
        self.mainWindow.tree.treemodel.commitNewNodes()
        progress.close()
def queryNodes(self, indexes=False, apimodule=False, options=False):
    """Fetch data for the selected tree nodes (single-pass queue variant).

    NOTE(review): this definition is logically identical to the preceding
    revision of queryNodes in this file; if both remain in one module the
    later definition shadows the earlier one — confirm which revision is
    intended to survive.

    :param indexes: list of QModelIndex, or False to use the tree selection.
    :param apimodule: API module to query; False selects the current tab.
    :param options: query options dict; False asks the module for defaults.
    """
    # Show progress window
    progress = ProgressBar(u"Fetching Data", parent=self.mainWindow)

    # Get global options
    globaloptions = {}
    globaloptions['threads'] = self.mainWindow.threadsEdit.value()
    globaloptions['speed'] = self.mainWindow.speedEdit.value()
    objecttypes = self.mainWindow.typesEdit.text().replace(' ', '').split(',')

    # Get selected nodes
    if indexes == False:
        level = self.mainWindow.levelEdit.value() - 1
        indexes = self.mainWindow.tree.selectedIndexesAndChildren(
            False, {'level': level, 'objecttype': objecttypes})

    # Update progress window
    progress.setMaximum(len(indexes))
    self.mainWindow.tree.treemodel.nodecounter = 0

    if apimodule == False:
        apimodule = self.mainWindow.RequestTabs.currentWidget()
    if options == False:
        options = apimodule.getOptions()
    options.update(globaloptions)

    try:
        # Spawn thread pool; worker threads log through the main window.
        threadpool = ApiThreadPool(apimodule, self.mainWindow.logmessage)

        # Init status messages
        statuscount = {}
        errorcount = 0

        # Fill input queue with one job per valid node
        number = 0
        for index in indexes:
            number += 1
            if not index.isValid():
                continue
            treenode = index.internalPointer()
            job = {'number': number,
                   'nodeindex': index,
                   'data': deepcopy(treenode.data),
                   'options': deepcopy(options)}
            threadpool.addJob(job)
        threadpool.processJobs(options.get("threads", None))

        # Process output queue
        while True:
            try:
                job = threadpool.getJob()

                # -Finished all nodes...
                if job is None:
                    break

                # -Waiting...
                elif 'waiting' in job:
                    time.sleep(0)

                # -Finished one node...
                elif 'progress' in job:
                    # Update progress
                    progress.step()

                # -Add data...
                else:
                    if not job['nodeindex'].isValid():
                        continue

                    # Add data to the tree model
                    treenode = job['nodeindex'].internalPointer()
                    treenode.appendNodes(job['data'], job['options'], job['headers'], True)

                    # Show status
                    status = job['options'].get('querystatus', 'empty')
                    count = 1 if not status in statuscount else statuscount[status] + 1
                    statuscount[status] = count
                    progress.showInfo(
                        status,
                        u"{} response(s) with status: {}".format(count, status))
                    progress.showInfo(
                        'newnodes',
                        u"{} new node(s) created".format(self.mainWindow.tree.treemodel.nodecounter))
                    progress.showInfo(
                        'threads',
                        u"{} active thread(s)".format(threadpool.getThreadCount()))

                    # Auto cancel after three consecutive errors; successful
                    # statuses (including streaming) reset the counter.
                    if (status == 'fetched (200)') or (status == 'stream') or (status == 'downloaded (200)'):
                        errorcount = 0
                    else:
                        errorcount += 1
                        if errorcount > 2:
                            self.mainWindow.logmessage(
                                u"Automatically canceled because of three consecutive errors.")
                            progress.cancel()

                # Abort
                if progress.wasCanceled:
                    progress.showInfo(
                        'cancel',
                        u"Disconnecting from stream may take up to one minute.")
                    threadpool.stopJobs()
            finally:
                QApplication.processEvents()
    finally:
        self.mainWindow.tree.treemodel.commitNewNodes()
        progress.close()
def queryNodes(self, indexes=False, apimodule=False, options=False):
    """Fetch data for the selected tree nodes (deque-fed variant).

    Feeds one node per loop iteration from a deque into the thread pool,
    drains the output queue, and on repeated errors or a rate limit suspends
    the pool and asks the user via RetryDialog whether to continue.

    Fixes applied during reconstruction:
    - ``selectedCount`` is now called (``selectedCount()``): comparing the
      bound method itself to an int raises TypeError on Python 3, and every
      other revision of this method calls it.
    - ``statuscount.iteritems()`` replaced with ``items()`` (``iteritems``
      does not exist on Python 3 dicts).

    :param indexes: iterable of QModelIndex, or False to use the selection.
    :param apimodule: API module to query; False selects the current tab.
    :param options: query options dict; False asks the module for defaults.
    :return: False when the query action is disabled or nothing is selected.
    """
    if not self.actionQuery.isEnabled() or not (
            (self.mainWindow.tree.selectedCount() > 0) or (indexes != False)):
        return False

    # Show progress window
    progress = ProgressBar(u"Fetching Data", parent=self.mainWindow)

    # Get global options
    globaloptions = {}
    globaloptions['threads'] = self.mainWindow.threadsEdit.value()
    globaloptions['speed'] = self.mainWindow.speedEdit.value()
    globaloptions['errors'] = self.mainWindow.errorEdit.value()
    globaloptions['logrequests'] = self.mainWindow.logCheckbox.isChecked()
    objecttypes = self.mainWindow.typesEdit.text().replace(' ', '').split(',')
    level = self.mainWindow.levelEdit.value() - 1

    # Get selected nodes
    if indexes == False:
        indexes = self.mainWindow.tree.selectedIndexesAndChildren(
            False, {'level': level, 'objecttype': objecttypes})

    # Update progress window
    self.mainWindow.logmessage(
        u"Start fetching data for {} node(s).".format(len(indexes)))
    progress.setMaximum(len(indexes))
    self.mainWindow.tree.treemodel.nodecounter = 0

    # Init status messages
    statuscount = {}
    errorcount = 0
    laststatus = None
    laststatuscount = 0
    allowedstatus = ['fetched (200)', 'downloaded (200)', 'stream']

    if apimodule == False:
        apimodule = self.mainWindow.RequestTabs.currentWidget()
    if options == False:
        options = apimodule.getOptions()
    options.update(globaloptions)

    try:
        # Spawn thread pool
        threadpool = ApiThreadPool(apimodule)

        # Fill input queue lazily: nodes are popped one per loop iteration.
        indexes = deque(indexes)
        threadpool.processJobs(options.get("threads", None))

        # Process input/output queues
        while True:
            try:
                # Logging (sync logs in threads with main thread)
                msg = threadpool.getLogMessage()
                if msg is not None:
                    self.mainWindow.logmessage(msg)

                # Jobs in
                if len(indexes) > 0:
                    index = indexes.popleft()
                    if index.isValid():
                        treenode = index.internalPointer()
                        job = {'nodeindex': index,
                               'data': deepcopy(treenode.data),
                               'options': deepcopy(options)}
                        threadpool.addJob(job)
                    if len(indexes) == 0:
                        threadpool.closeJobs()
                    progress.showInfo(
                        'remainingnodes',
                        u"{} node(s) remaining.".format(threadpool.getJobCount()))

                # Jobs out
                job = threadpool.getJob()

                # -Finished all nodes...
                if job is None:
                    break

                # -Waiting...
                elif 'waiting' in job:
                    time.sleep(0)

                # -Finished one node...
                elif 'progress' in job:
                    # Update progress
                    progress.step()

                # -Add data...
                elif not progress.wasCanceled:
                    if not job['nodeindex'].isValid():
                        continue

                    # Add data to the tree model
                    treenode = job['nodeindex'].internalPointer()
                    treenode.appendNodes(job['data'], job['options'], job['headers'], True)

                    # Show status
                    status = job['options'].get('querystatus', 'empty')
                    count = 1 if not status in statuscount else statuscount[status] + 1
                    statuscount[status] = count
                    progress.showInfo(
                        status,
                        u"{} response(s) with status: {}".format(count, status))
                    progress.showInfo(
                        'newnodes',
                        u"{} new node(s) created".format(self.mainWindow.tree.treemodel.nodecounter))
                    progress.showInfo(
                        'threads',
                        u"{} active thread(s)".format(threadpool.getThreadCount()))
                    progress.showInfo(
                        'remainingnodes',
                        u"{} node(s) remaining.".format(threadpool.getJobCount()))

                    # Track consecutive identical statuses for auto-suspend
                    if status != laststatus:
                        laststatus = status
                        laststatuscount = 1
                    else:
                        laststatuscount += 1

                    # Suspend on repeated errors or rate limit, then ask user
                    if not (laststatus in allowedstatus) and (
                            (laststatuscount > (globaloptions['errors'] - 1)) or
                            (laststatus == "rate limit (400)")):
                        threadpool.suspendJobs()
                        if laststatus == "rate limit (400)":
                            msg = "You reached the rate limit of the API. You are strongly advised to calm down and retry later."
                            timeout = 60 * 10  # 10 minutes
                        else:
                            msg = "Something is wrong. {} consecutive errors occurred. You are strongly advised to check your settings.".format(laststatuscount)
                            timeout = 60  # 1 minute

                        if RetryDialog.doContinue(msg, timeout, self.mainWindow) == QDialog.Accepted:
                            laststatuscount = 1
                            threadpool.resumeJobs()
                        else:
                            self.mainWindow.logmessage(
                                u"Canceled because of {} consecutive errors or rate limit.".format(laststatuscount))
                            progress.cancel()

                # Abort
                if progress.wasCanceled:
                    progress.showInfo(
                        'cancel',
                        u"Disconnecting from stream may take up to one minute.")
                    threadpool.stopJobs()
            finally:
                QApplication.processEvents()
    finally:
        # Summarize responses in the log.
        request_summary = [str(val) + " x " + key for key, val in statuscount.items()]
        request_summary = ", ".join(request_summary)
        self.mainWindow.logmessage(
            u"Fetching completed, {} new node(s) created. Summary of responses: {}.".format(
                self.mainWindow.tree.treemodel.nodecounter, request_summary))
        self.mainWindow.tree.treemodel.commitNewNodes()
        progress.close()
def queryNodes(self, indexes=None, apimodule=False, options=None):
    """Fetch data for the selected tree nodes (helper-method variant).

    Delegates option and index preparation to ``getQueryOptions``,
    ``getIndexes`` and ``prepareJob``, feeds the thread pool in batches of
    100, drains its output queue, and suspends with a retryable error on the
    configured error threshold or a detected rate limit. Any scheduler
    exception is caught and logged rather than propagated.

    :param indexes: iterable of QModelIndex, or None to use the selection.
    :param apimodule: API module to query; False selects the current tab.
    :param options: query options dict; None asks the module for defaults.
    :return: False when nothing is selected, otherwise True unless the user
        cancelled.
    """
    if not (self.mainWindow.tree.selectedCount() or
            self.mainWindow.allnodesCheckbox.isChecked() or
            (indexes is not None)):
        return False

    # Show progress window
    progress = ProgressBar("Fetching Data", parent=self.mainWindow)
    try:
        apimodule, options = self.getQueryOptions(apimodule, options)
        indexes = self.getIndexes(options, indexes, progress)

        # Update progress window
        self.mainWindow.logmessage("Start fetching data.")
        totalnodes = 0
        hasindexes = True
        progress.setMaximum(totalnodes)
        self.mainWindow.tree.treemodel.nodecounter = 0

        # Init status messages
        statuscount = defaultdict(int)
        errorcount = 0
        ratelimitcount = 0
        allowedstatus = ['fetched (200)', 'downloaded (200)', 'fetched (202)']

        try:
            # Spawn thread pool
            threadpool = ApiThreadPool(apimodule)
            threadpool.spawnThreads(options.get("threads", 1))

            # Process logging/input/output queues
            while True:
                try:
                    # Logging (sync logs in threads with main thread)
                    msg = threadpool.getLogMessage()
                    if msg is not None:
                        self.mainWindow.logmessage(msg)

                    # Jobs in: packages of 100 at a time
                    jobsin = 0
                    while hasindexes and (jobsin < 100):
                        index = next(indexes, False)
                        if index:
                            jobsin += 1
                            totalnodes += 1
                            if index.isValid():
                                job = self.prepareJob(index, options)
                                threadpool.addJob(job)
                        else:
                            # Input exhausted: seal the queue and fix totals.
                            threadpool.applyJobs()
                            progress.setRemaining(threadpool.getJobCount())
                            progress.resetRate()
                            hasindexes = False
                            progress.removeInfo('input')
                            self.mainWindow.logmessage(
                                "Added {} node(s) to queue.".format(totalnodes))
                    if jobsin > 0:
                        progress.setMaximum(totalnodes)

                    # Jobs out
                    job = threadpool.getJob()

                    # -Finished all nodes (sentinel)...
                    if job is None:
                        break

                    # -Progress report from a worker thread...
                    elif 'progress' in job:
                        progresskey = 'nodeprogress' + str(job.get('threadnumber', ''))
                        # Update single-node progress
                        if 'current' in job:
                            percent = int((job.get('current', 0) * 100.0 / job.get('total', 1)))
                            progress.showInfo(
                                progresskey,
                                "{}% of current node processed.".format(percent))
                        elif 'page' in job:
                            if job.get('page', 0) > 1:
                                progress.showInfo(
                                    progresskey,
                                    "{} page(s) of current node processed.".format(job.get('page', 0)))
                        # Update total progress
                        else:
                            progress.removeInfo(progresskey)
                            if not threadpool.suspended:
                                progress.step()

                    # -Add data...
                    elif 'data' in job and (not progress.wasCanceled):
                        if not job['nodeindex'].isValid():
                            continue

                        # Add data to the tree model
                        treeindex = job['nodeindex']
                        treenode = treeindex.internalPointer()
                        newcount = treenode.appendNodes(job['data'], job['options'], True)
                        if options.get('expand', False):
                            self.mainWindow.tree.setExpanded(treeindex, True)

                        # Count status and errors
                        status = job['options'].get('querystatus', 'empty')
                        statuscount[status] += 1
                        errorcount += int(not status in allowedstatus)

                        # Detect rate limit
                        ratelimit = job['options'].get('ratelimit', False)
                        ratelimitcount += int(ratelimit)
                        autoretry = (ratelimitcount) or (status == "request error")

                        # Clear errors when everything is ok
                        if not threadpool.suspended and (status in allowedstatus) and (not ratelimit):
                            errorcount = 0
                            ratelimitcount = 0
                        # Suspend on error threshold or rate limit
                        elif (errorcount >= options['errors']) or (ratelimitcount > 0):
                            threadpool.suspendJobs()
                            if ratelimit:
                                msg = "You reached the rate limit of the API."
                            else:
                                msg = "{} consecutive errors occurred.\nPlease check your settings.".format(errorcount)
                            timeout = 60 * 5  # 5 minutes

                            # Adjust progress
                            progress.showError(msg, timeout, autoretry)
                            self.mainWindow.tree.treemodel.commitNewNodes()

                        # Add job for retry
                        if not status in allowedstatus:
                            threadpool.addError(job)

                        # Show info
                        progress.showInfo(
                            status,
                            "{} response(s) with status: {}".format(statuscount[status], status))
                        progress.showInfo(
                            'newnodes',
                            "{} new node(s) created".format(self.mainWindow.tree.treemodel.nodecounter))
                        progress.showInfo(
                            'threads',
                            "{} active thread(s)".format(threadpool.getThreadCount()))
                        progress.setRemaining(threadpool.getJobCount())

                        # Custom info from modules
                        info = job['options'].get('info', {})
                        for name, value in info.items():
                            progress.showInfo(name, value)

                    # -Abort...
                    elif progress.wasCanceled:
                        progress.showInfo(
                            'cancel',
                            "Disconnecting from stream, may take some time.")
                        threadpool.stopJobs()

                    # -Retry after suspension...
                    elif progress.wasResumed:
                        if progress.wasRetried:
                            threadpool.retryJobs()
                        else:
                            threadpool.clearRetry()
                        threadpool.resumeJobs()
                        progress.setRemaining(threadpool.getJobCount())
                        progress.hideError()

                    # -Continue...
                    elif not threadpool.suspended:
                        threadpool.resumeJobs()

                    # Finished with pending errors
                    if not threadpool.hasJobs() and threadpool.hasErrorJobs():
                        msg = "All nodes finished but you have {} pending errors. Skip or retry?".format(
                            threadpool.getErrorJobsCount())
                        autoretry = False
                        timeout = 60 * 5  # 5 minutes
                        progress.showError(msg, timeout, autoretry)

                    # Finished
                    if not threadpool.hasJobs():
                        progress.showInfo('cancel', "Work finished, shutting down threads.")
                        threadpool.stopJobs()

                    # -Waiting...
                    progress.computeRate()
                    time.sleep(1.0 / 1000.0)
                finally:
                    QApplication.processEvents()
        finally:
            # Summarize responses in the log, whether finished or cancelled.
            request_summary = [str(val) + " x " + key for key, val in statuscount.items()]
            request_summary = ", ".join(request_summary)
            request_end = "Fetching completed" if not progress.wasCanceled else 'Fetching cancelled by user'
            self.mainWindow.logmessage(
                "{}, {} new node(s) created. Summary of responses: {}.".format(
                    request_end, self.mainWindow.tree.treemodel.nodecounter, request_summary))
            self.mainWindow.tree.treemodel.commitNewNodes()
    except Exception as e:
        self.mainWindow.logmessage(
            "Error in scheduler, fetching aborted: {}.".format(str(e)))
    finally:
        progress.close()
        # NOTE(review): returning from a finally block suppresses any
        # in-flight exception (e.g. one raised inside the except handler
        # above) — kept as-is to preserve behavior; confirm this is intended.
        return not progress.wasCanceled
def queryNodes(self, indexes=None, apimodule=False, options=False):
    """Fetch data for the selected tree nodes (deque-fed retry variant).

    Materializes the selection into a deque, feeds one node per loop
    iteration into the thread pool, drains the output queue, and suspends
    with a retryable error when the error threshold or a rate limit is hit.

    :param indexes: iterable of QModelIndex, or None to use the selection.
    :param apimodule: API module to query; False selects the current tab.
    :param options: query options dict; False asks the module for defaults.
    :return: False when the query action is disabled or nothing is selected.
    """
    if not self.actionQuery.isEnabled() or not (
            (self.mainWindow.tree.selectedCount() > 0) or (indexes is not None)):
        return False

    # Show progress window
    progress = ProgressBar("Fetching Data", parent=self.mainWindow)
    try:
        # Get global options from the main window widgets
        globaloptions = {}
        globaloptions['threads'] = self.mainWindow.threadsEdit.value()
        globaloptions['speed'] = self.mainWindow.speedEdit.value()
        globaloptions['errors'] = self.mainWindow.errorEdit.value()
        globaloptions['expand'] = self.mainWindow.autoexpandCheckbox.isChecked()
        globaloptions['logrequests'] = self.mainWindow.logCheckbox.isChecked()
        globaloptions['saveheaders'] = self.mainWindow.headersCheckbox.isChecked()
        objecttypes = self.mainWindow.typesEdit.text().replace(' ', '').split(',')
        level = self.mainWindow.levelEdit.value() - 1

        # Get selected nodes
        if indexes is None:
            indexes = self.mainWindow.tree.selectedIndexesAndChildren(
                False, {'level': level, 'objecttype': objecttypes})
        if len(indexes) == 0:
            return False

        # Update progress window
        self.mainWindow.logmessage(
            "Start fetching data for {} node(s).".format(len(indexes)))
        progress.setMaximum(len(indexes))
        self.mainWindow.tree.treemodel.nodecounter = 0

        # Init status messages
        statuscount = {}
        errorcount = 0
        ratelimitcount = 0
        allowedstatus = ['fetched (200)', 'downloaded (200)', 'fetched (202)', 'stream']

        if apimodule == False:
            apimodule = self.mainWindow.RequestTabs.currentWidget()
        if options == False:
            options = apimodule.getOptions()
        options.update(globaloptions)

        try:
            # Spawn thread pool
            threadpool = ApiThreadPool(apimodule)
            threadpool.spawnThreads(options.get("threads", 1))

            # Init input queue; nodes are popped one per loop iteration.
            indexes = deque(indexes)

            # Process logging/input/output queues
            while True:
                try:
                    # Logging (sync logs in threads with main thread)
                    msg = threadpool.getLogMessage()
                    if msg is not None:
                        self.mainWindow.logmessage(msg)

                    # Jobs in
                    if len(indexes) > 0:
                        index = indexes.popleft()
                        if index.isValid():
                            treenode = index.internalPointer()
                            job = {'nodeindex': index,
                                   'nodedata': deepcopy(treenode.data),
                                   'options': deepcopy(options)}
                            threadpool.addJob(job)
                        if len(indexes) == 0:
                            # Input exhausted: seal the queue and fix totals.
                            threadpool.applyJobs()
                            progress.setRemaining(threadpool.getJobCount())
                            progress.resetRate()

                    # Jobs out
                    job = threadpool.getJob()

                    # -Finished all nodes (sentinel)...
                    if job is None:
                        break

                    # -Progress report from a worker thread...
                    elif 'progress' in job:
                        progresskey = 'nodeprogress' + str(job.get('threadnumber', ''))
                        # Update single-node progress
                        if 'current' in job:
                            percent = int((job.get('current', 0) * 100.0 / job.get('total', 1)))
                            progress.showInfo(
                                progresskey,
                                "{}% of current node processed.".format(percent))
                        elif 'page' in job:
                            if job.get('page', 0) > 1:
                                progress.showInfo(
                                    progresskey,
                                    "{} page(s) of current node processed.".format(job.get('page', 0)))
                        # Update total progress
                        else:
                            progress.removeInfo(progresskey)
                            if not threadpool.suspended:
                                progress.step()

                    # -Add data...
                    elif 'data' in job and (not progress.wasCanceled):
                        if not job['nodeindex'].isValid():
                            continue

                        # Add data to the tree model
                        treeindex = job['nodeindex']
                        treenode = treeindex.internalPointer()
                        treenode.appendNodes(job['data'], job['options'], job['headers'], True)
                        if options.get('expand', False):
                            self.mainWindow.tree.setExpanded(treeindex, True)

                        # Count status
                        status = job['options'].get('querystatus', 'empty')
                        if not status in statuscount:
                            statuscount[status] = 1
                        else:
                            statuscount[status] = statuscount[status] + 1

                        # Collect errors for automatic retry
                        if not status in allowedstatus:
                            threadpool.addError(job)
                            errorcount += 1

                        # Detect rate limit
                        ratelimit = job['options'].get('ratelimit', False)
                        if ratelimit:
                            ratelimitcount += 1

                        # Clear errors after a clean response
                        if not threadpool.suspended and (status in allowedstatus) and not ratelimit:
                            threadpool.clearRetry()
                            errorcount = 0
                            ratelimitcount = 0
                        # Suspend on error threshold or rate limit
                        elif (errorcount > (globaloptions['errors'] - 1)) or (ratelimitcount > 0):
                            threadpool.suspendJobs()
                            if ratelimit:
                                msg = "You reached the rate limit of the API."
                            else:
                                msg = "{} consecutive errors occurred.\nPlease check your settings.".format(errorcount)
                            timeout = 60 * 5  # 5 minutes

                            # Adjust progress
                            progress.setRemaining(threadpool.getJobCount() + threadpool.getRetryCount())
                            progress.showError(msg, timeout, ratelimitcount > 0)
                            self.mainWindow.tree.treemodel.commitNewNodes()

                        # Show info
                        progress.showInfo(
                            status,
                            "{} response(s) with status: {}".format(statuscount[status], status))
                        progress.showInfo(
                            'newnodes',
                            "{} new node(s) created".format(self.mainWindow.tree.treemodel.nodecounter))
                        progress.showInfo(
                            'threads',
                            "{} active thread(s)".format(threadpool.getThreadCount()))
                        progress.setRemaining(threadpool.getJobCount())

                        # Custom info from modules
                        info = job['options'].get('info', {})
                        for name, value in info.items():
                            progress.showInfo(name, value)

                    # -Abort...
                    elif progress.wasCanceled:
                        progress.showInfo(
                            'cancel',
                            "Disconnecting from stream, may take some time.")
                        threadpool.stopJobs()

                    # -Retry after suspension...
                    elif progress.wasResumed:
                        if progress.wasRetried:
                            threadpool.retryJobs()
                        else:
                            threadpool.clearRetry()
                        threadpool.resumeJobs()
                        progress.setRemaining(threadpool.getJobCount())
                        progress.hideError()

                    # -Continue...
                    elif not threadpool.suspended:
                        threadpool.resumeJobs()

                    # Finished
                    if not threadpool.hasJobs():
                        progress.showInfo('cancel', "Work finished, shutting down threads.")
                        threadpool.stopJobs()

                    # -Waiting...
                    progress.computeRate()
                    time.sleep(1.0 / 1000.0)
                finally:
                    QApplication.processEvents()
        finally:
            # Summarize responses in the log, whether finished or cancelled.
            request_summary = [str(val) + " x " + key for key, val in statuscount.items()]
            request_summary = ", ".join(request_summary)
            request_end = "Fetching completed" if not progress.wasCanceled else 'Fetching cancelled by user'
            self.mainWindow.logmessage(
                "{}, {} new node(s) created. Summary of responses: {}.".format(
                    request_end, self.mainWindow.tree.treemodel.nodecounter, request_summary))
            self.mainWindow.tree.treemodel.commitNewNodes()
    finally:
        progress.close()