Example #1
def get():
    transports = [_tor_getter.get()]
    for file in files:
        try:
            with open(file, 'r') as transport_file:
                transports.append(transport_file.read().strip())
        except FileNotFoundError:
            pass
        else:
            break  # a transport file was read; stop looking
    else:
        # for/else: runs only when no break fired, i.e. no file was readable
        time.sleep(1)
    return list(transports)
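Note the for/else above: the else branch runs only when the loop finishes without hitting break, i.e. when none of the files could be opened. A standalone illustration of the idiom:

for path in ("a.txt", "b.txt"):
    try:
        with open(path) as fh:
            print(fh.read())
    except FileNotFoundError:
        continue
    break  # a file was read; the else branch is skipped
else:
    print("no file was readable")  # runs only if the loop never broke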
Example #2
def __retry_internal(
    f, exceptions=Exception, tries=-1, delay=0, max_delay=None, backoff=1, jitter=0,
    logger=logging_logger,
):
    """
    Executes a function and retries it if it failed.
    :param f: the function to execute.
    :param exceptions: an exception or a tuple of exceptions to catch. default: Exception.
    :param tries: the maximum number of attempts. default: -1 (infinite).
    :param delay: initial delay between attempts. default: 0.
    :param max_delay: the maximum value of delay. default: None (no limit).
    :param backoff: multiplier applied to delay between attempts. default: 1 (no backoff).
    :param jitter: extra seconds added to delay between attempts. default: 0.
                   fixed if a number, random if a range tuple (min, max)
    :param logger: logger.warning(fmt, error, delay) will be called on failed attempts.
                   default: retry.logging_logger. if None, logging is disabled.
    :returns: the result of the f function.
    """
    _tries, _delay = tries, delay
    while _tries:
        try:
            return f()
        except exceptions as e:
            _tries -= 1
            if not _tries:
                raise

            if logger is not None:
                logger.warning("%s, retrying in %s seconds...", e, _delay)

            time.sleep(_delay)
            _delay *= backoff

            if isinstance(jitter, tuple):
                _delay += random.uniform(*jitter)
            else:
                _delay += jitter

            if max_delay is not None:
                _delay = min(_delay, max_delay)
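A minimal usage sketch for the helper above, assuming the module context the retry package provides (time, random, logging_logger); flaky_fetch is a hypothetical callable:

import random

def flaky_fetch():
    # Fails ~70% of the time to exercise the retry loop.
    if random.random() < 0.7:
        raise ConnectionError("transient failure")
    return "payload"

# Up to 5 attempts: 1 s initial delay, doubled each retry, capped at 10 s.
# Raises ConnectionError if all 5 attempts fail.
result = __retry_internal(
    flaky_fetch, exceptions=ConnectionError,
    tries=5, delay=1, backoff=2, max_delay=10,
)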
Example #3
def iter_galleries(self, start=0, end=None, delay=None, **kwargs):
    i = start
    o = None
    while True:
        resp = self.get_gallery(page=i, **kwargs)
        if len(resp) == 0:
            break
        o2 = json.dumps(resp)
        if o == o2:
            warn("Page: %i and %i return the same results" % (i - 1, i))
            break
        o = o2  # remember this page so a repeated response can be detected
        debug("Points is %i" % resp[0]['points'])
        debug("Score is %i" % resp[0]['score'])
        debug("Ups is %i" % resp[0]['ups'])
        # if min_points and resp[0]['points'] < min_points:
        #     info("Stop at page %i because points is less than %i" % (i, min_points))
        #     break
        for d in resp:
            yield d
        i += 1
        if end and i > end:
            break
        if delay:
            sleep(delay)
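Hypothetical usage, assuming a client object exposing the method above (client and process are illustrative names, not part of the original):

# Walk gallery pages 0 through 5, pausing one second between requests.
for entry in client.iter_galleries(start=0, end=5, delay=1.0):
    process(entry)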
Example #4
def update_stats_on_main():
    errors = 0
    sleep(60)  # initial sleep
    while is_server_running():
        try:
            req = get_data(
                "http://" + main_server_addr + "/update_server_stats"
                "?auth_key=" + get_internal_auth_key().decode()
                + "&server_addr=" + self_server_addr,
                post=json.dumps({"load": 0.0}))
            if not req or json.loads(req.read())["type"] != TYPE_OK:
                errors += 1
            sleep(5 * 60)  # 5 minute sleep
        except Exception as ex:
            LOG_WARN(LOG_TYPE_EXCEPTION, ex="could not update on main server")
            sleep(1 * 60)  # 1 minute sleep
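The errors counter above is incremented but never consulted. A sketch of how it could stretch the retry pause instead (hypothetical; not the original server's behavior):

import time

def backoff_sleep(errors, base=60, cap=15 * 60):
    # Double the pause for each consecutive failure, capped at 15 minutes.
    time.sleep(min(base * (2 ** errors), cap))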
Example #5
    def test_persist_timer(self):
        self.i = 0

        def add_1(args):
            self.i += 1

        gt = GTimer(only_once=False)
        gt.start(add_1, 0.1, 2)
        gt.start(add_1, 0.1, 2)
        gt.start(add_1, 0.1, 2)
        gt.start(add_1, 0.1, 2)
        time.sleep(0.3)
        self.assertTrue(self.i >= 3 and self.i <= 4)
        gt.stop()
        time.sleep(0.3)
        self.assertTrue(self.i >= 3 and self.i <= 4)
        gt.start(add_1, 0.1, 2)
        time.sleep(0.3)
        self.assertTrue(self.i >= 6 and self.i <= 8)
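The test above implies a repeating timer whose start() replaces any schedule that is already running. A minimal sketch of such a timer built on a daemon thread and time.sleep (illustrative names, not the project's GTimer API):

import threading
import time

class RepeatingTimer:
    """Calls fn(args) every `interval` seconds until stop() is called."""

    def __init__(self):
        self._stop = threading.Event()

    def start(self, fn, interval, args=None):
        self.stop()  # restarting replaces any running schedule
        self._stop = stop = threading.Event()

        def loop():
            while True:
                time.sleep(interval)
                if stop.is_set():
                    return
                fn(args)

        threading.Thread(target=loop, daemon=True).start()

    def stop(self):
        self._stop.set()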
Example #6
def _request(self, endpoint, retries=5, delay=30, ignore_credits=False):
    if not ignore_credits:
        api_credits = self.credits
        if api_credits['UserRemaining'] <= 10 and api_credits[
                'UserReset'] - time.time() > -60:  # just to be sure
            s = api_credits['UserReset'] - time.time() + 60
            warn("Waiting %i seconds till UserRemaining credits are reset."
                 % s)
            sleep(s)
            return self._request(endpoint, retries, delay, ignore_credits)
        while api_credits['ClientRemaining'] <= 20:
            warn("Waiting an hour till ClientRemaining credits are reset.")
            sleep(60 * 60)
            api_credits = self.get_credits()
    for i in range(retries):
        debug("Requesting '%s'. Retry: %i" % (endpoint, i))
        if self.credits:
            debug("Credits: %s" % self.credits)
        if i > 0:
            sleep(i * delay)  # linear backoff between retries
        # TODO: try catch all the possible request exceptions
        try:
            data = requests.request("GET",
                                    "https://api.imgur.com/3/%s" % endpoint,
                                    headers=self.headers)
        except Exception as e:
            error("Exception fetching '%s' : %s" % (endpoint, e))
            continue
        if data.status_code >= 400:
            error("Bad status code (%s) fetching '%s'"
                  % (data.status_code, endpoint))
            continue
        for k, v in data.headers.items():
            if k.startswith('X-RateLimit-'):
                # print("Update %s header: %s" % (k.replace("X-RateLimit-", ""), int(v)))
                self.credits[k.replace("X-RateLimit-", "")] = int(v)
        return data.json()
    raise FailedFetchingException(endpoint)
Example #7
def lookup_blocks_from_communicator(comm_inst):
    logger.info('Looking up new blocks')
    tryAmount = 2
    newBlocks = ''
    # List of existing saved blocks
    existingBlocks = get_block_list()
    triedPeers = []  # list of peers we've tried this time around
    # Max amount of *new* block hashes to have in queue
    maxBacklog = 1560
    lastLookupTime = 0  # Last time we looked up a particular peer's list
    new_block_count = 0
    for i in range(tryAmount):
        # Defined here to reset it each time, time offset is added later
        listLookupCommand = 'getblocklist'
        if len(comm_inst.blockQueue) >= maxBacklog:
            break
        if not comm_inst.isOnline:
            break
        # check if disk allocation is used
        if comm_inst.storage_counter.is_full():
            logger.debug(
                'Not looking up new blocks due to maximum amount of disk used')
            break
        try:
            # select random online peer
            peer = onlinepeers.pick_online_peer(comm_inst)
        except onionrexceptions.OnlinePeerNeeded:
            time.sleep(1)
            continue
        # if we've already tried all the online peers this time around, stop
        if peer in triedPeers:
            if len(comm_inst.onlinePeers) == len(triedPeers):
                break
            else:
                continue
        triedPeers.append(peer)

        # Get the last time we looked up a peer's stamp,
        # to only fetch blocks since then.
        # Saved in memory only for privacy reasons
        try:
            lastLookupTime = comm_inst.dbTimestamps[peer]
        except KeyError:
            lastLookupTime = epoch.get_epoch() - \
                config.get("general.max_block_age",
                           onionrvalues.DEFAULT_EXPIRE)
        listLookupCommand += '?date=%s' % (lastLookupTime,)
        try:
            newBlocks = peeraction.peer_action(
                comm_inst,
                peer, listLookupCommand)  # get list of new block hashes
        except Exception as error:
            logger.warn(
                f'Could not get new blocks from {peer}.',
                error=error)
            newBlocks = False

        if newBlocks != False:  # noqa
            # if request was a success
            for i in newBlocks.split('\n'):
                # process each newline-separated entry that is a valid hash
                if stringvalidators.validate_hash(i):
                    i = reconstructhash.reconstruct_hash(i)

                    # if block does not exist on disk + is not already in queue
                    if i not in existingBlocks:
                        if i not in comm_inst.blockQueue:
                            if onionrproofs.hashMeetsDifficulty(i) and \
                                 not blacklist.inBlacklist(i):
                                if len(comm_inst.blockQueue) <= 1000000:
                                    # add blocks to download queue
                                    comm_inst.blockQueue[i] = [peer]
                                    new_block_count += 1
                                    comm_inst.dbTimestamps[peer] = \
                                        epoch.get_rounded_epoch(roundS=60)
                        else:
                            if peer not in comm_inst.blockQueue[i]:
                                if len(comm_inst.blockQueue[i]) < 10:
                                    comm_inst.blockQueue[i].append(peer)
    if new_block_count > 0:
        block_string = ""
        if new_block_count > 1:
            block_string = "s"
        logger.info(
            f'Discovered {new_block_count} new block{block_string}',
            terminal=True)
        comm_inst.download_blocks_timer.count = \
            int(comm_inst.download_blocks_timer.frequency * 0.99)
    comm_inst.decrementThreadCount('lookup_blocks_from_communicator')
Example #8
def test_once_timer(self):
    meta = GrpcMeta('dev-pinpoint:9992', self.agent_meta)
    meta.start()
    time.sleep(1)
    meta.stop()
Example #9
def func1():
    print(123)
    time.sleep(3)
    print(456)
Example #10
def new_connection(self):
    print("new connection to " + self.host + ":" + str(self.port))
    time.sleep(1)
    return None
Example #11
def main_polling(self):
    self.weibo_login()
    while True:
        self.polling()
        time.sleep(weibo_config.polling_time)
Example #12
def func2(i):
    print(456)
    time.sleep(2)
    return i, 'func2'
Example #13
def _wait(self, delay):
    if delay > 0:
        time.sleep(delay)
Example #14
def close_connection(self):
    print("connection closed")
    time.sleep(1)
    return None
Example #15
def publish(self, set):  # note: the parameter name shadows the built-in set
    print("uploading: " + set)
    time.sleep(2)
    return None
Example #16
def func1(i):
    print(123)
    time.sleep(1)
    return i, 'func1'
Example #17
def openUrl(self, urlName):
    time.sleep(2)
    self.driver.get(self.config[urlName])
Example #18
    def parse(self, response):
        item = ScholaridItem()
        item['scid'] = response.meta['scid']
        item['scurl'] = response.meta['scurl']
        item['name'] = response.css('.p_name ::text').extract_first()
        item['mechanism'] = response.css('.p_affiliate ::text').extract_first()
        p_ach = response.css('.p_ach_num ::text').extract()
        item['citedtimes'] = p_ach[0]
        item['resultsnumber'] = p_ach[1]
        item['Hindex'] = p_ach[2]
        item['Gindex'] = p_ach[3]

        field = response.css('.person_domain ::text').extract()
        item['field'] = list(filter(lambda x: x != '/', field))

        pie = response.css('.pieText .number ::text').extract()
        if len(pie) == 4:
            item['journal'] = pie[0]
            item['meeting'] = pie[1]
            item['professionwork'] = pie[2]
            item['other'] = pie[3]
        else:
            item['journal'] = ''
            item['meeting'] = ''
            item['professionwork'] = ''
            item['other'] = ''

        item['total'] = response.css(
            '.pieMapTotal .number ::text').extract_first()

        # Crawl the collaboration network
        chrome_options = webdriver.ChromeOptions()
        chrome_options.add_argument('--headless')
        browser = webdriver.Chrome(chrome_options=chrome_options)
        browser.get(response.request.url)
        item['copinfo'] = list()
        # If a collaboration-graph view is present, scrape it; otherwise scrape the co-author sidebar
        try:
            browser.find_element_by_css_selector(
                '.co_author_wr h3 a').click()  # simulate clicking the "more" button
            time.sleep(0.5)
            search_window = browser.current_window_handle  # re-locate the page
            co_persons = browser.find_elements_by_css_selector(
                '.co_relmap_person')
            for co_person in co_persons:
                person = dict()
                person['url'] = self.source2real(
                    co_person.get_attribute('href'))
                co_person = co_person.find_element_by_css_selector(
                    '.co_person_name')
                person['name'] = co_person.text
                person['count'] = co_person.get_attribute('paper-count')  # number of collaborations
                person['mechanism'] = co_person.get_attribute('affiliate')
                item['copinfo'].append(person)
        except NoSuchElementException:
            co_persons = response.css('.au_info')
            for co_person in co_persons:
                person = dict()
                person['url'] = self.source2real(
                    'http://xueshu.baidu.com' +
                    co_person.css('a::attr(href)').extract_first())
                person['name'] = co_person.css('a ::text').extract_first()
                person['mechanism'] = co_person.css(
                    '.au_label ::text').extract_first()
                person['count'] = 1  # provisional; the page does not expose a collaboration count
                item['copinfo'].append(person)
        finally:
            browser.close()
        yield item
Example #19
def func2():
    print('---------')
    time.sleep(1)
    print('=========')
Example #20
def waiter(job):
    while job.result is None:
        time.sleep(0.1)
    return job.result
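waiter() above polls forever if the job never completes. A variant with a deadline (the timeout handling is an addition, not part of the original):

import time

def waiter_with_timeout(job, timeout=30.0, poll=0.1):
    deadline = time.monotonic() + timeout
    while job.result is None:
        if time.monotonic() >= deadline:
            raise TimeoutError("job did not finish within %.1f s" % timeout)
        time.sleep(poll)  # yield the CPU between checks
    return job.result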
Example #21
def CheckQueue(self, str):  # note: the parameter name shadows the built-in str
    print(str)
    time.sleep(1)
    return True