import threading
from datetime import datetime


def start_parser():
    # Script entry point.
    read_file_name = 'list_requests'
    # read_file_name = input('filename: ')
    if 'json' in read_file_name:
        reqs = Req.read_json(read_file_name)
    else:
        reqs = Req.read_txt(read_file_name)
    time_now = datetime.now(tz=None)
    print("time start {}:{}:{}".format(time_now.hour, time_now.minute, time_now.second))
    get_positions(reqs)  # query Google and Yandex, passing the list of Req objects
    Req.create_json(reqs)
    time_now = datetime.now(tz=None)
    print("time finish {}:{}:{}".format(time_now.hour, time_now.minute, time_now.second))
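
# Req, get_positions, run_scraper and get_ports come from elsewhere in the
# project. A minimal sketch of the Req interface this entry point relies on,
# inferred from usage here (not the project's actual definition):
#
#     class Req:
#         def __init__(self, id, text):
#             self.id, self.text = id, text
#
#         @staticmethod
#         def read_txt(name): ...     # plain-text query list -> list of Req
#         @staticmethod
#         def read_json(name): ...    # previously saved state -> list of Req
#         @staticmethod
#         def create_json(reqs): ...  # dump the collected results to JSON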
if __name__ == '__main__':
    read_file_name = 'list_requests'
    # read_file_name = input('filename: ')
    if 'json' in read_file_name:
        reqs = Req.read_json(read_file_name)
    else:
        reqs = Req.read_txt(read_file_name)
    requests_google = [req.id for req in reqs]  # ids of Google requests not done yet
    requests_google.reverse()  # reversed, so pending ids can be popped from the end
    requests_yandex = requests_google.copy()  # ids of Yandex requests not done yet
    time_now = datetime.now(tz=None)
    print("time start {}:{}:{}".format(time_now.hour, time_now.minute, time_now.second))
    ports = get_ports()  # get the list of ports
    print(ports)
    lock_w, lock_y, lock_g, lock_rest = threading.RLock(), threading.RLock(), threading.RLock(), threading.RLock()
    # One scraper thread per port; all threads share the same request-id lists.
    pool = []
    for port in ports:
        stream = threading.Thread(target=run_scraper, args=(port, reqs, requests_google, requests_yandex))
        pool.append(stream)
    for stream in pool:
        stream.start()
    for stream in pool:
        stream.join()
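
# run_scraper is defined elsewhere; the four RLocks above are module-level
# globals that presumably guard the shared state (lock_g/lock_y for the
# Google/Yandex id lists, lock_w/lock_rest most likely for result writing and
# the remaining shared data). A minimal sketch of the worker pattern this main
# block assumes (illustrative only, not the project's actual implementation):
#
#     def run_scraper(port, reqs, requests_google, requests_yandex):
#         while True:
#             with lock_g:
#                 if not requests_google:
#                     break
#                 req_id = requests_google.pop()  # take a pending id from the end
#             # ...query Google through the proxy on `port`, record the position,
#             # then do the same for requests_yandex under lock_y.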