main.py
import threading
from spider import Spider
from Domain import *
from general import *
from queue import Queue
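
# Assumed behaviour of the wildcard-imported helpers (a sketch, not checked
# against Domain.py / general.py):
#   get_domain_name(url)   -> extract the site's domain, e.g. 'lnmiit.ac.in'
#   file_to_set(file_name) -> read a text file into a set of stripped lines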

PROJECT_NAME = "The LNM Institute of Information Technology"
HOMEPAGE = "http://www.lnmiit.ac.in/"
DOMAIN_NAME = get_domain_name(HOMEPAGE)
QUEUE_FILE = PROJECT_NAME + '/queue.txt'
CRAWLED_FILE = PROJECT_NAME + '/crawled.txt'
NUMBER_OF_THREADS = 8
queue = Queue()
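
# Creating the first Spider is assumed (following the usual design of this
# style of crawler) to create the project folder, seed queue.txt with the
# homepage, and share crawl state with the worker threads below.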
Spider(PROJECT_NAME, HOMEPAGE, DOMAIN_NAME)

# Check whether there are links in the queue; if so, crawl them
def crawl():
    queued_links = file_to_set(QUEUE_FILE)
    if len(queued_links) > 0:
        print(str(len(queued_links)) + ' links in the queue')
        create_jobs()

# Each queued link is a new job
def create_jobs():
    for link in file_to_set(QUEUE_FILE):
        queue.put(link)
    queue.join()
    crawl()

# Create worker threads (daemon threads die when the main thread exits)
def create_workers():
    for _ in range(NUMBER_OF_THREADS):
        t = threading.Thread(target=work)
        t.daemon = True
        t.start()

# Do the next job in the queue
def work():
    while True:
        url = queue.get()
        Spider.crawl_page(threading.current_thread().name, url)
        queue.task_done()
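
# Assumed entry point (a minimal completion): start the worker threads, then
# queue the first batch of links from queue.txt
create_workers()
crawl()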