Example #1
 def show_frame(self):
     start = time()
     _, frame = self.cap.read()
     frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
     try:
         # Run face recognition on the current frame and draw the face
         # contour onto it.
         present_user = Classifier()
         present_user.infer(frame)
         # self.set_person_name(frame, present_user.person_name, present_user.confidence)
         self.set_face_shape_contour(frame)
     except Exception:
         present_user = None
         print('The camera cannot see your face')
     # Push the (possibly annotated) frame into the Tkinter label; keeping
     # references on self prevents the image from being garbage-collected.
     self.img = Image.fromarray(frame)
     self.imgtk = ImageTk.PhotoImage(image=self.img)
     self.lmain.imgtk = self.imgtk
     self.lmain.configure(image=self.imgtk)
     self.lmain.after(1, self.show_frame)
     print(time() - start)
     if present_user is not None:
         print(present_user.person_name, present_user.confidence,
               present_user.face_128_chars[:6])
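
A note on the example above: it constructs a new Classifier on every frame, so if construction loads a model from disk, that cost dominates the timing it prints. A minimal sketch of the usual alternative, assuming only infer() needs to run per frame (the class and names below are illustrative, not from the source):

from time import time

class FrameLoopSketch:
    """Hypothetical pattern: build the classifier once, reuse it per frame."""

    def __init__(self, cap, classifier):
        self.cap = cap                    # e.g. cv2.VideoCapture(0)
        self.classifier = classifier      # constructed once, outside the loop

    def process_one(self):
        start = time()
        ok, frame = self.cap.read()
        if ok:
            self.classifier.infer(frame)  # per-frame inference only
        return time() - start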
Example #2
def crawler(domain, pathseed, uniqueId, maxSize=5000):
    # Best-first crawl: URLs are visited in order of their heuristic
    # value(); every fetched page is classified and archived on disk.
    pq = queue.PriorityQueue()
    visited = set()  # set membership tests are O(1)
    links = []
    seed = domain + pathseed
    pq.put((value(seed), seed))
    visited.add(seed)
    rp = Robots.fetch(domain + '/robots.txt', verify=False)
    driver = webdriver.PhantomJS(
        service_args=['--ignore-ssl-errors=true', '--ssl-protocol=any'])
    while not pq.empty() and pq.qsize() < maxSize * 5:
        a = pq.get()[1]
        print("! " + str(len(links)) + " " + a)
        if len(links) < maxSize:
            links.append(a)
            ls = get_all_links(domain, a, maxSize, rp, driver)
            for l in ls:
                if l not in visited:
                    score = value(l)
                    # Only enqueue links the heuristic considers promising.
                    if score == 1 or score == 2:
                        visited.add(l)
                        pq.put((score, l))
        else:
            # Enough links collected: drain the queue to end the loop.
            while not pq.empty():
                pq.get()
    while len(links) < maxSize and not pq.empty():
        links.append(pq.get()[1])
    base = 'Docs/HTMLPages/Heuristic2/' + folder(domain)
    os.makedirs(base + '/True/', exist_ok=True)
    os.makedirs(base + '/False/', exist_ok=True)
    print(len(links))
    v = 0
    pos = 0
    clf = Classifier()
    for l in links:
        v += 1
        driver.get(l)
        time.sleep(1)  # give the page a moment to render
        soup = BeautifulSoup(driver.page_source, "html.parser")
        res = str(clf.classify(soup))
        print(str(v) + " " + l + " " + res)
        if res == 'True':
            pos += 1
            extractorMain.extractor(soup, folder(domain), "Heuristic2",
                                    folder(domain).lower(), l, uniqueId)
            uniqueId += 1
        # Archive the raw HTML under the classifier's True/False verdict.
        with open(base + '/' + res + '/' + str(v) + '-' +
                  l.replace('/', '*') + '.html', 'wb') as f:
            f.write(bytes(driver.page_source, 'UTF-8'))
    hr = pos / maxSize  # harvest rate: positive pages per requested page
    with open(base + '/hr.txt', 'wb') as f:
        f.write(bytes(str(hr), 'UTF-8'))
    return 0
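
For context, a minimal usage sketch of the function above; the domain, seed path, and limits are illustrative values, not taken from the source:

# Hypothetical invocation: crawl up to 100 pages starting from a seed path.
crawler('https://www.example.com', '/start/', uniqueId=0, maxSize=100)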
Example #3
def crawler(domain, pathseed, maxSize=273):
    # Breadth-first crawl: URLs are visited in FIFO order; every fetched
    # page is classified and archived on disk.
    q = queue.Queue()
    visited = set()  # set membership tests are O(1)
    links = []
    seed = domain + pathseed
    q.put(seed)
    visited.add(seed)
    rp = Robots.fetch(domain + '/robots.txt', verify=False)
    driver = webdriver.PhantomJS(
        service_args=['--ignore-ssl-errors=true', '--ssl-protocol=any'])
    while not q.empty() and q.qsize() < maxSize:
        a = q.get()
        print("! " + str(len(links)) + " " + a)
        if len(links) < maxSize:
            links.append(a)
            ls = get_all_links(domain, a, maxSize, rp, driver)
            for l in ls:
                if l not in visited:
                    visited.add(l)
                    q.put(l)
        else:
            # Enough links collected: drain the queue to end the loop.
            while not q.empty():
                q.get()
    while len(links) < maxSize and not q.empty():
        links.append(q.get())
    base = 'Docs/HTMLPages/BFS/' + folder(domain)
    os.makedirs(base + '/True/', exist_ok=True)
    os.makedirs(base + '/False/', exist_ok=True)
    print(len(links))
    v = 0
    pos = 0
    clf = Classifier()
    for l in links:
        v += 1
        driver.get(l)
        time.sleep(1)  # give the page a moment to render
        soup = BeautifulSoup(driver.page_source, "html.parser")
        res = str(clf.classify(soup))
        print(str(v) + " " + l + " " + res)
        if res == 'True':
            pos += 1
        # Archive the raw HTML under the classifier's True/False verdict.
        with open(base + '/' + res + '/' + str(v) + '-' +
                  l.replace('/', '*') + '.html', 'wb') as f:
            f.write(bytes(driver.page_source, 'UTF-8'))
    hr = pos / maxSize  # harvest rate: positive pages per requested page
    with open(base + '/hr.txt', 'wb') as f:
        f.write(bytes(str(hr), 'UTF-8'))
    return 0
Example #4
 def start_capture(self):
     """
     Starts displaying frames from the camera.
     """
     if not self.capture:
         self.capture = QtCapture(0)
         # self.capture.setFPS(1)
         self.capture.user_classifier = Classifier()
         self.capture.setParent(self)
         self.capture.setWindowFlags(QtCore.Qt.Tool)
     self.capture.start()
     self.capture.show()
Example #5
 def __init__(self, *args):
     # super(QWidget, self) would skip QWidget's own __init__ in the MRO;
     # a plain super() call initializes the widget correctly.
     super().__init__()
     self.fps = 30
     self.video_frame = QLabel()
     self.cap = cv2.VideoCapture(*args)
     self.user_classifier = Classifier()
     lay = QVBoxLayout()
     # lay.setContentsMargins(0)
     lay.addWidget(self.video_frame)
     self.setLayout(lay)
     self.label_encoder = None
     self.classifier = None
     self.person_name = None
     self.confidence = None
Example #6
from selenium import webdriver
from bs4 import BeautifulSoup
import signal
from Classifier.classifier import Classifier

url = "http://www.spoj.com/problems/MMIND/"
driver = webdriver.PhantomJS(
    service_args=['--ignore-ssl-errors=true', '--ssl-protocol=any'])
driver.get(url)
page = BeautifulSoup(driver.page_source, "html.parser")
# Terminate the PhantomJS process once the page source has been captured.
driver.service.process.send_signal(signal.SIGTERM)

# Run each of the six classifier variants against the same parsed page.
for i in range(6):
    clf = Classifier(i)
    print(clf.classify(page))
Example #7
 def add_user(self):
     # Prompt for the new user's name, build a profile for it, then
     # retrain the classifier on the extended data set.
     person_name = input("Enter your full name in the format 'familiya-imya'\n")
     self.make_user_profile(person_name)
     add_user = Classifier()
     add_user.train_classifier()
     print('User added successfully')