Example #1
 def myclick(self):
     goods = self.lineEdit.text()
     depth = 4
     start_url = 'https://s.taobao.com/search?q=' + goods
     for i in range(depth):  # iterate over `depth` result pages
         try:
             url = start_url + '&s=' + str(44 * i)  # Taobao paginates search results at offsets 0, 44, 88, ...
             html = Test1.getHTMLText(url)  # fetch the page HTML (helper in Test1)
             plt = re.findall(
                 r'\"view_price\"\:\"[\d\.]*\"', html
             )  # returns a list of strings like "view_price":"186.2"; the escaped \" matches the literal quotes
             tlt = re.findall(r'\"raw_title\"\:\".*?\"', html)
             for j in range(len(plt)):  # separate index so the outer page counter is not shadowed
                 price = eval(plt[j].split(':')[1])  # see the Taobao product-info crawler, part 1, for details
                 title = eval(tlt[j].split(':')[1])
                 item = QtWidgets.QTreeWidgetItem()
                 item.setText(0, title)
                 item.setData(1, QtCore.Qt.DisplayRole, float(price))
                 self.treeWidget.addTopLevelItem(item)
         except Exception:  # skip pages that fail to download or parse
             continue
     print("222")
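A minimal, self-contained sketch (not from the original project) of what the two re.findall patterns above extract, and why split(':') followed by eval() strips the surrounding quotes; the HTML fragment below is hypothetical.

import re

sample = '"raw_title":"Sample item","view_price":"186.2"'
prices = re.findall(r'\"view_price\"\:\"[\d\.]*\"', sample)   # ['"view_price":"186.2"']
titles = re.findall(r'\"raw_title\"\:\".*?\"', sample)        # ['"raw_title":"Sample item"']
price = eval(prices[0].split(':')[1])   # '"186.2"' -> '186.2' (eval removes the quotes)
title = eval(titles[0].split(':')[1])   # '"Sample item"' -> 'Sample item'
print(title, price)                     # Sample item 186.2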
Example #2
 def getTest1C2AsC2(self, current):
     return Test1.C2()
Example #3
 def getTest1C2AsObject(self, current):
     return Test1.C2()
#-------------------------------Main body
#loading an image
#I=cv2.imread("colorProblem.jpg")
#R,G,B = I[:,:,0], I[:,:,1], I[:,:,2]
#for i in [R,G,B]:
#    I_eq = cv2.equalizeHist(i)
#    n,bins = histogram(array(I_eq),256,normed=True)
#    displayHistogram(n,bins)

#Equalizing blue channel and merging
I = cv2.imread("colorProblem.jpg")
R, G, B = I[:, :, 0], I[:, :, 1], I[:, :, 2]
B_eq = cv2.equalizeHist(B)
I_mod = cv2.merge([R, G, B_eq])

Test1.show2_OpenCV(I, I_mod)
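# Note, not in the original: cv2.imread returns channels in BGR order, so the
# slice I[:, :, 0] above is really the blue channel even though it is named R
# (and "B_eq" is an equalized red channel).  A sketch with explicit BGR
# unpacking that equalizes the actual blue channel:
b_ch, g_ch, r_ch = cv2.split(I)                       # cv2.split follows BGR order
I_mod_bgr = cv2.merge([cv2.equalizeHist(b_ch), g_ch, r_ch])
Test1.show2_OpenCV(I, I_mod_bgr)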

#------Example of histogram equalization

#loading an image
#I=cv2.imread("Eye3.jpg")
#I=cv2.cvtColor(I, cv2.COLOR_RGB2GRAY)
#I_eq = cv2.equalizeHist(I)
#Test1.show2_OpenCV(I,I_eq)

#n,bins = histogram(array(I),256,normed=True)
#n2,bins2= histogram(array(I_eq),256,normed=True)
#displayHistogram(n,bins)
#displayHistogram(n2,bins2)
'''
Created on 19-02-2013

@author: Wiktor
'''
import Test1
import cv2
from pylab import *

def greyLevelMapLambdaWay(II, b, a):
    (M, N) = II.shape
    cc = zeros((M, N), uint8)
    for i in range(M):  # len[II] was a typo; iterate over the M image rows
        cc[i] = list(map(lambda x: max(min(x * a + b, 255), 0), II[i]))  # per-pixel linear map y = x*a + b, clamped to [0, 255]
    return cc

I1=cv2.imread("GreenTest.jpg")
I2 = cv2.cvtColor(I1, cv2.COLOR_RGB2GRAY)

I2 = greyLevelMapLambdaWay(I2, 2, 1)
Test1.show1_OpenCV(I2)
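# The loop above applies y = clip(x*a + b, 0, 255) one pixel at a time.  A
# vectorized sketch of the same mapping (equivalent output, not part of the
# original file; clip and uint8 come from the pylab star import):
def greyLevelMapVectorized(II, b, a):
    return clip(II.astype(float) * a + b, 0, 255).astype(uint8)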

Example #6
            with open('result.txt', 'a') as f:
                f.write('\n')
                f.write(video_name)

            # video_name='video.wmv'
            im_dir = os.path.join(osp.dirname(__file__), 'frame')
            head_dir = os.path.join(osp.dirname(__file__), 'head')
            eye_dir = os.path.join(osp.dirname(__file__), 'eye')
            cut_dir = os.path.join(osp.dirname(__file__), 'cut')
            if not os.path.exists(im_dir):
                os.mkdir(im_dir)
            vc = cv2.VideoCapture(os.path.join(video_dir, video_name))
            if vc.isOpened():
                totalFrameNumber = vc.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)

                totalFrameNumber = Test1.interception_video(
                    video_dir, video_name, im_dir, totalFrameNumber)
                print totalFrameNumber

                numpy = True  # flag: frames were successfully extracted into im_dir (a boolean, despite the name)
                for root, dirs, files in os.walk(im_dir):
                    if len(files) == 0:
                        numpy = False  # no frames found, skip the per-frame demos below

                if numpy:
                    print 'demo1~~~~~~~~~~~~~~~~~~~~~'
                    board_datas = []
                    board_datas = demo1.main()
                    gc.collect()
                    print 'demo2~~~~~~~~~~~~~~~~~~~~~'
                    head_datas = demo2.main()
                    gc.collect()
Example #7
    video_name = 'video.wmv'
    im_dir = os.path.join(osp.dirname(__file__), 'frame')
    head_dir = os.path.join(osp.dirname(__file__), 'head')
    eye_dir = os.path.join(osp.dirname(__file__), 'eye')
    if not os.path.exists(im_dir):
        os.mkdir(im_dir)
    vc = cv2.VideoCapture(os.path.join(video_dir, video_name))
    if vc.isOpened():
        totalFrameNumber = vc.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)
        shutil.rmtree(eye_dir)
        os.mkdir(eye_dir)
        shutil.rmtree(head_dir)
        os.mkdir(head_dir)
        shutil.rmtree(im_dir)
        os.mkdir(im_dir)
        Test1.interception_video(video_dir, video_name, im_dir)
        numpy = True  # flag: frames were successfully extracted into im_dir (a boolean, despite the name)
        for root, dirs, files in os.walk(im_dir):
            if len(files) == 0:
                numpy = False  # no frames found, skip the per-frame demos below

        if numpy:
            print 'demo1~~~~~~~~~~~~~~~~~~~~~'
            board_datas = []
            board_datas = demo1.main()
            print 'demo2~~~~~~~~~~~~~~~~~~~~~'
            head_datas = demo2.main()
            if not os.path.exists(head_dir):
                os.mkdir(head_dir)
            NAN.getHead(im_dir, head_dir, head_datas)
            numpy = True
Example #8
 def throwTest1Def(self, current):
     raise Test1._def()
Example #9
    ax.set_xlim(left[0], right[-1])
    ax.set_ylim(bottom.min(), top.max())
    
    plt.show()
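# The lines above are only the tail of the plotting helper; the rest of
# displayHistogram is not included in this excerpt.  A minimal sketch of such a
# helper, assuming it receives the counts and bin edges returned by histogram()
# (a reconstruction, not the original implementation):
def displayHistogramSketch(n, bins):
    fig, ax = plt.subplots()
    ax.bar(bins[:-1], n, width=bins[1] - bins[0], align='edge')
    ax.set_xlim(bins[0], bins[-1])
    ax.set_ylim(0, n.max())
    plt.show()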

#-------------------------------Main body
#loading an image
I = cv2.imread("colorProblem.jpg")
#Creating the image histogram
n, bins = histogram(array(I), 256, density=True)  # 'normed' was removed from NumPy; density=True is the current equivalent
#Displaying the histogram
displayHistogram(n, bins)



#------Example of histogram equalization

#loading an image
I = cv2.imread("colorProblem.jpg")
I = cv2.cvtColor(I, cv2.COLOR_RGB2GRAY)
I_eq = cv2.equalizeHist(I)
Test1.show2_OpenCV(I, I_eq)


def ChangeGreenToWhite(I, Gthr):
    R, G, B = cv2.split(I)
    I2 = nonzero((G >= Gthr))  # returns the indices of all elements for which the condition is true
    I[I2] = [255, 255, 255]
    return I


def ChangeGreenScreenRGB(I, Ibcg, Gthr):
    R, G, B = cv2.split(I)
    I2 = nonzero((G >= Gthr))
    I[I2] = Ibcg[I2]
    return I


I = cv2.imread("GreenTest.jpg")
Ibcg = cv2.imread("GreenTest_Background.jpg")


Test1.show1_OpenCV(ChangeGreenScreenRGB(I, Ibcg, 180))

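# Design note, not in the original: nonzero() builds explicit index arrays, but a
# NumPy boolean mask does the same selection directly.  The commented block below
# shows the nonzero() version on a small array; this is the mask equivalent
# (array comes from the pylab star import):
A = array(range(0, 20))
A[(A > 3) & (A < 10)] = 1   # boolean-mask assignment, no nonzero() needed
print(A)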
"""
#Replace all numbers >2 and <10 with 1
T = np.array(range(0,20))
T1 = nonzero((T>3) & (T<10))
T[T1] = 1

print T
"""
Example #11
            end = time()
            temp = end - start
            print("Sample's median chosen with selectDet time -->", temp)

            # SMALL SAMPLE WITH SORTING

            A = B
            start = time()
            Test5.QuickSort(A, m)  # Sample ordered with selectionSort
            end = time()
            temp = end - start
            print("Sample sorted with selectionSort time -->", temp)

            A = B
            start = time()
            Test1.QuickSort(A, m)  # Sample ordered with insertionSort
            end = time()
            temp = end - start
            print("Sample sorted with insertionSort time -->", temp)

            A = B
            start = time()
            test9.QuickSort(A, m)  # Sample ordered with bubbleSort
            end = time()
            temp = end - start
            print("Sample sorted with bubbleSort time -->", temp)

            # OTHER SORTING ALGORITHMS

            A = B
            start = time()
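The snippet above repeats the same copy-time-print pattern for every sorting routine; the helper below is a sketch of how that pattern could be factored out (not part of the original benchmark; the QuickSort(A, m) call signature is taken from the lines above).

from time import time

def time_sort(sort_fn, data, m, label):
    A = list(data)                 # fresh copy so every routine sees the same input
    start = time()
    sort_fn(A, m)                  # e.g. Test1.QuickSort(A, m)
    print(label, "time -->", time() - start)

# time_sort(Test1.QuickSort, B, m, "Sample sorted with insertionSort")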
Example #12
'''
Created on 19-02-2013

@author: Wiktor
'''
import Test1
import cv2
from pylab import *


def greyLevelMapLambdaWay(II, b, a):
    (M, N) = II.shape
    cc = zeros((M, N), uint8)
    for i in range(M):  # len[II] was a typo; iterate over the M image rows
        cc[i] = list(map(lambda x: max(min(x * a + b, 255), 0),
                         II[i]))  # per-pixel linear map y = x*a + b, clamped to [0, 255]
    return cc


I1 = cv2.imread("GreenTest.jpg")
I2 = cv2.cvtColor(I1, cv2.COLOR_RGB2GRAY)

I2 = greyLevelMapLambdaWay(I2, 2, 1)
Test1.show1_OpenCV(I2)


def ChangeGreenToWhite(I, Gthr):
    R, G, B = cv2.split(I)
    I2 = nonzero((G >= Gthr))  # returns the indices of all elements for which the condition is true
    I[I2] = [255, 255, 255]
    return I

def ChangeGreenScreenRGB(I, Ibcg, Gthr):
    R, G, B = cv2.split(I)
    I2 = nonzero((G >= Gthr))
    I[I2] = Ibcg[I2]
    return I


I = cv2.imread("GreenTest.jpg")
Ibcg = cv2.imread("GreenTest_Background.jpg")


Test1.show1_OpenCV(ChangeGreenScreenRGB(I, Ibcg, 180))

"""
#Replace all numbers >2 and <10 with 1
T = np.array(range(0,20))
T1 = nonzero((T>3) & (T<10))
T[T1] = 1

print T
"""    
    


#-------------------------------Main body
#loading an image
#I=cv2.imread("colorProblem.jpg") 
#R,G,B = I[:,:,0], I[:,:,1], I[:,:,2]
#for i in [R,G,B]:
#    I_eq = cv2.equalizeHist(i)
#    n,bins = histogram(array(I_eq),256,normed=True)
#    displayHistogram(n,bins)

#Equalizing blue channel and merging
I = cv2.imread("colorProblem.jpg")
R, G, B = I[:, :, 0], I[:, :, 1], I[:, :, 2]
B_eq = cv2.equalizeHist(B)
I_mod = cv2.merge([R, G, B_eq])

Test1.show2_OpenCV(I, I_mod)

#------Example of histogram equalization

#loading an image
#I=cv2.imread("Eye3.jpg") 
#I=cv2.cvtColor(I, cv2.COLOR_RGB2GRAY)
#I_eq = cv2.equalizeHist(I)
#Test1.show2_OpenCV(I,I_eq)

#n,bins = histogram(array(I),256,normed=True)
#n2,bins2= histogram(array(I_eq),256,normed=True) 
#displayHistogram(n,bins)
#displayHistogram(n2,bins2)

Example #15
 def throwTest1E2AsE2(self, current):
     raise Test1.E2()
Example #16
def startIdentify():
    recognizer = cv2.face.LBPHFaceRecognizer_create()
    recognizer.read('trainer/trainer.yml')
    #cascadePath = "haarcascade_frontalface_default.xml"
    #faceCascade = cv2.CascadeClassifier(cascadePath);
    faceCascade = cv2.CascadeClassifier(
        'C:/Users/e3003895/AppData/Local/Continuum/anaconda3/Lib/site-packages/cv2/data/haarcascade_frontalface_default.xml'
    )

    font = cv2.FONT_HERSHEY_SIMPLEX

    #initialize id counter
    id = 0

    # names related to ids: example ==> Prasanna: id=1,  etc
    names = ['None', 'Prasanna', '', '', '', 'W']

    # Initialize and start realtime video capture
    cam = cv2.VideoCapture(0)
    cam.set(3, 640)  # set video width
    cam.set(4, 480)  # set video height

    # Define min window size to be recognized as a face
    minW = 0.1 * cam.get(3)
    minH = 0.1 * cam.get(4)

    while True:

        ret, img = cam.read()
        #img = cv2.flip(img, -1) # Flip vertically

        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        faces = faceCascade.detectMultiScale(
            gray,
            scaleFactor=1.2,
            minNeighbors=5,
            minSize=(int(minW), int(minH)),
        )
        print('Number of faces detected: ' + str(len(faces)))

        for (x, y, w, h) in faces:

            cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)

            id, confidence = recognizer.predict(gray[y:y + h, x:x + w])

            # Check if confidence is less than 100 ==> "0" is a perfect match
            #pm#if (confidence < 100):
            if (confidence < 80):
                id = names[id]
                #pm#confidence = "  {0}%".format(round(100 - confidence))
                confidence = "  {0}%".format(round(confidence))
            else:
                id = "unknown"
                #pm#confidence = "  {0}%".format(round(100 - confidence))
                confidence = "  {0}%".format(round(confidence))
            cv2.putText(img, str(id), (x + 5, y - 5), font, 1, (255, 255, 255),
                        2)
            cv2.putText(img, str(confidence), (x + 5, y + h - 5), font, 1,
                        (255, 255, 0), 1)

        cv2.imshow('camera', img)

        k = cv2.waitKey(10) & 0xff  # press 'ESC' to exit the video loop
        if k == 27:
            break

    # Do a bit of cleanup
    print("\n [INFO] Exiting Program and cleanup stuff")
    cam.release()
    cv2.destroyAllWindows()
    print('detected id is ' + str(id))  # id may still be the initial integer if no face was recognized
    #ID_INT = int(id)
    return Test1.render_static(id)
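The recognizer above loads a pre-trained model from trainer/trainer.yml; that training step is not shown in these excerpts. The sketch below is an assumption of how such a file is produced with the same OpenCV LBPH API (the helper name and the face_samples/ids inputs are hypothetical).

import numpy as np
import cv2

def train_recognizer(face_samples, ids):
    # face_samples: list of grayscale face crops (numpy arrays); ids: matching integer labels
    recognizer = cv2.face.LBPHFaceRecognizer_create()
    recognizer.train(face_samples, np.array(ids))
    recognizer.write('trainer/trainer.yml')   # file later loaded by recognizer.read() above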
Example #17
 def throwTest1Def(self, current):
     raise Test1._def()
Example #18
import Test1

Test1.test()