Ejemplo n.º 1
0
def on_message(client, userdata, msg):
    """MQTT callback for the recognition/control loop.

    Handles two payload kinds:
      * a command string from `commands` ('Rec' / 'Con' / other) that sets up
        recognition or control mode and the image filename, and
      * any other payload, treated as a base64-encoded image and written to
        disk under the current `direc` name.
    Once both a command and an image have arrived, either classifies the image
    (VGG16 features via `model` + `clf`, then an ACK loop with the TCP peer
    `connect` to pick the recognized appliance) or translates fingertip
    positions from `finger_control_f` into pyautogui key presses.
    """
    # BUGFIX: these names carry state across successive callback invocations
    # (`counter += 1` and the reads of command_obtained / image_obtained /
    # time_in would otherwise raise UnboundLocalError on a fresh local).
    global command_obtained, image_obtained, control, counter, direc
    global flag_f1, x_f1, y_f1, x_f2, y_f2, obj, time_in, control_scheme

    if msg.payload in commands:
        command_obtained = True
        info = msg.payload
        print('Get control signal:', info)
        if info == 'Rec':
            cv2.destroyAllWindows()
            control = False
            flag_f1 = True
            # reset fingertip reference coordinates
            x_f1 = 0
            x_f2 = 0
            y_f1 = 0
            y_f2 = 0
            direc = str(counter) + 'imageRec.jpg'
        elif info == 'Con':
            control = True
            direc = str(counter) + 'imageCon.jpg'
        else:
            direc = str(counter) + 'image.jpg'

        counter += 1

    elif command_obtained == True and msg.payload not in commands:
        # Non-command payload after a command: treat it as the image data.
        image_obtained = True
        with open(direc + ".jpg", "wb") as fh:
            fh.write(base64.decodebytes(msg.payload))

    if image_obtained == True and command_obtained == True:
        image_obtained, command_obtained = False, False
        if not control:
            # Recognition mode: crop, extract VGG16 features, classify.
            print('Doing classification.')
            test_set = []
            img_crop, img_bk = generate_crop(direc, 210)
            cv2.imwrite('2nd_step.jpg', img_crop)

            # VGG16 expects a 224x224 input batch.
            img = image.load_img('2nd_step.jpg', target_size=(224, 224))
            img_data = image.img_to_array(img)
            img_data = np.expand_dims(img_data, axis=0)
            img_data = preprocess_input(img_data)

            vgg16_feature = model.predict(img_data)
            test_set.append(np.ndarray.tolist(vgg16_feature[0]))

            if test_set:
                predict_target = clf.predict(test_set)
                print(predict_target.shape)
                print(predict_target.size)
                predict_prob = clf.predict_proba(test_set)
                print('predict results.')
                print(clf.classes_)
                print(predict_prob)
                prob = predict_prob[0]
                # Class indices ordered by descending probability.
                orderedIndex = sorted(range(len(prob)),
                                      key=lambda k: prob[k],
                                      reverse=True)
                print(orderedIndex)
                print("appliances in order")
                validNum = len([i for i in prob if i > 0.075]) - 1
                print('There are valid object #', validNum)
                # get all the results in order and loop thru
                print(predict_target)
                predict_target = predict_target[0]
                for indexCount in orderedIndex:
                    print(clf.classes_[indexCount], end=" ")

                applianceTuple = applianceDict[predict_target]
                indexCount = 0
                # Offer candidates in ranked order until the peer ACKs one
                # (or the connection drops); wraps around after 6 entries.
                while True:
                    print("orderedList ",
                          clf.classes_[orderedIndex[indexCount]])
                    info = connect.recv(1024)
                    info = info.decode()
                    if info == 'ACK':
                        print(info)
                        obj = clf.classes_[orderedIndex[indexCount]]
                        break
                    elif info == '':
                        print('Interrupted.')
                        break
                    indexCount += 1
                    if indexCount > 5:
                        indexCount = 0

        else:
            # Control mode: detect the fingertip and emit key presses.
            img_bk, k, top, mid, control_signal = finger_control_f(
                direc, 220, 5, -70, 3)
            height, width = img_bk.shape
            if flag_f1:
                # First control frame: edge gestures / 'Down' press a setup
                # key and finish handling of this message immediately.
                # BUGFIX: the original used `continue` here and below, which
                # is a SyntaxError outside a loop; `return` ends this
                # callback, which is what skipping the rest amounts to.
                if control_signal == 'Down':
                    pyautogui.press('k')
                    connect.sendall(b'Doing Con.')
                    return
                if top < 20:
                    pyautogui.press('i')
                    connect.sendall(b'Doing Con.')
                    return
                elif mid < 20:
                    pyautogui.press('j')
                    connect.sendall(b'Doing Con.')
                    return
                elif (width - mid) < 20:
                    pyautogui.press('l')
                    connect.sendall(b'Doing Con.')
                    return
                else:
                    pyautogui.press('r')
                    x_f1 = mid
                    y_f1 = top
                    # Select the channel key for the recognized appliance.
                    if obj == 'Printer':
                        pyautogui.press('a')
                    elif obj == 'Coffee maker':
                        pyautogui.press('b')
                    elif obj == 'TV':
                        pyautogui.press('c')
                    elif obj == 'Door':
                        pyautogui.press('d')
                    elif obj == 'Minotor':
                        pyautogui.press('e')
            x_f2 = mid
            y_f2 = top
            print('slope is ', k, 'top y value is ', top, ' and mid value is ',
                  mid)
            print('control signal is', control_signal)

            print('done finger detection', time.time() - time_in)
            time_in = time.time()
            # Quit gesture: confirm and stop control mode.
            if control_signal == 'Down':
                pyautogui.press('enter')
                connect.sendall(b'Stop Con.')
                return  # BUGFIX: was `continue` outside a loop

            else:
                slope = height / width
                if (control_scheme == '1'):
                    # Control scheme 1: video-style directional control.
                    xx_thres = 20
                    yy_thres = 15
                    delta_y_f = y_f2 - y_f1
                    delta_x_f = x_f2 - x_f1

                    steps_y = delta_y_f / 10
                    steps_x = delta_x_f / 10

                    if steps_x < 0:
                        for x_type in range(abs(int(steps_x))):
                            pyautogui.press('z')  # left
                    elif steps_x > 0:
                        for x_type in range(abs(int(steps_x))):
                            pyautogui.press('x')  # right

                    if steps_y < 0:
                        for y_type in range(abs(int(steps_y))):
                            pyautogui.press('n')  # up
                    elif steps_y > 0:
                        for y_type in range(abs(int(steps_y))):
                            pyautogui.press('m')  # down
                    else:
                        pyautogui.press('p')  # middle

                    pyautogui.press('enter')

                    # Quadrant test against the image diagonal decides the
                    # arrow key; small movements mean "stay".
                    if (delta_x_f > xx_thres or delta_x_f < -xx_thres) or (
                            delta_y_f > yy_thres or delta_y_f < -yy_thres):
                        # left
                        if delta_y_f < -slope * delta_x_f and delta_y_f > slope * delta_x_f:
                            print('go left')
                            pyautogui.press('left')
                        # right
                        elif delta_y_f > -slope * delta_x_f and delta_y_f < slope * delta_x_f:
                            print('go right')
                            pyautogui.press('right')
                        elif delta_y_f > -slope * delta_x_f and delta_y_f > slope * delta_x_f:
                            print('go down')
                            pyautogui.press('down')
                        elif delta_y_f < -slope * delta_x_f and delta_y_f < slope * delta_x_f:
                            print('go up')
                            pyautogui.press('up')
                    else:
                        print('STAY.')
                        pyautogui.press('v')
                elif (control_scheme == '2'):
                    # Control scheme 2: mouse movement relative to the screen
                    # center, scaled 5x.
                    # NOTE(review): x0/y0 are only assigned when flag_f1 is
                    # true — verify later frames cannot reach the moveTo
                    # below without them.
                    if flag_f1:
                        x0, y0 = pyautogui.size()
                        pyautogui.moveTo(x0 / 2, y0 / 2)
                    delta_y_f = (y_f2 - y_f1) * 5
                    delta_x_f = (x_f2 - x_f1) * 5
                    pyautogui.moveTo(delta_x_f + x0 / 2, delta_y_f + y0 / 2)
                elif (control_scheme == '3'):
                    # Control scheme 3: slide navigation (vertical only).
                    delta_y_f = y_f2 - y_f1
                    if delta_y_f > 15:
                        print('go down')
                        pyautogui.press('down')
                    elif delta_y_f < -15:
                        print('go up')
                        pyautogui.press('up')
                else:
                    print('no control')
                    control_scheme = input(
                        'Which control:(1-video,2-mouse,3-slides)')
                    connect.sendall(b'Stop Con.')
                    return  # BUGFIX: was `continue` outside a loop

                # ack it is done
                time.sleep(0.05)
                connect.sendall(b'Doing Con.')

            # First-frame setup is complete.
            flag_f1 = False

            cv2.imshow('Binary Image', img_bk)
            cv2.waitKey(5)
Ejemplo n.º 2
0
def on_message(client, userdata, msg):
    """MQTT callback: on each 'image' message, save the frame and ask the
    TCP peer (`connect`) for the current mode.

    'rec' -> classify the frame (VGG16 features + clf) and loop over the
             ranked candidates until the peer ACKs one (stored in `obj`).
    'con' -> fingertip directional control: the first frame records the
             (x_ref, y_ref, k_ref, mid_ref) reference pose; later frames
             are compared against it and mapped to numpad key presses.
    """
    global obj, command_in, down_confirm,x_ref,y_ref, k_ref, mid_ref


    #print(msg.topic)
    if msg.topic == 'image':

        #print(len(msg.payload))
        #print('image transmitted')
        t1 = time.time()
        # Persist the base64 payload as the working frame.
        with open('1.png', "wb") as fh:
            fh.write(base64.decodebytes(msg.payload))
        info=connect.recv(1024)
        info = info.decode()
        print('Get control signal:',info)

        if info == 'rec':
            # Reset the per-session control state and the reference pose.
            command_in = False
            down_confirm = False
            x_ref = None
            y_ref = None
            k_ref = None

            print('Doing classification.')
            test_set = []
            img_crop,img_bk = generate_crop('1.png',220)

            img_bk,k,top,mid,control_signal,x_mid = finger_control_f('1.png',binary_thre, 5,-70,3)

            #cv2.imshow('Binary Image', img_bk)
            cv2.waitKey(3)

            cv2.imwrite('2nd_step.jpg',img_crop)

            # VGG16 expects a 224x224 input batch.
            img = image.load_img('2nd_step.jpg', target_size=(224, 224))
            img_data = image.img_to_array(img)
            img_data = np.expand_dims(img_data, axis=0)
            img_data = preprocess_input(img_data)

            vgg16_feature = model.predict(img_data)
            test_set.append(np.ndarray.tolist(vgg16_feature[0]))
            #print(test_set)

            if test_set:
                predict_target = clf.predict(test_set)
                print(predict_target.shape)
                print(predict_target.size)
                predict_prob = clf.predict_proba(test_set)
                #print(correct_tag)
                print('predict results.')
                print(clf.classes_)
                print(predict_prob)
                prob = predict_prob[0]
                # Class indices ordered by descending probability.
                orderedIndex=sorted(range(len(prob)), key=lambda k: prob[k], reverse=True)
                print(orderedIndex)
                print("appliances in order")
                validNum = 0
                validNum = len([i for i in prob if i > 0.075]) - 1
                print('There are valid object #', validNum)
                # get all the results in order and loop thru
                print(predict_target)
                predict_target=predict_target[0]

                for indexCount in orderedIndex:
                    print(clf.classes_[indexCount],end=" ")


                indexCount = 0

                # Offer candidates in ranked order until the peer ACKs one
                # (or the connection drops); wraps around after 6 entries.
                while True:
                    print("orderedList ",clf.classes_[orderedIndex[indexCount]])
                    info_2=connect.recv(1024)
                    info_2 = info_2.decode()
                    if info_2 == 'ACK':
                        print(info_2)
                        obj = clf.classes_[orderedIndex[indexCount]]
                        break
                    elif info_2 == '':
                        print('Interrupted.')
                        break
                    indexCount += 1
                    if indexCount > 5:
                        indexCount = 0
                connect.sendall(b'ready')
                time.sleep(0.5)
                connect.sendall(b'Doing Con.')


        elif info == 'con':
            t2 = time.time()
            #print(obj)
            #print('Con coming soon.')

            img_bk,k,top,mid,control_signal,x_mid = finger_control_f('1.png',binary_thre, 5,-70,3)

            cv2.imwrite('../binary.png',img_bk)
            height,width = img_bk.shape
            t3 = time.time()
            #print(top,mid)

            #print(k,x_mid)
            # Select the channel key for the appliance recognized earlier.
            if obj == 'Printer':
                pyautogui.press('a')
            elif obj =='Coffee maker':
                pyautogui.press('b')
            elif obj =='TV':
                pyautogui.press('c')
            elif obj =='Door':
                pyautogui.press('d')
            elif obj =='Minotor':
                pyautogui.press('e')

            #print('slope is ',k,'top y value is ',top,' and mid value is ', mid)
            #print('control signal is', control_signal)
            # NOTE(review): falsy test — a legitimate reference value of 0
            # would also re-trigger initialization; confirm intended.
            if not x_ref or not y_ref or not k_ref:
                # First control frame: record the reference pose.
                x_ref = mid
                y_ref = top
                mid_ref = x_mid
                if mid == x_mid:
                    # Vertical finger: the else branch would divide by zero,
                    # so use just under pi/2 instead.
                    direction = np.pi/2 - 0.01
                #print(top/(mid-x_mid))
                else:
                    direction=np.arctan(top/float((mid-x_mid)))
                k_ref = direction
                connect.sendall(b'Doing Con.')
            else:
                # quite
                if control_signal == 'Down':
                    print('down')
                    pyautogui.press('m')
                    if command_in:
                        down_confirm = True
                    time.sleep(0.01)
                    connect.sendall(b'Doing Con.')
                    #print(down_confirm)

                else:
                    command_in = True
                    print('up')
                    pyautogui.press('n')

                    if mid == x_mid:
                        direction = k_ref
                    #print(top/(mid-x_mid))
                    else:
                        direction=np.arctan(top/float((mid-x_mid)))
                    print(direction - k_ref)
                    print(x_mid - mid_ref)

                    # Map the pose change relative to the reference into one
                    # of four blocks / numpad keys. `size` is presumably a
                    # module-level angular threshold — not defined in this
                    # view, confirm.
                    if (x_mid - mid_ref > width//5) or (direction - k_ref > size):
                        print('block 4')
                        block = 8
                        pyautogui.press('8')
                    elif (x_mid - mid_ref < -width // 5) or (direction - k_ref < -size):
                        print('block 1')
                        block = 2
                        pyautogui.press('2')
                    elif (direction - k_ref < size) :
                        print('block 2')
                        block = 4
                        pyautogui.press('4')
                    elif (direction - k_ref > -size):
                        print('block 3')
                        block = 6
                        pyautogui.press('6')

                    '''
                    elif x_mid > width*1.15//2 and x_mid < width*3.1//4:
                        print('block 3')
                        block = 6
                        pyautogui.press('6')
                    elif x_mid < width*1.05// 2 and x_mid > width *7//20 :
                        print('block 2')
                        block = 4
                        pyautogui.press('4')
                    '''
                    '''
                    if direction > 0 and direction < np.pi/2 - size:
                        print('block 4')
                        block = 8
                        pyautogui.press('8')
                    elif direction > np.pi/2 - size and direction < np.pi/2:
                        print('block 3')
                        block = 6
                        pyautogui.press('6')
                    elif direction < 0 and direction > - np.pi/2 + size:
                        print('block 1')
                        block = 2
                        pyautogui.press('2')
                    elif direction < -np.pi/2 + size:
                        print('block 2')
                        block = 4
                        pyautogui.press('4')
                    '''
                    #print('here')
                    #print(down_confirm)
                    # A 'Down' seen earlier within this command finishes it.
                    if down_confirm == True:
                        down_confirm = False
                        command_in = False
                        #connect.sendall(b'Stop Con.')
                        connect.sendall(b'Doing Con.')
                    else:
                        connect.sendall(b'Doing Con.')
Ejemplo n.º 3
0
        # Rewind the stream, open it as an image with PIL and do some
        # processing on it
        image_stream.seek(0)  # seek(0) - the start of the file

        # Decode the raw stream bytes into an OpenCV image and archive it
        # under a per-frame filename.
        file_bytes = np.asarray(bytearray(image_stream.read()), dtype=np.uint8)
        img = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)
        direc = 'Raw_Images/' + str(counter) + 'image.jpg'
        cv2.imwrite(direc, img)
        counter += 1

        #img = generate_crop(file_path,240)

        if not control:
            # Recognition mode: crop the frame, extract VGG16 features and
            # classify with the pre-trained clf.
            test_set = []
            img_crop, img_bk = generate_crop(direc, 220)
            cv2.imwrite('2nd_step.jpg', img_crop)

            # VGG16 expects a 224x224 input batch.
            img = image.load_img('2nd_step.jpg', target_size=(224, 224))
            img_data = image.img_to_array(img)
            img_data = np.expand_dims(img_data, axis=0)
            img_data = preprocess_input(img_data)

            vgg16_feature = model.predict(img_data)
            test_set.append(np.ndarray.tolist(vgg16_feature[0]))

            if test_set:
                predict_target = clf.predict(test_set)
                print(predict_target.shape)
                print(predict_target.size)
                predict_prob = clf.predict_proba(test_set)
Ejemplo n.º 4
0
def on_message(client, userdata, msg):
    """MQTT callback: store each incoming frame as 1.png, then ask the TCP
    peer (`connect`) for the mode.

    'rec' -> classify the frame (VGG16 features + clf) and loop over ranked
             candidates until the peer ACKs one, stored in `obj`.
    'con' -> fingertip detection on the same frame drives pyautogui key
             presses (appliance channel + directional control scheme 1).
    """
    # BUGFIX: `obj` is assigned in the 'rec' branch but read in the 'con'
    # branch of a *later* invocation; without `global` it is a fresh local
    # each call and 'con' raises NameError. time_in is global so the
    # detection timestamp survives between frames as well.
    global obj, time_in

    if msg.topic == 'image':
        # Persist the base64 payload as the working frame.
        with open('1.png', "wb") as fh:
            fh.write(base64.decodebytes(msg.payload))
        info = connect.recv(1024)
        info = info.decode()
        print('Get control signal:', info)

        if info == 'rec':
            print('Doing classification.')
            test_set = []
            img_crop, img_bk = generate_crop('1.png', 220)

            img_bk, k, top, mid, control_signal = finger_control_f(
                '1.png', 200, 5, -70, 3)
            x_f1 = mid
            y_f1 = top
            cv2.waitKey(5)

            cv2.imwrite('2nd_step.jpg', img_crop)

            # VGG16 expects a 224x224 input batch.
            img = image.load_img('2nd_step.jpg', target_size=(224, 224))
            img_data = image.img_to_array(img)
            img_data = np.expand_dims(img_data, axis=0)
            img_data = preprocess_input(img_data)

            vgg16_feature = model.predict(img_data)
            test_set.append(np.ndarray.tolist(vgg16_feature[0]))

            if test_set:
                predict_target = clf.predict(test_set)
                print(predict_target.shape)
                print(predict_target.size)
                predict_prob = clf.predict_proba(test_set)
                print('predict results.')
                print(clf.classes_)
                print(predict_prob)
                prob = predict_prob[0]
                # Class indices ordered by descending probability.
                orderedIndex = sorted(range(len(prob)),
                                      key=lambda k: prob[k],
                                      reverse=True)
                print(orderedIndex)
                print("appliances in order")
                validNum = len([i for i in prob if i > 0.075]) - 1
                print('There are valid object #', validNum)
                # get all the results in order and loop thru
                print(predict_target)
                predict_target = predict_target[0]
                for indexCount in orderedIndex:
                    print(clf.classes_[indexCount], end=" ")

                applianceTuple = applianceDict[predict_target]
                indexCount = 0

                # Offer candidates until the peer ACKs one (or the
                # connection drops); wraps around after 6 entries.
                while True:
                    print("orderedList ",
                          clf.classes_[orderedIndex[indexCount]])
                    info_2 = connect.recv(1024)
                    info_2 = info_2.decode()
                    if info_2 == 'ACK':
                        print(info_2)
                        obj = clf.classes_[orderedIndex[indexCount]]
                        break
                    elif info_2 == '':
                        print('Interrupted.')
                        break
                    indexCount += 1
                    if indexCount > 5:
                        indexCount = 0

        elif info == 'con':
            print('Con coming soon.')

            # BUGFIX: the original passed `direc`, which is never defined in
            # this function (NameError at runtime); the frame was saved above
            # as 1.png, matching the 'rec' branch of this same callback.
            img_bk, k, top, mid, control_signal = finger_control_f(
                '1.png', 220, 5, -70, 3)
            height, width = img_bk.shape

            pyautogui.press('r')
            x_f1 = mid
            y_f1 = top
            # Select the channel key for the appliance recognized earlier.
            if obj == 'Printer':
                pyautogui.press('a')
            elif obj == 'Coffee maker':
                pyautogui.press('b')
            elif obj == 'TV':
                pyautogui.press('c')
            elif obj == 'Door':
                pyautogui.press('d')
            elif obj == 'Minotor':
                pyautogui.press('e')

            x_f2 = mid
            y_f2 = top
            print('slope is ', k, 'top y value is ', top, ' and mid value is ',
                  mid)
            print('control signal is', control_signal)

            time_in = time.time()
            # Quit gesture: confirm and stop control mode.
            if control_signal == 'Down':
                pyautogui.press('enter')
                connect.sendall(b'Stop Con.')

            else:
                slope = height / width
                # NOTE(review): control_scheme is never assigned in this
                # function — presumably a module-level setting; confirm.
                if (control_scheme == '1'):
                    # Control scheme 1: video-style directional control.
                    xx_thres = 20
                    yy_thres = 15
                    # NOTE(review): x_f1/y_f1 and x_f2/y_f2 are both taken
                    # from this same frame above, so these deltas are always
                    # 0 here; confirm whether the reference point should
                    # instead persist from the 'rec' frame.
                    delta_y_f = y_f2 - y_f1
                    delta_x_f = x_f2 - x_f1

                    steps_y = delta_y_f / 10
                    steps_x = delta_x_f / 10

                    if steps_x < 0:
                        for x_type in range(abs(int(steps_x))):
                            pyautogui.press('z')  # left
                    elif steps_x > 0:
                        for x_type in range(abs(int(steps_x))):
                            pyautogui.press('x')  # right

                    if steps_y < 0:
                        for y_type in range(abs(int(steps_y))):
                            pyautogui.press('n')  # up
                    elif steps_y > 0:
                        for y_type in range(abs(int(steps_y))):
                            pyautogui.press('m')  # down
                    else:
                        pyautogui.press('p')  # middle

                    pyautogui.press('enter')

                    # Quadrant test against the image diagonal decides the
                    # arrow key; small movements mean "stay".
                    if (delta_x_f > xx_thres or delta_x_f < -xx_thres) or (
                            delta_y_f > yy_thres or delta_y_f < -yy_thres):
                        # left
                        if delta_y_f < -slope * delta_x_f and delta_y_f > slope * delta_x_f:
                            print('go left')
                            pyautogui.press('left')
                        # right
                        elif delta_y_f > -slope * delta_x_f and delta_y_f < slope * delta_x_f:
                            print('go right')
                            pyautogui.press('right')
                        elif delta_y_f > -slope * delta_x_f and delta_y_f > slope * delta_x_f:
                            print('go down')
                            pyautogui.press('down')
                        elif delta_y_f < -slope * delta_x_f and delta_y_f < slope * delta_x_f:
                            print('go up')
                            pyautogui.press('up')
                    else:
                        print('STAY.')
                        pyautogui.press('v')
                    time.sleep(0.05)
                    connect.sendall(b'Doing Con.')
                cv2.waitKey(2)
Ejemplo n.º 5
0
def on_message(client, userdata, msg):
    """MQTT callback handling three topics.

    'confirm_img' -> stores the peer's acknowledgement string in ack_info.
    '*command*'   -> 'rec' runs classification on the current frame and
                     walks the ranked candidates until ack_info confirms
                     one (stored in `obj`); 'con' is a placeholder;
                     anything else just names the next image file.
    'image'       -> writes the base64 payload to 1.png.
    """
    # BUGFIX: this state is shared across callback invocations — ack_info is
    # written under 'confirm_img' but read under 'command', and
    # `counter += 1` mutates the module-level frame counter. Without the
    # global declaration those raise NameError / UnboundLocalError.
    global ack_info, control, flag_f1, x_f1, x_f2, y_f1, y_f2, direc, counter, obj

    print(msg.topic)
    if msg.topic == "confirm_img":
        ack_info = msg.payload.decode('utf-8')

    elif 'command' in msg.topic:
        print('command recognized')
        info = msg.payload.decode('utf-8')
        print(info == 'rec')
        if info == 'rec':
            print('entered')
            cv2.destroyAllWindows()
            control = False
            flag_f1 = False
            # 180,135
            x_f1 = 60
            x_f2 = 0
            y_f1 = 20
            y_f2 = 0
            direc = str(counter) + 'imageRec.png'

            # reset directions
            print('Doing classification.')
            test_set = []
            img_crop, img_bk = generate_crop(direc, 220)

            img_bk, k, top, mid, control_signal = finger_control_f(
                direc, 200, 5, -70, 3)
            x_f1 = mid
            y_f1 = top
            cv2.imshow('Binary Image', img_bk)
            cv2.waitKey(5)

            cv2.imwrite('2nd_step.png', img_crop)

            # VGG16 expects a 224x224 input batch.
            img = image.load_img('2nd_step.png', target_size=(224, 224))
            img_data = image.img_to_array(img)
            img_data = np.expand_dims(img_data, axis=0)
            img_data = preprocess_input(img_data)

            vgg16_feature = model.predict(img_data)
            test_set.append(np.ndarray.tolist(vgg16_feature[0]))

            if test_set:
                predict_target = clf.predict(test_set)
                print(predict_target.shape)
                print(predict_target.size)
                predict_prob = clf.predict_proba(test_set)
                print('predict results.')
                print(clf.classes_)
                print(predict_prob)
                prob = predict_prob[0]
                # Class indices ordered by descending probability.
                orderedIndex = sorted(range(len(prob)),
                                      key=lambda k: prob[k],
                                      reverse=True)
                print(orderedIndex)
                print("appliances in order")
                validNum = len([i for i in prob if i > 0.075]) - 1
                print('There are valid object #', validNum)
                # get all the results in order and loop thru
                print(predict_target)
                predict_target = predict_target[0]
                for indexCount in orderedIndex:
                    print(clf.classes_[indexCount], end=" ")

                applianceTuple = applianceDict[predict_target]
                indexCount = 0

                # NOTE(review): ack_info is only updated by a separate
                # 'confirm_img' message; if none arrives while this loop
                # spins, it never terminates. Confirm the peer protocol.
                while True:
                    print("orderedList ",
                          clf.classes_[orderedIndex[indexCount]])
                    if ack_info == 'ACK':
                        print(ack_info)
                        obj = clf.classes_[orderedIndex[indexCount]]
                        break
                    elif ack_info == '':
                        print('Interrupted.')
                        break
                    indexCount += 1
                    if indexCount > 5:
                        indexCount = 0

        elif info == 'con':
            print('con part coming soon.')

        else:
            direc = str(counter) + 'image.png'
        counter += 1

    elif msg.topic == 'image':
        print(len(msg.payload))
        print('image transmitted')

        # Persist the base64 payload as the working frame.
        with open('1.png', "wb") as fh:
            fh.write(base64.decodebytes(msg.payload))
Ejemplo n.º 6
0
def on_message(client, userdata, msg):
    """MQTT message callback: classify an incoming camera frame or track a fingertip.

    On the 'image' topic the payload is a base64-encoded PNG. The frame is
    written to '1.png', then a control signal is read from the module-level
    TCP socket `connect` (blocking recv):

    * 'rec' — reset tracking state, crop the frame, extract VGG16 features,
      rank appliance classes with `clf`, and loop until the peer ACKs one of
      the top predictions (stored in the global `obj`).
    * 'con' — locate the fingertip via `finger_control_f`, select the target
      appliance by sending a pyautogui keypress for `obj`, then translate
      fingertip motion relative to a reference frame into block keypresses.

    Globals mutated: obj, command_in, down_confirm, x_ref, y_ref, k_ref, mid_ref.
    Relies on module-level names defined elsewhere: connect (socket),
    binary_thre, size (angle threshold), clf, model, generate_crop,
    finger_control_f.
    """
    global obj, command_in, down_confirm, x_ref, y_ref, k_ref, mid_ref

    if msg.topic == 'image':
        t1 = time.time()
        print("message received time = " + str(t1))
        # Payload is a base64-encoded image; decode and persist so the
        # OpenCV/Keras helpers below can re-read it from disk.
        with open('1.png', "wb") as fh:
            fh.write(base64.decodebytes(msg.payload))
        # Blocking read of the control signal from the companion TCP peer
        # (NOTE(review): blocks the MQTT network loop until the peer sends).
        info = connect.recv(1024)
        info = info.decode()
        print('Get control signal:', info)

        #doesn't matter from here
        if info == 'rec':
            # Recognition mode: clear all tracking reference state so the
            # next 'con' frame re-establishes the reference pose.
            command_in = False
            down_confirm = False
            x_ref = None
            y_ref = None
            k_ref = None

            print('Doing classification.')
            test_set = []
            # Crop the appliance region out of the frame (220: crop size /
            # threshold parameter of generate_crop — confirm at definition).
            img_crop, img_bk = generate_crop('1.png', 220)
            #
            # Fingertip detection; return values unused here — presumably
            # called only for its side effects / warm-up. TODO confirm.
            img_bk, k, top, mid, control_signal, x_mid = finger_control_f(
                '1.png', binary_thre, 5, -70, 3)

            #cv2.imshow('Binary Image', img_bk)
            cv2.waitKey(3)

            cv2.imwrite('2nd_step.jpg', img_crop)

            # Standard Keras VGG16 preprocessing pipeline: load at 224x224,
            # add a batch axis, apply ImageNet mean subtraction.
            img = image.load_img('2nd_step.jpg', target_size=(224, 224))
            img_data = image.img_to_array(img)
            img_data = np.expand_dims(img_data, axis=0)
            img_data = preprocess_input(img_data)

            # Extract the feature vector and hand it to the classical
            # classifier `clf` as a single-sample test set.
            vgg16_feature = model.predict(img_data)
            test_set.append(np.ndarray.tolist(vgg16_feature[0]))
            #print(test_set)

            if test_set:
                predict_target = clf.predict(test_set)
                print(predict_target.shape)
                print(predict_target.size)
                predict_prob = clf.predict_proba(test_set)
                #print(correct_tag)
                print('predict results.')
                print(clf.classes_)
                print(predict_prob)
                prob = predict_prob[0]
                # Class indices sorted by descending probability.
                orderedIndex = sorted(range(len(prob)),
                                      key=lambda k: prob[k],
                                      reverse=True)
                print(orderedIndex)
                print("appliances in order")
                validNum = 0
                # Count classes above the 0.075 confidence floor (minus the
                # top one, hence the -1). validNum is printed but not used
                # to bound the loop below — NOTE(review): verify intent.
                validNum = len([i for i in prob if i > 0.075]) - 1
                print('There are valid object #', validNum)
                # get all the results in order and loop thru
                print(predict_target)
                predict_target = predict_target[0]

                for indexCount in orderedIndex:
                    print(clf.classes_[indexCount], end=" ")

                indexCount = 0

                # Cycle through the top-6 ranked classes until the peer
                # sends 'ACK' (accept current candidate) or '' (abort).
                while True:
                    print("orderedList ",
                          clf.classes_[orderedIndex[indexCount]])
                    info_2 = connect.recv(1024)
                    info_2 = info_2.decode()
                    if info_2 == 'ACK':
                        print(info_2)
                        # Remember the accepted appliance for 'con' mode.
                        obj = clf.classes_[orderedIndex[indexCount]]
                        break
                    elif info_2 == '':
                        # Empty recv => peer closed / interrupted.
                        print('Interrupted.')
                        break
                    indexCount += 1
                    if indexCount > 5:
                        indexCount = 0
                connect.sendall(b'ready')
                time.sleep(0.5)
                connect.sendall(b'Doing Con.')

        #don't care up until here
        elif info == 'con':
            t2 = time.time()
            #print(obj)
            #print('Con coming soon.')

            #img_bk is just image itself
            #top,mid is the coord of fingertip
            #xmid is the intercept that slope makes with frame
            img_bk, k, top, mid, control_signal, x_mid = finger_control_f(
                '1.png', binary_thre, 5, -70, 3)

            cv2.imwrite('../binary.png', img_bk)
            # Two-value unpack implies img_bk is single-channel (grayscale).
            height, width = img_bk.shape
            t3 = time.time()
            #print(top,mid)

            #print(k,x_mid)
            # Map the recognized appliance to a selection keypress.
            # NOTE(review): 'Minotor' looks like a typo for 'Monitor' — it
            # must match the label in clf.classes_ exactly; confirm against
            # the training labels before changing.
            if obj == 'Printer':
                pyautogui.press('a')
            elif obj == 'Coffee maker':
                pyautogui.press('b')
            elif obj == 'TV':
                pyautogui.press('c')
            elif obj == 'Door':
                pyautogui.press('d')
            elif obj == 'Minotor':
                pyautogui.press('e')

            #print('slope is ',k,'top y value is ',top,' and mid value is ', mid)
            #print('control signal is', control_signal)
            ##############################
            #creating reference photo and compares future images to reference image
            if not x_ref or not y_ref or not k_ref:
                # First 'con' frame after 'rec': record the reference pose.
                # NOTE(review): truthiness check treats 0 like None, so a
                # fingertip at coordinate 0 re-triggers reference capture.
                x_ref = mid
                y_ref = top
                mid_ref = x_mid
                if mid == x_mid:
                    # Vertical finger: avoid division by zero; use just
                    # under pi/2 as the pointing angle.
                    direction = np.pi / 2 - 0.01
                #print(top/(mid-x_mid))
                else:
                    direction = np.arctan(top / float((mid - x_mid)))
                k_ref = direction
                connect.sendall(b'Doing Con.')
            else:
                #if no finger, then sends a "down" flag
                # quite
                if control_signal == 'Down':
                    print('down')
                    pyautogui.press('m')
                    # Only confirm 'down' if an 'up' command was seen first.
                    if command_in:
                        down_confirm = True
                    time.sleep(0.01)
                    connect.sendall(b'Doing Con.')
                    #print(down_confirm)

                #####
                else:
                    command_in = True
                    print('up')
                    pyautogui.press('n')

                    if mid == x_mid:
                        # Vertical finger: reuse the reference angle.
                        direction = k_ref
                    #print(top/(mid-x_mid))
                    else:
                        direction = np.arctan(top / float((mid - x_mid)))
                    print(direction - k_ref)
                    print(x_mid - mid_ref)

                    #mid_ref is xmid of the reference image
                    #k_ref = direction is the slope
                    #"//5" returns the integer digit of the width / 5
                    #if xmid coord - midref bigger than width /5
                    #width is 224 for this
                    #maybe don't include the midref calculations? Moving xmid does not necessarily mean they are pointing at that box
                    # Map angular / horizontal deviation from the reference
                    # pose to one of four block keypresses.
                    # NOTE(review): given the first two elifs fail,
                    # direction - k_ref is already in [-size, size], so the
                    # third condition (< size) is true for every value except
                    # exactly == size — the block-6 branch is effectively
                    # unreachable. Likely the last two tests were meant to
                    # split on sign or a smaller threshold; confirm intent.
                    if (x_mid - mid_ref > width // 5) or (direction - k_ref >
                                                          size):
                        print('block 4')
                        block = 8
                        pyautogui.press('8')
                    elif (x_mid - mid_ref < -width // 5) or (direction - k_ref
                                                             < -size):
                        print('block 1')
                        block = 2
                        pyautogui.press('2')
                    elif (direction - k_ref < size):
                        print('block 2')
                        block = 4
                        pyautogui.press('4')
                    elif (direction - k_ref > -size):
                        print('block 3')
                        block = 6
                        pyautogui.press('6')
                #### revise this part
                #trying to integrate using the slope of finger and finger mid to indicate block correctly
                #direction is angle from xmid to top,mid
                #quadrant 4 is actually left side
                #size is alpha from the diagram
                #add time.sleep(time) only to server

                    # An up→down sequence completes one command; reset the
                    # handshake flags. (Same payload sent either way.)
                    if down_confirm == True:
                        down_confirm = False
                        command_in = False
                        #connect.sendall(b'Stop Con.')
                        connect.sendall(b'Doing Con.')
                    else:
                        connect.sendall(b'Doing Con.')
Ejemplo n.º 7
0
    if subdir.partition('/')[-1] == '':
        continue
    correct_tag = subdir.partition('/')[-1]
    labels.append(correct_tag)
    #print(correct_tag)
    print('Now in the folder', subdir.partition('/')[-1])

    files_size = 0
    counter = 0
    for i, file in enumerate(files):
        file_path = os.path.join(subdir, file)
        if 'jpg' in file_path or 'JPG' in file_path or 'Screen Shot' in file_path:
            total_img_process += 1
            files_size += 1
            print('\nImage: %s' % (file_path))
            img_crop, img_bk = generate_crop(file_path, 220)
            '''
			height,width,channel = img_crop.shape
			for line in range(height):
				for pixel in range(width):
					#print((line,pixel),(img_1[line,pixel][1],img_2[line,pixel][1],max([img_1[line,pixel][1],img_2[line,pixel][1]])-min([img_1[line,pixel][1],img_2[line,pixel][1]])))
					if img_crop[line,pixel][2] > abs(red_bias):
						img_crop[line,pixel][2] += red_bias
					else:
						img_crop[line,pixel][2]  = 0

					if img_crop[line,pixel][0] > abs(blue_bias):
						img_crop[line,pixel][0] += blue_bias
					else:
						img_crop[line,pixel][0] = 0