Example #1
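The snippet is listed without its import header; the following is a minimal sketch of what it appears to rely on (the CoolSMS SDK module paths and the project-local gps module are assumptions inferred from the calls in the body):

import os
import sys
import time

import picamera
from picamera.array import PiRGBArray
from PIL import Image

from sdk.api.message import Message          # CoolSMS Python SDK
from sdk.exceptions import CoolsmsException  # CoolSMS Python SDK

import gps  # project-local helper providing gps.convert()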
def main():
    
    print("sms hi")
    # dump the current GPS fix to a text file for gps.convert() to parse later
    os.system('gpspipe -r -d -l -o /home/pi/smartcane/sms/date1.txt')
    # set api key, api secret
    api_key = "NCS4QACAQPBEDUMG"
    api_secret = "XBGIKE2OTK0SJEDL86QTWXXQXYUMHBNU"
    
    imgname='./image.jpg'
    camera = picamera.PiCamera()
    camera.vflip=True
    camera.hflip=True
    os.system("mplayer voicefile/camera.mp3")
    capture = PiRGBArray(camera)
    camera.capture(capture,format='rgb',use_video_port=True)
    capture = Image.fromarray(capture.array)
    capture.save(imgname)
    camera.close()

    #flag.initFlag()
    
    
    time.sleep(10)
    params = dict()
    
    # gps.convert() (project-local) appears to return 1 when no fix could be
    # parsed from date1.txt; in that case a hardcoded fallback location is sent
    if gps.convert('date1.txt') == 1:
        params['type'] = 'mms'  # message type (sms, lms, mms, ata)
        params['to'] = '010-6473-4451'
        params['from'] = '01064734451'
        params['text'] = 'http://maps.google.com/maps?z=11&t=k&q=37.558088+126.998222'
        params["image"] = imgname  # image for MMS; type must be set to "mms"
    else:
        latresult, longresult = gps.convert('date1.txt')
        params['type'] = 'mms'  # message type (sms, lms, mms, ata)
        params['to'] = '010-6473-4451'
        params['from'] = '01064734451'
        params['text'] = 'http://maps.google.com/maps?z=11&t=k&q=' + latresult + '+' + longresult
        params["image"] = imgname  # image for MMS; type must be set to "mms"
    ## 4 params (to, from, type, text) are mandatory and must be filled
    
    cool = Message(api_key, api_secret)
    
    try:
        
        response = cool.send(params)
    
        print("Success Count : %s" % response['success_count'])
        print("Error Count : %s" % response['error_count'])
        print("Group ID : %s" % response['group_id'])
    
        os.system("mplayer /home/pi/Desktop/smartcane/voicefile/mms.MP3")
       
        if "error_list" in response:
            print("Error List : %s" % response['error_list'])
            
          

    except CoolsmsException as e:
        print("Error Code : %s" % e.code)
        print("Error Message : %s" % e.msg)


    sys.exit()
Example #2
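As above, this example is shown without its imports, and it also references names defined elsewhere in the module (dirname, target, width, height, avgHSV, detect, rectangles, linear_reg, draw_line, getDirection). A minimal, assumed import header would be:

import os
import math

import cv2
import numpy as np
import picamera
from picamera.array import PiRGBArray
from PIL import Image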
def main():
    imgname=os.path.join(dirname,'blockimage.jpg')
    camera = picamera.PiCamera()
    camera.vflip=True
    camera.hflip=True
    os.system("mplayer voicefile/camera.mp3")
    capture = PiRGBArray(camera)
    camera.capture(capture,format='rgb',use_video_port=True)
    capture = Image.fromarray(capture.array)
    capture.save(imgname)
    camera.close()
    
    img = cv2.imread(target)
    img = cv2.resize(img, (width,height), interpolation=cv2.INTER_AREA)
    blur = cv2.GaussianBlur(img,(3,3),0)
    blurfilename = os.path.join(dirname,'blur.jpg')
    cv2.imwrite(blurfilename,blur)
    frame = cv2.imread(blurfilename)
    

    # convert to the HSV color space
    img_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)  

    img_h, img_s, img_v = cv2.split(img_hsv)
    
    avgValue = int(avgHSV(img_v))

    # define the white color range, offsetting the lower V bound by the frame's average brightness

    lower_white = np.array([0,0,avgValue+45])
    upper_white = np.array([180,20,255]) 
    
    mask = cv2.inRange(img_hsv, lower_white, upper_white)

    # Bitwise-AND mask and original image  
    img_result = cv2.bitwise_and(frame,frame, mask= mask)
    img_yellow_name = os.path.join(dirname,'img_yellow.jpg')
    cv2.imwrite( img_yellow_name, img_result ) 
            
    img = cv2.imread(img_yellow_name)
    imgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    
    #dilation
    kernel = np.ones((6, 6), np.uint8)
    dilation = cv2.dilate(imgray, kernel, iterations=1)
 
    #opening
    kernel = np.ones((7, 7), np.uint8)
    opening = cv2.morphologyEx(dilation, cv2.MORPH_OPEN,kernel)

    #Thresholding
    ret, thr = cv2.threshold(opening, 20, 255, 0)
    
    cv2.imshow('threshold', thr)  # debug preview of the thresholded mask

    _, contours, _ = cv2.findContours(thr, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
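    # detect() (defined elsewhere in the module) is assumed to append each
    # candidate crosswalk rectangle to the module-level rectangles list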
    

    for con in contours:
        detect(con,img)
    
    # sort the detected rectangles by area, largest first
    rectangle_sorted = sorted(rectangles, key=lambda rect: rect[0], reverse=True)
    
    img_detect_name = os.path.join(dirname,'detectimg.jpg')
    cv2.imwrite(img_detect_name, img)
            
    img1_name = os.path.join(target)
    img2_name = os.path.join(dirname,'detectimg.jpg')
    img_original =cv2.imread(img1_name)
    img_original = cv2.resize(img_original,(width,height) , interpolation=cv2.INTER_AREA)
    img_detect =cv2.imread(img2_name)
    x_list=[]
    y_list=[]
    x2_list=[] 
    y2_list=[]
    for i, rect in enumerate(rectangle_sorted):
        ## rect holds the rectangle area (index 0) and its 4 corner coordinates (index 1)
        for j in range(len(rect[1])):
            cv2.putText(img_detect, str(j), (rect[1][j][0], rect[1][j][1]), cv2.FONT_HERSHEY_SCRIPT_SIMPLEX, 1, (255, 255, 255))
        # collect one corner (index 0) and its opposite corner (index 2) of each rectangle
        x_list.append(rect[1][0][0])
        y_list.append(rect[1][0][1])
        x2_list.append(rect[1][2][0])
        y2_list.append(rect[1][2][1])
    
        
    if len(x_list) != 0:
        
        gradient1, intercept1 = linear_reg(x_list,y_list)
        gradient2, intercept2 = linear_reg(x2_list,y2_list)
        
        x_intersect = (intercept2 - intercept1) / (gradient1 - gradient2)
        y_intersect = x_intersect * gradient1 + intercept1
        y_mid = 500
        x_mid = y_mid*(gradient1+gradient2)-(intercept1*gradient2) - (gradient1*intercept2)
        x_mid /=2*gradient1*gradient2
        
        x_third = [x_intersect, x_mid]
        y_third = [y_intersect, y_mid]
        gradient3,intercept3 = linear_reg(x_third,y_third)
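        # gradient3/intercept3 describe the center line between the two edge lines,
        # running through their intersection and their midpoint at y = 500; its slope
        # is converted to a heading angle for the voice prompt below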
        
        if not np.isnan(gradient1):
            if not np.isnan(gradient2):
                img_detect = draw_line(img_detect,gradient1,intercept1,1)
                img_detect = draw_line(img_detect,gradient2,intercept2,2)
                img_detect = draw_line(img_detect,gradient3,intercept3,3)
                print(gradient1,gradient2)
        # default to 0 degrees if the center-line slope is undefined
        degree = 0
        if not np.isnan(gradient3):
            degree = int(math.degrees(math.atan(gradient3)))

        direction_result = "mplayer voicefile/" + getDirection(degree) + "_clockwise.mp3"
        os.system(direction_result)
        
        print(direction_result)
        
    
        #if gradient1 >0 and gradient2>0:
            #print("Left side")
            #os.system("mplayer voicefile/left.mp3")
        #elif gradient1 <0 and gradient2>0:
            #print("Center")
            #os.system("mplayer voicefile/front.mp3")
        #elif gradient1 <0 and gradient2<0:
            #print("Right side")
            #os.system("mplayer voicefile/right.mp3")
    else:
        print("NO CROSSWALK")
        os.system("mplayer voicefile/nodetect.mp3")

    cv2.imshow('original',img_original)
    cv2.imshow('detect',img_detect)
    cv2.imwrite('detectimg.jpg',img_detect)
    cv2.waitKey(0)
    cv2.destroyAllWindows()