Example #1
def run(updateImageSignal, mode, setLabelSignal):
    camera.capture('image.jpg')
    ogImg = cv2.imread('image.jpg')
    colorImg = ogImg.copy()
    img = resize(ogImg, 50)
    imgContour = img.copy()

    img = img[20:200, 0:500]
    imgContour = imgContour[20:200, 0:500]

    cv2.imwrite('imageCrop.jpg', imgContour)

    imgBlur = cv2.GaussianBlur(img, (7, 7), 1)
    imgGray = cv2.cvtColor(imgBlur, cv2.COLOR_BGR2GRAY)

    threshold1 = 40
    threshold2 = 40

    imgCanny = cv2.Canny(imgGray, threshold1, threshold2)
    kernel = np.ones((5, 5))
    imgDil = cv2.dilate(imgCanny, kernel, iterations=1)

    form = getContours(imgDil, imgContour, img)
    print(form.corners)
    imgContour = resize(imgContour, 50)
    cv2.imwrite('output.jpg', imgContour)
    updateImageSignal.emit()
    classifyForm(form, mode, setLabelSignal)
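The resize() helper used above is not shown; a minimal sketch, assuming it scales an OpenCV image by a percentage, could look like this:

import cv2

def resize(img, scale_percent):
    # Hypothetical sketch of the resize() helper assumed above:
    # scale both dimensions by scale_percent (e.g. 50 -> half size).
    w = int(img.shape[1] * scale_percent / 100)
    h = int(img.shape[0] * scale_percent / 100)
    return cv2.resize(img, (w, h), interpolation=cv2.INTER_AREA)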
Example #2
def compare_different_profiles(profile1, profile1_name, profile2_name, profile2, failed_path, not_exist_path,
                               resolution):
    start_time = time.time()
    resize.resize(profile2, resolution)

    # profile1_loc_faces = get_faces_loc(profile1, ALGORITHM)
    # profile1_faces = get_faces(profile1, ALGORITHM, profile1_loc_faces)
    # profile2_loc_faces = get_faces_loc(profile2, ALGORITHM)
    # profile2_faces = get_faces(profile2, ALGORITHM, profile2_loc_faces)

    profile1_faces = dnn.get_faces_from_image(profile1)
    profile2_faces = dnn.get_faces_from_image(profile2)

    # result,m,n = compare_images(profile1, profile2, ALGORITHM, face_locs1=profile1_loc_faces, face_locs2=profile2_loc_faces,
    #                         faces1=profile1_faces, faces2=profile2_faces)

    result, m, n = FN.compare_faces_FN(profile1_faces, profile2_faces)

    time_it_took = time.time() - start_time
    debug_print("comparing time = " + str(time_it_took))

    if result == SAME:
        debug_print("Oops. it said 2 different people are ###THE SAME###!")
        save_2_images_different_persons(failed_path, profile1, profile2, profile1_name, profile2_name)
        save_faces_2_person(failed_path,profile1_faces[m], profile2_faces[n], profile1_name, profile2_name)

        return True, time_it_took

    elif result == DIFFERENT:
        debug_print("Great! it said 2 different people are different!")

    elif save:
        imsave(not_exist_path + profile2_name, profile2)

    return False, time_it_took
Example #3
def gallery(imageDir, x, y):
    print(imageDir)
    n = 1000

    #    filelist = [ f for f in os.listdir(tempPath) if f.endswith(".jpg") ]
    #    for file in filelist:
    #        os.remove(tempPath+file)

    #    for file in os.listdir(imageDir+'/'):
    #        img1=cv2.imread(imageDir+'/'+file)
    #        cv2.imwrite(imageDir+str(n)+"pic.jpg",img1[0])

    resize.resize(imageDir, x, y)
    imgs = []
    n_file = 0
    filelist = [
        f for f in os.listdir(imageDir + "/") if f.endswith((".jpg", ".png"))
    ]
    for file in filelist:
        n_file = n_file + 1
        imgs.append(Image.open(imageDir + "/" + file))
    #img = Image.open("1pic.jpg")

    album(imgs).save("album.png")
    basewidth = 700
    img1 = Image.open("album.png")
    wpercent = (basewidth / float(img1.size[0]))
    hsize = int((float(img1.size[1]) * float(wpercent)))
    img1 = img1.resize((basewidth, hsize), Image.ANTIALIAS)
    img1.save("resized_album.png")
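The album() helper is not part of the snippet; a minimal sketch, assuming it simply pastes the opened images side by side onto one PIL canvas, might be:

from PIL import Image

def album(imgs):
    # Hypothetical album() sketch: paste the images left to right on a white canvas.
    total_width = sum(im.width for im in imgs)
    max_height = max(im.height for im in imgs)
    canvas = Image.new("RGB", (total_width, max_height), "white")
    x = 0
    for im in imgs:
        canvas.paste(im, (x, 0))
        x += im.width
    return canvas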
Example #4
    def uploadPicContent(self, content):
        conn = httplib.HTTPConnection("upload.che168.com",
                                      timeout=timeout_che168)
        headers = copy.copy(self.headers)

        img = StringIO(content)
        smallImg = StringIO()
        resize.resize(img, (600, 600), False, smallImg)
        content = smallImg.getvalue()

        conn.request("POST",
                     "/UploadImage.ashx?infoid=1000",
                     content,
                     headers=headers)
        try:
            res = conn.getresponse()
            resHeaders = res.getheaders()
            logger.debug(str(resHeaders))
            photoRes = self.decodeBody(resHeaders, res.read())
        except Exception as e:
            logger.debug(str(e))
            ret = None
            return ret

        #print photoRes#.decode("GB18030")
        conn.close()
        try:
            ret = json.loads(photoRes)
        except Exception as e:
            #ret = json.loads('[{"msg": "/2014/7/5/5525213967984534785.jpg", "img": "http://www.autoimg.cn/2scimg/2014/7/5/m_5525213967984534785.jpg", "success": 1}]')
            logger.debug(str(e))
            logger.debug(urllib.quote(photoRes))
            ret = None
        return ret
Example #5
def main(args):
    in_data_dir = args.in_data_dir
    out_data_dir = args.out_data_dir
    size = (args.x, args.y)
    out_ext = args.out_ext
    in_exts = args.in_exts

    if not os.path.exists(out_data_dir):
        os.mkdir(out_data_dir)

    in_images_dir = os.path.join(in_data_dir, "images")
    out_images_dir = os.path.join(out_data_dir, "images")
    if not os.path.exists(out_images_dir):
        os.mkdir(out_images_dir)

    names = os.listdir(in_images_dir)
    pbar = pb.ProgressBar(widgets=["N=%d|" % len(names), pb.Percentage(), pb.Bar(), pb.ETA()], maxval=len(names))
    pbar.start()
    for i, name in enumerate(os.listdir(in_images_dir)):
        if os.path.splitext(name)[1] in in_exts:
            in_path = os.path.join(in_images_dir, name)
            out_path = os.path.join(out_images_dir, name)
            resize(in_path, out_path, size, out_ext)
        pbar.update(i)
    pbar.finish()
Example #6
def main(args):
    in_data_dir = args.in_data_dir
    out_data_dir = args.out_data_dir
    size = (args.x, args.y)
    out_ext = args.out_ext
    in_exts = args.in_exts

    if not os.path.exists(out_data_dir):
        os.mkdir(out_data_dir)

    in_images_dir = os.path.join(in_data_dir, "images")
    out_images_dir = os.path.join(out_data_dir, "images")
    if not os.path.exists(out_images_dir):
        os.mkdir(out_images_dir)

    names = os.listdir(in_images_dir)
    pbar = pb.ProgressBar(
        widgets=["N=%d|" % len(names),
                 pb.Percentage(),
                 pb.Bar(),
                 pb.ETA()],
        maxval=len(names))
    pbar.start()
    for i, name in enumerate(os.listdir(in_images_dir)):
        if os.path.splitext(name)[1] in in_exts:
            in_path = os.path.join(in_images_dir, name)
            out_path = os.path.join(out_images_dir, name)
            resize(in_path, out_path, size, out_ext)
        pbar.update(i)
    pbar.finish()
Example #7
def create_los(images, crop, ui=None):
    """
    :param images: table of the selected images
    :return: the image

    Autor : Adrien   2018-01-16
    """

    #Dimension of the table
    x, y = np.shape(images)
    final = np.zeros(((x - 1) * crop, (y - 1) * 2 * crop, 3), np.uint8)

    #Borne for y
    borne_y = y - 1

    ##Import and process

    # Squares for the odd lines
    for i in range(1, x, 2):
        for j in range(borne_y):
            img = Image.open(images[i][j])
            tab = np.array(img)
            img.close()

            if j == y - 1:
                tab = resize.resize(tab, (crop, 2 * crop))
                final[(i - 1) * crop:(i + 1) * crop,
                      j * 2 * crop:(2 * j + 1) * crop] = tab
            else:
                tab = resize.resize(tab, (2 * crop, 2 * crop))
                final[(i - 1) * crop:(i + 1) * crop,
                      j * 2 * crop:(j + 1) * 2 * crop] = tab
            if ui is not None:
                ui.pb_avance.setValue(
                    int((100 * ((i * y + j) / (x * y) + 1) / 4 + 50)))

    #Triangles for the even lines
    for i in range(0, x, 2):
        for j in range(y):
            img = Image.open(images[i][j])
            tab = np.array(img)
            img.close()

            if i == 0 and j == 0:
                beggining = (0, 0)
            elif j == 0:
                beggining = ((i - 1) * crop, 0)
            elif i == 0:
                beggining = (0, (2 * j - 1) * crop)
            else:
                beggining = ((i - 1) * crop, (2 * j - 1) * crop)

            final = fill.fill_los(final, tab, crop, beggining, i, x, j, y)
            if ui is not None:
                ui.pb_avance.setValue(
                    int((100 * ((i) * y + j) / (x * y) + 1) / 4 + 75))

    return final
Example #8
def prediction(filename, slider):

	#Original file name for mouse hover on predicted image in predict.html
	original_image_path = "../static/uploads/" + filename

	#Checking the value of variable 'slider'.
	#This is from the html slider in index.html 
	#to select pre-processing method.
	if slider == '1':
		type = 'original'
	elif slider == '2':
		type = 'mirror'
	elif slider == '3':
		type = 'center'
	elif slider == '4':
		type = '10crop'


	if type == "original":

		# Image is read from the uploads folder using the filename from the created url.
		image = plt.imread(os.path.join('static/uploads', filename))
		resize(filename, False, True)
		filename = "distorted" + filename
		#Sending image and filename to predict method and getting predictions and image_path in return
		predictions, image_path = pred(image, filename)

	#Makes a mirror version of the image
	elif type == "mirror":
		resize(filename, False, True)
		filename = "distorted" + filename
		image = mirror_image(filename)
		filename = "mirror" + filename
		# Sending image and filename to predict method and getting predictions and image_path in return
		predictions, image_path = pred(image, filename)

	#Crops the center of the image. (long sides (height or width) gets cut off, short side (height or width) stays the same).
	elif type == "center":
		image = resize(filename)
		filename = "center_crop" + filename
		# Sending image and filename to predict method and getting predictions and image_path in return
		predictions, image_path = pred(np.array(image), filename)

	elif type == "10crop":

		#Sending filename to ten crop method to make 10 versions of the same image
		#The probability score gets combined for all 10 versions to make a final/combined probability score
		predictions, image_path = ten_crop_pred(filename)

	#This will make sure that there are no backslashes in the file path
	#html <img src=> Can handle any slash.
	#javascript code in <script> tags can not handle paths with backslash.
	image_path = image_path.replace('\\', '/')

	#return will send user to predict.html (in templates folder) and make the predictions dictionary available in the html code.
	return render_template('predict.html', predictions=predictions, image_path=image_path, original_image_path=original_image_path)
Example #9
    def send_email(self):
        gmail_user = '******'
        gmail_password = self.pw  # your gmail password
        msg = MIMEMultipart()
        msg['Subject'] = 'SYSS Photos'
        msg['From'] = gmail_user
        msg['To'] = self.receiver
        msg.attach(
            MIMEText("""
        The photos selected are listed below. Thank you for using the system!
        If you have any enquiry, please contact Computer Club.
        
        We would appreciate it if you could give me some advice about the User Experience.
        Please spare a little time and fill in the questionnaire below.
        """))
        msg.attach(
            MIMEText(
                u'<a href="https://docs.google.com/forms/d/e/1FAIpQLSc98pWBdyFoooQjyEq33VAw3UjJORW6gJIudiN2j5IaP6nwPw/viewform?usp=sf_link">Click Here</a>',
                'html'))
        msg.attach(
            MIMEText(
                """
		
        Kind regards
        SYSS Computer Club William and Hardy
        """, 'plain', 'utf-8'))

        for fl in self.list_of_photos:
            resize(fl)
            path = fl
            while path[-1] != '\\':
                path = path[:-1]
            att = MIMEImage(open(path + "___temp.jpg", 'rb').read())
            name = os.path.basename(fl)
            att["Content-Disposition"] = 'attachment; filename=' + name
            msg.attach(att)
        try:
            os.remove(path + "___temp.jpg")
        except OSError:
            pass
        server = smtplib.SMTP_SSL('smtp.gmail.com', 465)
        server.ehlo()
        try:
            server.login(gmail_user, self.pw)
        except smtplib.SMTPAuthenticationError:
            print("Wrong Password")
            return False
        server.send_message(msg)

        # try:
        #     smtpObj = smtplib.SMTP('localhost')
        #     smtpObj.sendmail(sender, receivers, message.as_string())
        # except smtplib.SMTPException:
        #     print("ERROR.")
        server.quit()
        return True
Example #10
def _process_jobs(jobs):
    for job in jobs:
        if not os.path.exists(job.raw_path()):
            logging.warning('Could not find image file: %s', job.raw_path())
            continue
        if os.path.exists(job.resized_path()):
            continue
        logging.info('Processing %s', job.raw_path())
        resize.resize(job)
        logging.info('Saved resized image to %s', job.resized_path())
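The job objects here are only touched through raw_path() and resized_path(); a minimal sketch of such a job type, with the directory layout as an assumption, could be:

import os
from dataclasses import dataclass

@dataclass
class ResizeJob:
    # Hypothetical job type matching the interface _process_jobs() relies on.
    base_dir: str
    name: str

    def raw_path(self):
        return os.path.join(self.base_dir, 'raw', self.name)

    def resized_path(self):
        return os.path.join(self.base_dir, 'resized', self.name)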
Example #11
 def process(self):
     resize.resize(self.path)
     temp_path = self.path
     while (temp_path[-1]!='\\'):
         temp_path = temp_path[:-1]
     img      = fr.api.load_image_file(temp_path+'___temp.jpg')
     location = fr.api.face_locations(img, number_of_times_to_upsample=1, model='hog')
     encoding = fr.api.face_encodings(img, known_face_locations=location, num_jitters=1)
     for i in range(len(encoding)) :
         self.faces.append(face(self.path, location[i], encoding[i]))
Example #12
    def _onProcBtn(self, widget, *data):
        inDir = self.inDirEntry.get_text()
        outDir = self.outDirEntry.get_text()
        resX = self.xResEntry.get_text()
        resY = self.yResEntry.get_text()
        watermark = self.watermarkEntry.get_text()

        self.procBtn.set_sensitive(False)
        resize(inDir, outDir, resX, resY, watermark)
        self.procBtn.set_sensitive(True)
Example #13
  def post(self):
    files = self.__validate_post_args()
    if files is None:
      return

    self.set_status(200)
    self.finish()

    print('resizing images')
    print(files)
    resize.resize(files)
Example #14
 def train(self):
     print("Image Preprocessing...")
     rename()
     print("Re-naming images complete!")
     resize()
     print("Re-sizing images complete!")
     print("Training KNN classifier...")
     classifier = train("../FakeImageDetection/train",
                        model_save_path="trained_knn_model.clf",
                        n_neighbors=1)
     print("Training complete!")
Example #15
    def uploadPicContent(self, content, imageConfig):
        # {"uploadUrl":"http:\/\/v0.api.upyun.com\/bximg\/",
        # "uploadParams":
        # {"policy":"",
        # "signature":""},
        # "vendorName":"upyun",
        # "imageSuffix":"#up",
        # "fileKey":"file"}
        uploadUrl = imageConfig.get('uploadUrl')
        params = imageConfig.get('uploadParams')
        policy = params.get('policy')
        imageSuffix = imageConfig.get('imageSuffix')
        signature = params.get('signature')
        host = uploadUrl.replace('http://', '')
        host = host.replace('/bximg/', '')
        logger.debug("Baxing upload image host = " + host)
        conn = httplib.HTTPConnection(host, timeout=10)
        headers = copy.copy(self.headers)
        # jsessionid = self.cookies.get("JSESSIONID", None)

        # logger.debug(jsessionid)
        img = StringIO(content)
        smallImg = StringIO()
        resize.resize(img, (800, 600), False, smallImg)
        boundaryHeader = "----pluploadboundary" + str(self.millis())
        headers[
            'Referer'] = 'http://s.baixing.net/swf/uploader/swfupload.swf?preventswfcaching=' + str(
                self.millis())
        headers['Origin'] = 'http://s.baixing.net'
        headers[
            "Content-Type"] = "multipart/form-data; boundary=" + boundaryHeader
        content = smallImg.getvalue()
        boundary = "--" + boundaryHeader
        picForm = boundary + '\r\n' + 'Content-Disposition: form-data; name="policy"\r\n\r\n'
        picForm += str(policy) + '\r\n'
        picForm += boundary + '\r\n' + 'Content-Disposition: form-data; name="signature"\r\n\r\n'
        picForm += str(signature) + '\r\n'
        picForm += boundary + '\r\n' + 'Content-Disposition: form-data; name="file"; filename="' + str(
            self.millis()) + '.jpg"\r\nContent-Type: image/jpeg\r\n\r\n'
        picForm += str(content) + '\r\n'
        picForm += boundary + "--"
        headers['Content-Length'] = len(picForm)
        logger.debug('upload image form = ' + picForm)
        conn.request("POST", "/bximg/", picForm, headers=headers)
        res = conn.getresponse()
        result = self.decodeBody(res.getheaders(), res.read())
        conn.close()

        jsonResult = json.loads(result)
        if jsonResult.get('code') == 200:
            imgUrl = str(jsonResult.get('url')) + str(imageSuffix)
            return imgUrl
        return None
Example #16
def featpyramid(pic,model):
    # build the feature pyramid for the model
    pyra = {}
    padx = math.ceil(model["maxsize"][0][0][0][1])
    pady = math.ceil(model["maxsize"][0][0][0][0])
    sbin = model["sbin"][0][0][0][0]
    interval = model["interval"][0][0][0][0]
    sc = 2.0 **(1.0/interval)
    imsize = [pic.shape[1],pic.shape[2]]
    max_scale = int(1 + np.floor(math.log(min(imsize)/(5.0*sbin))/math.log(sc)))
    pyra["feat"] = list(range(int(max_scale + interval)))
    pyra["scales"] = np.zeros((max_scale + interval, 1))
    pyra["imsize"] = imsize
    time = 0
    for i in range(interval):
        starttime = datetime.datetime.now()
        scaled = resize.resize(pic,1.0/sc**i)
        endtime = datetime.datetime.now()
        tmp = features.features(scaled,sbin/2.0)
        time += (endtime - starttime).seconds
        size =[tmp.shape[0],tmp.shape[1]+2*pady+2,tmp.shape[2]+2*padx+2]
        pyra["feat"][i]=np.zeros(size)
        pyra["feat"][i][:,pady+1:size[1]-pady-1,padx+1:size[2]-padx-1] = tmp
        pyra["scales"][i] = 2.0/sc**(i)
        #starttime = datetime.datetime.now()
        tmp = features.features(scaled,sbin)
        #endtime = datetime.datetime.now()
        #time += (endtime - starttime).seconds
        size =[tmp.shape[0],tmp.shape[1]+2*pady+2,tmp.shape[2]+2*padx+2]
        pyra["feat"][i+interval]=np.zeros(size)
        pyra["feat"][i+interval][:,pady+1:size[1]-pady-1,padx+1:size[2]-padx-1] = tmp
        pyra["scales"][i+interval] = 1.0/sc**(i-1)
        for j in range(i+interval,max_scale,interval):
            starttime = datetime.datetime.now()
            scaled = resize.resize(scaled, 0.5)
            endtime = datetime.datetime.now()
            tmp = features.features(scaled,sbin)
            time += (endtime - starttime).seconds
            size =[tmp.shape[0],tmp.shape[1]+2*pady+2,tmp.shape[2]+2*padx+2]
            pyra["feat"][j+interval]=np.zeros(size)
            pyra["feat"][j+interval][:,pady+1:size[1]-pady-1,padx+1:size[2]-padx-1] = tmp
            pyra["scales"][j+interval] = 0.5/sc**(i-1)
    for i in range(len(pyra["feat"])):
        pyra["feat"][i][31,0:pady+1,:]=1
        end=pyra["feat"][i].shape
        pyra["feat"][i][31,end[1]-padx-1:end[1],:]=1
        pyra["feat"][i][31,:,0:padx+1]=1
        pyra["feat"][i][31,:,end[2]-pady-1:end[2]]=1

    print(time)
    pyra["padx"] = padx
    pyra["pady"] = pady
    return pyra
Example #17
    def uploadPicContent(self, content):

        conn = httplib.HTTPConnection("www.iautos.cn")
        headers = copy.copy(self.headers)

        img = StringIO(content)
        smallImg = StringIO()
        resize.resize(img, (800, 600), False, smallImg)
        content = smallImg.getvalue()

        boundaryHeader = '----WebKitFormBoundary' + str(random.random())
        boundary = '--' + boundaryHeader

        formStr = ""
        formStr += boundary + "\r\n" + 'Content-Disposition: form-data; name="userid"\r\n\r\n' + "114841" + "\r\n"
        formStr += boundary + "\r\n" + 'Content-Disposition: form-data; name="photoCount"\r\n\r\n' + "20" + "\r\n"
        formStr += boundary + "\r\n" + 'Content-Disposition: form-data; name="uploadurl"\r\n\r\n' + "/shopadmin/uploadimg/simple/" + "\r\n"
        formStr += boundary + "\r\n" + 'Content-Disposition: form-data; name="shopid"\r\n\r\n' + "0" + "\r\n"
        formStr += boundary + "\r\n" + 'Content-Disposition: form-data; name="btnUrl"\r\n\r\n' + "http://www.iautos.cn/static2013/images/shopadmin/car_photo_hover.jpg" + "\r\n"
        formStr += boundary + "\r\n" + 'Content-Disposition: form-data; name="maxSize"\r\n\r\n' + "8388608" + "\r\n"
        formStr += boundary + "\r\n" + 'Content-Disposition: form-data; name="is_add_watermark"\r\n\r\n' + "0" + "\r\n"
        formStr += boundary + "\r\n" + 'Content-Disposition: form-data; name="brand_id"\r\n\r\n' + "0" + "\r\n"
        formStr += boundary + "\r\n" + 'Content-Disposition: form-data; name="DropSelect"\r\n\r\n' + "1" + "\r\n"
        formStr += boundary + "\r\n" + 'Content-Disposition: form-data; name="Filename"\r\n\r\n' + "aa.jpg" + "\r\n"
        formStr += boundary + "\r\n" + 'Content-Disposition: form-data; name="flashId"\r\n\r\n' + "flash1" + "\r\n"
        formStr += boundary + "\r\n" + 'Content-Disposition: form-data; name="cid"\r\n\r\n' + "1" + "\r\n"

        formStr += boundary + '\r\n' + 'Content-Disposition: form-data; name="Filedata"; filename="aa.jpg"\r\nContent-Type: application/octet-stream\r\n\r\n'
        formStr += str(content) + "\r\n"
        formStr += boundary + "--"

        headers['Host'] = 'www.iautos.cn'
        headers['Content-Length'] = len(formStr)
        headers["Content-Type"] = "multipart/form-data; boundary=" + boundaryHeader

        logger.debug("picUpload headers=" + str(headers))

        conn.request("POST", "/shopadmin/uploadimg/simple/", formStr, headers=headers)
        res = conn.getresponse()
        resHeaders = res.getheaders()
        logger.debug(str(resHeaders))
        photoRes = self.decodeBody(resHeaders, res.read())
        # print photoRes#.decode("GB18030")
        conn.close()
        try:
            ret = json.loads(photoRes)
        except Exception as e:
            logger.debug(str(e))
            logger.debug(urllib.quote(photoRes))
            ret = None
        return ret
Example #18
def main():
    print('How many detection rounds? (the first check takes longer):')
    k = int(input())
    # directory settings
    model_dir = '/Users/lulu/Desktop/Python/object_detection_on_pi'
    image_path = '/Users/lulu/Desktop/Python/object_detection_on_pi'
    images = os.listdir(image_path)
    # load the label index
    index_dict = load_index(model_dir)
    # build the graph
    print('Building the TensorFlow graph...')
    create_graph(model_dir)
    sess = tf.Session()
    softmax_tensor = sess.graph.get_tensor_by_name('softmax:0')
    # start predicting
    j = 0
    while (j < k):
        print('Detection round %d! Press Enter to capture' % (j + 1))
        input()
        f = open(image_path + '/keychain.jpg', 'wb')
        f.write(catch_pic())
        f.close()
        result = []
        for image in images:
            if image[-3:] in ['jpg', 'JPG', 'peg', 'PEG']:
                resize(image_path + '/' + image)
                image_data = tf.gfile.FastGFile(image_path + '/' + image,
                                                'rb').read()
                # feed the image data and get the softmax probabilities
                # (a vector of shape (1, 1008))
                print('Image captured')
                predictions = sess.run(softmax_tensor,
                                       {'DecodeJpeg/contents:0': image_data})
                # (1, 1008) -> (1008,)
                predictions = np.squeeze(predictions)
                objects = ''
                split = ','
                max_predict_num = 2
                for i in range(max_predict_num):
                    if i == max_predict_num - 1:
                        split = ' or '
                    objects = objects + split + index_dict[np.where(
                        predictions == max(predictions))[0][0]] + ' '
                    predictions[predictions == max(predictions)] = 0
                result.append(image + ' may contain ' + objects[1:-1] + '.')
        for yi in result:
            print(yi)
        j += 1
        time.sleep(1.5)
    print('Exiting.')
    sess.close()
Example #19
def run(path, blur, resize, rotate, sharpen, writetext):
    if blur != 0:
        blurProcess.process(path, blur)
    elif not (resize[0] == 0 or resize[1] == 0):
        resizeProcess.resize(path, resize[0], resize[1])
    elif rotate != 0:
        rotateProcess.rotate(path, rotate)
    elif sharpen:
        sharperProcess.sharpen(path)
    elif writetext is not None:
        # TODO: add writeText support
        click.echo("Write text is not working yet!")
    else:
        click.echo("No options were selected for the image %s" % path)
Example #20
def input_value(scn_img):
    h1, w1 = scn_img.shape[:2]
    scn_img = resize(h1, w1, 640, scn_img)
    hr1, wr1 = scn_img.shape[:2]

    #==================================================>KPT_SCN
    # fast:

    # 	fast = cv2.FastFeatureDetector_create(49)
    # 	kp1 = fast.detect(scn_img,masked_data)

    # surf:

    detector = cv2.xfeatures2d.SURF_create(400, 5, 5)
    kp1, desc = detector.detectAndCompute(scn_img, None)

    # for k in kp1:
    # 	x,y=k.pt
    # 	print x,y

    #==================================================>DESCRIPTOR_SCN
    # freak:

    freakExtractor = cv2.xfeatures2d.FREAK_create()
    kp1, des1 = freakExtractor.compute(scn_img, kp1)

    # savekeyPointsOnImage(scn_img,"input1.jpg" ,kp1,wr1,hr1)

    # img4 = cv2.drawKeypoints(scn_img, kp1,None, color=(0,255,255))
    # cv2.imwrite('input1.jpg', img4)

    return kp1, des1
Example #21
def cropFacesAndSaveThemIntoASpecificFolder(img, imno, param, name):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    cv2.imshow("img", img)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    faceNo = 0

    if param == 0:
        faceFolder = r'/home/nvidia/Desktop/iSeeYou/' + name
        if not os.path.exists(faceFolder):
            os.makedirs(faceFolder)
    if param == 1:
        faceFolder = r'/home/nvidia/Desktop/test/' + str(imno)
        if not os.path.exists(faceFolder):
            os.makedirs(faceFolder)

    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
        roi_gray = gray[y:y + h, x:x + w]
        roi_color = img[y:y + h, x:x + w]
        crop_img = img[y:y + h, x:x + w]
        faceNo = faceNo + 1
        crop_img = resize.resize(crop_img)
        #imname = 'imno:' +str(imno)+ 'CroppedFace_no:' + str(faceNo)
        imname = str(imno)
        status = cv2.imwrite(os.path.join(faceFolder, imname + '.jpeg'),
                             crop_img)
    return
Example #22
def prepare_images():
    # print("Crop circles 2f")
    # crop_circles("img/original/2f/", "img/cropped/2f/")
    # print("Crop circles 5c")
    # crop_circles("img/original/5c/", "img/cropped/5c/")
    #
    # print("Augment 2f")
    # data_augment("img/cropped/2f/", "img/augmented/2f/")
    # print("Augment 5c")
    # data_augment("img/cropped/5c/", "img/augmented/5c/")

    size = (28, 28)
    print("Resize 2f")
    resize("img/augmented/2f/", "img/resized/2f/", size)
    print("Resize 5c")
    resize("img/augmented/5c/", "img/resized/5c/", size)
Example #23
def preprocess_for_csvreader(imagesInDir, labelsInfile, outfile):
    # Write image rows to the given outfile and labels to a sibling CSV.
    # The '.labels' suffix is an assumption made so the two writers used
    # below point at real files.
    imagesOutfile = open(outfile, 'w', newline='')
    labelsOutfile = open(outfile + '.labels', 'w', newline='')
    imagesWriter = csv.writer(imagesOutfile)
    labelsWriter = csv.writer(labelsOutfile)

    # Create a dictionary from the filenames (keys) to the labels (values)
    labelsDict = csvToDict(labelsInfile)

    for subdir, dirs, files in os.walk(imagesInDir):
        for f in files:
            if not f.startswith('.'):
                # If you want to save the new images uncomment this line and add
                # 'writeFile' as the last argument in the call to resize()
                # writeFile = open(f+'_new', 'a')

                # Convert to grayscale
                img = Image.open(os.path.join(subdir, f)).convert('L')
                # Resize
                img = resize(img, (100, 100), False, False)
                # Convert the image to a flat array
                img = np.asarray(img)
                img = img.flatten()
                # Append the image array as one row of the images CSV
                imagesWriter.writerow(img)
                # Append the label as one row of the labels CSV
                label = labelsDict[f]
                labelsWriter.writerow([label])

    imagesOutfile.close()
    labelsOutfile.close()
Example #24
def input_value(scn_img):
    h1, w1 = scn_img.shape[:2]
    scn_img = resize(h1, w1, 640, scn_img)
    hr1, wr1 = scn_img.shape[:2]

    #==================================================>KPT_SCN
    # fast:

    # 	fast = cv2.FastFeatureDetector_create(49)
    # 	kp1 = fast.detect(scn_img,masked_data)

    # brisk:

    detector = cv2.BRISK_create(70, 2, .5)
    kp1 = detector.detect(scn_img, None)

    # for k in kp1:
    # 	x,y=k.pt
    # 	print x,y

    #==================================================>DESCRIPTOR_SCN

    # brisk:

    kp1, des1 = detector.compute(scn_img, kp1)

    savekeyPointsOnImage(scn_img, "input1.jpg", kp1, wr1, hr1)

    # img4 = cv2.drawKeypoints(scn_img, kp1,None, color=(0,255,255))
    # cv2.imwrite('input1.jpg', img4)

    return kp1, des1
Example #25
def compare_person_to_others_profile(images, person_index, failed_path, num_of_checks, not_exists_path, resolution):
    person, person_images, person_name = get_person_from_images(images, person_index)

    profile = get_profile(person_images)
    profile = resize.resize(profile, resolution)

    num_of_people = len(images)

    indexes_to_check = get_random_indexes(person_index, num_of_people, num_of_checks)

    true_negative = 0
    false_positive = 0

    checked = len(indexes_to_check)

    avg_compare_time = 0
    for i in indexes_to_check:
        curr_person, curr_person_images, curr_person_name = get_person_from_images(images, i)
        check_profile = get_profile(curr_person_images)
        compare_result, compare_time = compare_different_profiles(profile, person_name, curr_person_name,
                                                                  check_profile,
                                                                  failed_path,
                                                                  not_exists_path, resolution)
        avg_compare_time += compare_time
        if compare_result:
            false_positive += 1
        else:
            true_negative += 1

    avg_compare_time /= checked

    return true_negative, false_positive, avg_compare_time, checked
Example #26
def test_resize_jpg():
    """Create thumbs from jpg image."""
    with TemporaryDirectory() as tmpdir:
        files = resize("test/files/Körsbärsträd.jpg", tmpdir, "test")

        assert len(files) == 4

        for (fname, fpath) in files:
            assert isfile(fpath)

            with Image.open(fpath) as img:
                # Check that all image have the widths they nominally should
                assert fname == f"test_{img.width}.jpg"

                # Check that images have roughly the same aspect ratio as the
                # original
                aspect = img.width / img.height
                assert aspect > 1.32
                assert aspect < 1.33

                # Check that there is no exif data, or possibly only the
                # orientation
                exif = img.getexif()
                assert len(exif) == 0 or len(exif) == 1
                if len(exif) == 1:
                    assert 0x0112 in exif
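The resize() under test is not shown; a sketch consistent with what the test asserts (several JPEG thumbnails named after their width, aspect ratio preserved, EXIF dropped) could be:

import os
from PIL import Image

def resize(src, out_dir, prefix, widths=(200, 400, 800, 1600)):
    # Hypothetical sketch; the widths tuple is an assumption, only the naming
    # scheme and the (name, path) return shape follow from the test above.
    files = []
    with Image.open(src) as img:
        for w in widths:
            h = round(img.height * w / img.width)
            thumb = img.resize((w, h))
            name = f"{prefix}_{w}.jpg"
            path = os.path.join(out_dir, name)
            thumb.convert("RGB").save(path)  # re-saving drops the EXIF block
            files.append((name, path))
    return files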
Example #27
 def Resize_File(self):
     doThings = False
     if (len(self.inputFile.get(ANCHOR)) < 1):
         self.label3['text'] = "Select a file"
         self.label3['fg'] = 'Red'
     elif (len(self.inputFile.get(ANCHOR)) > 1):
         self.label3['text'] = " "
         self.label3['fg'] = 'Green'
         doThings = True
     try:
         if doThings:
             fileName = self.inputFile.get(ANCHOR)
             fileSplit = fileName.split(".")
             for opt in self.options:
                 dimensions = opt.get().split("x")
                 if dimensions[0] == '0':
                     continue
                 else:
                     filePath = os.path.join(self.defaultInput, fileName)
                     newPath = os.path.join(
                         self.defaultOutput,
                         self.Format_Name(fileSplit, dimensions))
                     scale = (int(dimensions[0]), int(dimensions[1]))
                     new_img = resize.resize(filePath, scale)
                     new_img.save(newPath)
                 self.label3['text'] = 'Done'
                 self.UpdateListBox()
     except AttributeError:
         self.label3['text'] = "Select resize options"
         self.label3['fg'] = 'Red'
Example #28
def _download_data(url, timeout=10):
    """Download data from given URL."""
    resp = requests.get(url, allow_redirects=False, timeout=timeout)
    if resp.status_code != 200:
        raise Exception('Error on http: [{}], HTTP status: [{}]'.format(
            url, resp.status_code))
    return base64.b64encode(resize(resp.content)).decode('utf-8')
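Here resize() takes raw bytes and returns resized bytes; a minimal Pillow-based sketch under that assumption:

import io
from PIL import Image

def resize(data, max_side=640):
    # Hypothetical sketch: decode the raw bytes, shrink so the longest side is
    # at most max_side, and re-encode to JPEG bytes.
    img = Image.open(io.BytesIO(data))
    img.thumbnail((max_side, max_side))
    buf = io.BytesIO()
    img.convert("RGB").save(buf, format="JPEG")
    return buf.getvalue()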
Example #29
def _download_data(url, timeout=30):
    """Download data from given URL."""
    resp = requests.get(url, allow_redirects=False, timeout=timeout)
    if resp.status_code != 200:
        raise Exception('Error on http: [{}], HTTP status: [{}]'.format(
            url, resp.status_code))
    print('[_download_data] url:[%s] size:[%d]' % (url, len(resp.content)))
    return resize(resp.content)
Example #30
 def resize_image(self, src, w, h, file_name):
     out = u'{}/{}_{}'.format(self.image_dir, file_name, time.time())
     if h == 0 or w == 0:
         result = resize.resize_with_specific_ratio(src, out, width=w, height=h)
     else:
         result = resize.resize(src, out, w, h)
     if result:
         return out
Example #31
def main(imgPath):
    img = resize(imgPath, (100, 100))
    img = img.convert('L')
    arr = np.asarray(img)
    dithered = dither(arr)
    lines = ditherToText(dithered)
    output = '\n'.join(lines)
    print(output)
    with open('output.txt', 'w', encoding='utf-8') as f:
        f.write(output)
Example #32
async def resize_from_file(
        file: UploadFile = File(...), scale_pct: float = 50.0):
    image = await file.read()
    resized, headers = resize(image, scale_pct)
    headers.update({
        "orig-filename": file.filename,
        "content-type": file.content_type,
    })
    headers.update({k: str(v) for k, v in headers.items()})
    return StreamingResponse(resized, media_type="image/png", headers=headers)
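The resize() called by this FastAPI route is assumed to return a file-like stream plus a header dict; a minimal sketch of such a helper:

import io
from PIL import Image

def resize(image_bytes, scale_pct):
    # Hypothetical sketch matching the (stream, headers) contract used above.
    img = Image.open(io.BytesIO(image_bytes))
    w = max(1, int(img.width * scale_pct / 100))
    h = max(1, int(img.height * scale_pct / 100))
    buf = io.BytesIO()
    img.resize((w, h)).save(buf, format="PNG")
    buf.seek(0)
    return buf, {"resized-width": w, "resized-height": h}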
Example #33
def run(updateImageSignal, mode, setLabelSignal):
    camera.capture('image.jpg')
    ogImg = cv2.imread('image.jpg')
    colorImg = ogImg.copy()
    img = resize(ogImg, 50)
    imgContour = img.copy()

    img = img[20:200, 0:500]
    imgContour = imgContour[20:200, 0:500]

    cv2.imwrite('imageCrop.jpg', imgContour)

    form = canny(imgContour)

    print(form.corners)

    imgContour = resize(imgContour, 50)
    cv2.imwrite('output.jpg', imgContour)
    updateImageSignal.emit()
    classifyForm(form, mode, setLabelSignal)
Example #34
def handlePhoto(update, context, isGroup):
    """Handle a photo sent in by user"""
    message = update.message
    user = update.effective_user
    usableCaption = message.caption or ''
    if len(usableCaption):
        usableCaption = usableCaption.strip()
    if isGroup:
        if not usableCaption or not usableCaption.startswith('/sticker'):
            return
        usableCaption = message.caption[8:].strip()
        setName = 'set_' + str(update.effective_chat.id).replace('-', 'm')
        setTitle = update.effective_chat.title + ' pack'
        setOwnerId = HARCODED_USER_ID_TODO_REPLACE
        member = context.bot.getChatMember(update.effective_chat.id,
                                           setOwnerId)
        if member.status not in ["member", "creator", "administrator"]:
            return
    else:
        setName = 'set_' + str(user.id).replace('-', 'm')
        setTitle = user.first_name + ' pack'
        setOwnerId = user.id

    if len(usableCaption) > 50:
        return update.message.reply_text('caption too long (max 50)')
    if message.photo and len(message.photo):
        # get full size photo
        filePath = getImageFromImageMessage(update, context)
        imgPath = resize.resize(filePath)
        stickerEmoji = DEFAULT_EMOJI
        if usableCaption and emoji.emoji_count(usableCaption[0]) == 1:

            stickerEmoji = usableCaption[0]
            usableCaption = usableCaption[1:].strip()
        #add caption
        if usableCaption:
            drawOnBottom = len(usableCaption) > 1 and usableCaption[0] == '-'
            if (drawOnBottom):
                usableCaption = usableCaption[1:].strip()
            imgPath = drawText.draw(imgPath, usableCaption, drawOnBottom)

        sticker = stickerModule.createSticker(context.bot, setOwnerId, imgPath,
                                              setName, setTitle, stickerEmoji)
        if sticker:
            update.message.reply_sticker(sticker.file_id)
        else:
            update.message.reply_text('error creating sticker')
        #remove tmp files
        os.remove(filePath)
        os.remove(filePath.split('.')[0] + '_r.png')

    else:
        update.message.reply_text('error')
Example #35
else:
  min_size = img_w/3
# print min_size

gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# print gray_img.shape
cascade_fn='/usr/local/opt/opencv/share/OpenCV/haarcascades/haarcascade_frontalface_alt2.xml'
cascade = cv2.CascadeClassifier(cascade_fn)
rects = cascade.detectMultiScale(gray_img, scaleFactor=1.01, minNeighbors=4, minSize=(min_size, min_size), flags=cv.CV_HAAR_SCALE_IMAGE)
# print len(rects)
if len(rects) > 0:
  face = rects[len(rects)-1]

  # print img.shape, face

  r_img, r_face = resize.resize(img, face, resize_w, resize_h, face_ratio)

  # print r_img.shape, r_face

  face_x, face_y, face_w, face_h = r_face

  # cv2.rectangle(r_img, (face_x,face_y), (face_x+face_w,face_y+face_h), (255,0,0),2)


  # meanshift
  (segmented_img, labels_img, number_regions) = pms.segment(r_img, spatial_radius=9, range_radius=4.5, min_density=20)

  # kmeans
  pixels = reshape(segmented_img, (r_img.shape[0]*r_img.shape[1], r_img.shape[2]))
  centroids, _ = kmeans(pixels, 4) # four colors will be found
  qnt, _ = vq(pixels, centroids)
Example #36
def start_once():
    resize()
Example #37
    Key([mod, "shift"], "r", lazy.restart()),
    Key([mod, "shift"], "q", lazy.shutdown()),
    Key([mod], "r", lazy.spawncmd()),

    # Applications
    Key([mod], "c", lazy.spawn("firefox")),
    Key([mod], "l", lazy.spawn("clementine")),
    Key([mod], "Return", lazy.spawn("urxvt")),
    # Key([mod], "i", lazy.spawn("ipython qtconsole --profile labwork")),
    Key([mod], "i", lazy.spawn("jupyter qtconsole")),
    Key([mod, "shift"], "l", lazy.spawn("xscreensaver-command -lock")),
    Key([], "Print", lazy.spawn("scrot")),

    # Multimedia keys
    Key([], "XF86Display", lazy.function(lambda q: resize(toggle=True))),
    Key([mod], "s", lazy.function(lambda q: resize())),
    Key([mod, "shift"], "s", lazy.function(lambda q: resize(clone=True))),
    Key([], "XF86AudioPlay", lazy.spawn("clementine --play-pause")),
    Key([], "XF86AudioNext", lazy.spawn("clementine --next")),
    Key([], "XF86AudioPrev", lazy.spawn("clementine --prev")),
    Key([], "XF86AudioMute", lazy.spawn("/home/sean/.config/qtile/volume.sh mute")),
    Key([], "XF86AudioLowerVolume", lazy.spawn("/home/sean/.config/qtile/volume.sh down")),
    Key([], "XF86AudioRaiseVolume", lazy.spawn("/home/sean/.config/qtile/volume.sh up"))
    # Key([], "XF86MicMute", lazy.spawn("/home/sean/.qtile/volume.sh mic")),
]

# Drag floating layouts.
mouse = [
    Drag([mod], "Button1", lazy.window.set_position_floating(),
         start=lazy.window.get_position()),
Example #38
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import SGD, Adam, RMSprop
from keras.utils import np_utils
from keras.models import model_from_json
from PIL import Image

import scipy.misc as smp
import numpy as np
import resize

image_path = input("Enter image name: ")
#a = image_path.split('/')
#image = a[len(a)-1]
#image_path = "/images/"+image_path
converted = resize.resize(image_path)


model = model_from_json(open('my_model_architecture.json').read())
model.load_weights('my_model_weights.h5')

model.compile(loss='categorical_crossentropy',
              optimizer=RMSprop(),
              metrics=['accuracy'])


ima = Image.open(converted)
pixels = np.array(ima.getdata())


width, height = ima.size
Example #39
def start_once():
    resize()
    call(['xsetroot', '-cursor_name', 'left_ptr'])