def qrcode(token):
    """Build the iQiyi QR-login URL for *token*, then either push it via
    Telegram or download the QR image and show it locally, depending on the
    global ``qrShowType`` switch.

    Relies on module-level helpers ``md5Encode``, ``random``, ``tgpush``,
    ``get`` (requests.get) and ``Image`` (PIL) — all defined elsewhere.
    """
    # URL-encoded login endpoint; reused both as the QR payload and inside
    # the signature salt (fixed secret prefix + endpoint).
    endpoint = f'https%3A%2F%2Fpassport.iqiyi.com%2Fapis%2Fqrcode%2Ftoken_login.action%3Ftoken%3D{token}'
    salt = md5Encode('35f4223bb8f6c8638dc91d94e9b16f5' + endpoint)
    url = f'https://qrcode.iqiyipic.com/login/?data={endpoint}&property=0&salt={salt}&width=162&_={random()}'
    if qrShowType == 'tg推送':
        # Push the QR-code link through Telegram instead of showing it locally.
        tgpush(url)
    else:
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36'
        }
        req = get(url, headers=headers)
        with open('登录二维码.png', 'wb') as f:
            f.write(req.content)
        # Bug fix: the PIL class is Image (capital I); lowercase "image"
        # is not a defined name here.
        Image.open('登录二维码.png').show()
def upload(request):
    """Django view: thumbnail an uploaded photo to at most 128x128 and save it.

    Expects the file under the ``photo`` key of ``request.FILES``.
    Returns a plain-text "success." HTTP response.
    """
    reqfile = request.FILES['photo']
    # Bug fix: the original did `image = image.open(reqfile)`, which shadows
    # the (lowercase, nonexistent) module name and raises UnboundLocalError.
    # PIL's class is `Image`; use a distinct local name for the instance.
    img = Image.open(reqfile)
    # In-place resize preserving aspect ratio; NOTE(review): Image.ANTIALIAS
    # was removed in Pillow 10 (use Image.LANCZOS there) — kept for
    # compatibility with whatever Pillow version this project pins.
    img.thumbnail((128, 128), Image.ANTIALIAS)
    img.save("/home/bing/1.jpeg", "jpeg")
    return HttpResponse("success.")
def login(user_name, password, source='index_nav', redir='https://www.douban.com/', login='******'):
    """Log in to douban.com through the module-level ``session``.

    Fetches a captcha via ``get_captcha(url)`` when one is required, shows it
    to the user and reads the solution from stdin, then POSTs the login form
    and prints the resulting session cookies.

    Relies on module-level ``session``, ``url``, ``headers``, ``get_captcha``
    and PIL's ``Image`` — all defined elsewhere in the file.
    """
    # get_captcha returns the captcha id and its image link (falsy id means
    # no captcha is required for this login attempt).
    caprcha_id, caprcha_link = get_captcha(url)
    caprcha = None  # bug fix: ensure the name is bound even if Image.open fails
    if caprcha_id:
        img_html = session.get(caprcha_link)
        # Bug fix: image bytes must be written in binary mode ('wb');
        # text mode ('w') raises TypeError on bytes content.
        with open('caprcha.jpg', 'wb') as f:
            f.write(img_html.content)
        try:
            # Bug fix: the path must be a string literal — the original bare
            # name `caprcha.jpg` was a NameError — and PIL's class is Image.
            im = Image.open('caprcha.jpg')
            im.show()
            im.close()
        except OSError:  # narrowed from a bare except: only image/IO errors
            print("打开图片出错")
        else:
            caprcha = input('请输入验证码:')  # type in the captcha shown
    data = {  # login form payload
        'source': source,
        'redir': redir,
        # Bug fix: the parameter is `user_name`; `username` was undefined.
        'form_email': user_name,
        'form_password': password,
        'login': login,
    }
    if caprcha_id:
        # Captcha fields are only sent when the site asked for one.
        data['captcha-id'] = caprcha_id
        data['captcha-solution'] = caprcha
    # Bug fix: dropped the duplicated `html = html =` assignment.
    html = session.post(url, data=data, headers=headers)  # post-login page
    print(session.cookies.items())  # show the cookies obtained by the login
def upload(request):
    """Django view: shrink the uploaded ``photo`` file to a 128x128 thumbnail.

    The thumbnail is written to /home/bing/1.jpeg; responds with "success.".
    """
    reqfile = request.FILES['photo']
    # Bug fix: `image = image.open(reqfile)` used a lowercase module name and
    # shadowed it with the local, causing UnboundLocalError. PIL's class is
    # `Image`.
    pic = Image.open(reqfile)
    # NOTE(review): Image.ANTIALIAS was removed in Pillow 10 (alias of
    # Image.LANCZOS) — confirm the pinned Pillow version.
    pic.thumbnail((128, 128), Image.ANTIALIAS)
    pic.save("/home/bing/1.jpeg", "jpeg")
    return HttpResponse("success.")
def __getitem__(self, item):
    """Return one multimodal sample for a Dataset-style container.

    Loads and transforms the item-th image, tokenizes the item-th text via
    ``self._preprocess``, and returns ``(image, input_ids, attention_mask)``
    in inference mode or ``(image, input_ids, attention_mask, label)``
    otherwise.
    """
    # Bug fix: `io.open(...)` returns a plain file object, which has no
    # .convert() method — PIL's Image.open is what decodes the picture.
    # NOTE(review): assumes `Image` (PIL) is imported at module level.
    image = Image.open(self.image[item]).convert('RGB')
    image = self.transform(image)
    text = self.text[item]
    input_ids, attention_mask = self._preprocess(text)
    # Flatten to 1-D tensors; labels are only available outside inference.
    if self.infer:
        return image, input_ids.flatten(), attention_mask.flatten()
    return image, input_ids.flatten(), attention_mask.flatten(), self.label[item]
def get_string(img_path):
    """OCR the image at *img_path* and return the recognized text.

    Pipeline: grayscale -> 1x1 dilate/erode (noise removal) -> adaptive
    Gaussian threshold -> pytesseract. Intermediate images are written next
    to the module-level ``src_path`` prefix.
    """
    img = cv2.imread(img_path)
    # Bug fix: the flag is an attribute — cv2.COLOR_BGR2GRAY — the original
    # passed `cv2, COLOR_BGR2GRAY` as two (broken) arguments.
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    kernel = np.ones((1, 1), np.uint8)
    img = cv2.dilate(img, kernel, iterations=1)
    img = cv2.erode(img, kernel, iterations=1)
    cv2.imwrite(src_path + "1.jpg", img)
    img = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                cv2.THRESH_BINARY, 11, 2)
    cv2.imwrite(src_path + "thres.png", img)
    # Bug fix: the function is pytesseract.image_to_string (the original
    # "image to string" was a syntax error) and PIL's class is Image.
    result = pytesseract.image_to_string(Image.open(src_path + "thres.png"))
    return result
def get_string(img_path):
    """Run the OCR pipeline on *img_path* and return the extracted text."""
    img = cv2.imread(img_path)
    # Bug fix: cv2.COLOR_BGR2GRAY is an attribute of cv2; the original
    # passed `cv2, COLOR_BGR2GRAY` as two broken arguments.
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # convert color image to gray
    kernel = np.ones((1, 1), np.uint8)
    img = cv2.dilate(img, kernel, iterations=1)  # noise removal
    img = cv2.erode(img, kernel, iterations=1)
    # cv2.imwrite(src_path + "removed_noise.png", img)
    img = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                cv2.THRESH_BINARY, 11, 2)
    cv2.imwrite(src_path + "thres.png", img)
    result = pytesseract.image_to_string(Image.open(src_path + "thres.png"))
    # Bug fix: the original ended with a bare `return`, silently discarding
    # the OCR result.
    return result
# Bug fix: PIL exports the class module as "Image" (capital I);
# `from PIL import image` raises ImportError.
from PIL import Image

# Open a picture from the working directory and display it with the
# platform's default viewer.
img = Image.open("prajwal.jpg")
img.show()
from PIL import Image  # bug fix: the PIL module is "Image", not "image"

# Rotate-and-show demo, plus getdata/putdata round-trip.
im = Image.open("corgi.jpg")
im.rotate(45).show()

# Blank canvas with the same mode and size as the source image.
new_image = Image.new(im.mode, im.size)
new_image.save("output.jpg")

# Bug fix: `im.getdata() => sequence` was pseudo-code, not valid Python.
# getdata() yields the image's pixels as a flat sequence object.
data = list(im.getdata())
print(im.getdata())

# putdata() writes a pixel sequence back into the image; each stored value
# is pixel * scale + offset.  Bug fix: `data`, `scale` and `offset` were
# undefined names in the original — use the sequence read above and the
# documented defaults (scale=1.0, offset=0.0) explicitly.
im.putdata(data)
im.putdata(data, 1.0, 0.0)
from PIL import Image  # bug fix: PIL's module is "Image" (capital I)

# Crop a fixed region out of image.jpg and show both versions.
img = Image.open("image.jpg")
area = (100, 100, 300, 375)  # (left, upper, right, lower) box
cropped_img = img.crop(area)
img.show()
cropped_img.show()
#%%
## Other references — see konlp.pdf
## new_ko.concordance('영화')  # inspect words surrounding a term
## new_ko.collocations()       # which words are used together?
new_ko.collocations()

#%%
# Apply a word cloud to the token frequencies.
from wordcloud import WordCloud, STOPWORDS
import numpy as np
import matplotlib.pyplot as plt  # bug fix: plt was used below but never imported
from PIL import Image            # bug fix: PIL's module is "Image", not "image"

#%%
stopwords = set(STOPWORDS)  # stop-word handling
# Mask image: the cloud is drawn inside the non-white area of this picture.
alice_mask = np.array(Image.open('img/Draw_ca1.png'))

#%%
data = new_ko.vocab().most_common(150)

#%%
path = 'C:/Windows/Fonts/malgun.ttf'  # Korean-capable font
wc = WordCloud(font_path=path,
               relative_scaling=0.2,
               background_color='white',  # bug fix: 'while' is not a valid color
               mask=alice_mask).generate_from_frequencies(dict(data))
plt.figure(figsize=(12, 8))  # bug fix: the keyword is figsize, not gifsize
plt.imshow(wc)
plt.axis('off')
plt.show()
###############################################
# Py method to calculate RGB value of an image
###############################################
from PIL import Image  # bug fix: PIL exposes "Image", not "image"
import random

im = Image.open("map-o1.png")
rgb_im = im.convert("RGB")

count_India = 0
count_Punjab = 0
count = 0
areaIndia = 3287263  # reference area of India in sq km

# Monte-Carlo sampling over the map: a sample only advances `count` when it
# lands on an India pixel (r == 60) or a Punjab pixel (r == 80), so the loop
# collects 1,000,001 such hits.
# NOTE(review): if the map contains no pixels with those exact red values
# this loop never terminates — confirm the palette of map-o1.png.
while count <= 1000000:
    x = random.randint(0, 2480)
    y = random.randint(0, 2735)
    r, g, b = rgb_im.getpixel((x, y))
    if r == 60:
        count_India = count_India + 1
        count = count + 1
    elif r == 80:
        count_Punjab = count_Punjab + 1
        count = count + 1

# Punjab's area estimated as its hit ratio relative to India's hits.
areaPunjab = (count_Punjab / count_India) * areaIndia
print("Estimated Area of Punjab in sq km is ", areaPunjab)
# NOTE(review): this is a whitespace-collapsed paste of a Streamlit
# medical-insurance app: the tail of a prediction helper (its `def` is not
# visible in this chunk) followed by the start of `main()`.  As written it is
# not valid Python — statements are fused onto one line, `def main()` lacks a
# colon, `image.open()` is called with no path, and `scalar` / `regression.pred`
# are presumably scaler.transform / regression.predict — TODO: recover the
# original formatting from the source before using this fragment.
data=np.array([[age, bmi,children,region,gender,smoker]]) data_s=scalar.transform(data) prediction=regression.pred(data_s)[0,:] return(prediction) def main() st.sidebar.header('About') st.sidebar.info('This app is created to predict Medical Insurance') st.sidebar.info('The dataset consist of 1338 people divided into 4 regions such as Southeast, Southwest, Nrtheast,Northwest') from PIL import image image=image.open() st.sidebar.image(image,width=300) st.title("Medical Insurance Prediction App") html_temp = """ <div style="background-color:#ADD8E6;padding:5px"> <h2 style="color:white;text-align:center;">Web app Build using Streamlit, Deployed on Heroku </h2> </div> """ st.markdown(html_temp, unsafe_allow_html=True) age=st.number_input("Age", min_value=0, max_value=150, value=0) bmi=st.number_input('BMI', min_value=0, max_value=100, value=0) Children=st.number_input('Number of Children', min_value=0, max_value=100, value=0)
# -*- coding: utf-8 -*-
from PIL import Image  # bug fix: the PIL module is "Image", not "image"

a = Image.open("filename path")
# Bug fix: size, format and filename are plain attributes on a PIL image —
# calling them (a.size(), a.format(), a.filename()) raises TypeError.
a.size
a.format
a.filename
a.save("save as other format")
from PIL import Image  # bug fix: PIL exposes "Image" (capital I)
import os

# Rotate every file in the working directory 90 degrees clockwise and save
# a "rot_"-prefixed copy.
# NOTE(review): Image.open will raise on non-image files in the directory
# (including previously produced "rot_" outputs on a re-run) — consider
# filtering by extension; left unfiltered to match the original behavior.
for filename in os.listdir(os.getcwd()):
    img = Image.open(filename)
    img2 = img.rotate(-90)
    img2.save("rot_" + filename)