def image_fix():
    """Call the imageFix API with an image + mask pair and save the result.

    Reads ``input/imagefix1.jpg`` and its mask ``input/imagefix1_mask.jpg``,
    posts both as base64 JSON to ``<API_BASE_URL>/api/v1/imageFix``, and on a
    successful response (``code == 0``) downloads the repaired image into
    ``./output/`` with a timestamped file name.

    Returns: None. Side effects: network request, console output, file write.
    """
    def _file_to_base64(path):
        # Read a binary file and return its base64 payload as a utf-8 string.
        # (Replaces two previously duplicated read/encode/decode stanzas.)
        with open(path, mode='rb') as f:
            return base64.b64encode(f.read()).decode(encoding="utf-8")

    url = config.API_BASE_URL + '/api/v1/imageFix'
    headers = {'APIKEY': config.API_KEY, "Content-type": "application/json"}
    data = {
        "base64": _file_to_base64("input/imagefix1.jpg"),
        "maskBase64": _file_to_base64("input/imagefix1_mask.jpg"),
    }

    response = requests.post(url=url, headers=headers, json=data)
    print(response.content)
    json_result = json.loads(response.content)
    # The service signals success with code == 0; anything else is left
    # unhandled on purpose (the raw response was already printed above).
    if json_result["code"] == 0:
        image_url = json_result["data"]["imageUrl"]
        file_name = ('./output/image_fix'
                     + time.strftime("%Y.%m.%d.%H.%M.%S", time.localtime())
                     + ".jpg")
        utils.download_img(image_url, file_name)
def main():
    """Download every photo from the most recent SpaceX launch.

    Fetches the image links for the latest launch and saves each one into a
    freshly created image directory as ``spacex_<n>.jpg``. HTTP failures are
    logged (with traceback) rather than propagated.
    """
    latest_launch_api = "https://api.spacexdata.com/v4/launches/latest"
    target_dir = create_dir_for_img()
    try:
        # Numbering starts at 1 so file names read spacex_1.jpg, spacex_2.jpg, ...
        for index, link in enumerate(get_last_launch_links(latest_launch_api),
                                     start=1):
            download_img(target_dir, f"spacex_{index}.jpg", link)
    except requests.HTTPError as error:
        logger.exception(error)
def format_msg_country(self, data, name):
    """Build the case-summary message for a country.

    When *data* is a dict it is wrapped for attribute access, the country's
    flag image is fetched, and a multi-line Portuguese summary is returned.
    Otherwise the not-found message is returned. (*name* is accepted but not
    read here.)
    """
    if not isinstance(data, dict):
        return Messages.NOT_FOUND_COUNTRY.value

    country = ParseDictAsObj(data)
    # Fetch the flag as a side effect; the return value is not used here.
    download_img(country.countryInfo.flag)
    return (f"Total: {country.cases}\n"
            f"Casos hoje: {country.todayCases}\n"
            f"Mortes: {country.deaths}\n"
            f"Mortes hoje: {country.todayDeaths}\n"
            f"Recuperados: {country.recovered}\n"
            f"Críticos: {country.critical}")
def main():
    """Download every image of a Hubble collection named on the command line.

    Resolves the collection to its image ids via the hubblesite v3 API, then
    saves each image as ``hubble_<id><ext>`` into a freshly created image
    directory. HTTP failures are logged (with traceback) rather than raised.
    """
    arg_parser = argparse.ArgumentParser(description="Download photos.")
    arg_parser.add_argument("collection", help="Hubble collection.")
    parsed = arg_parser.parse_args()

    image_api = "http://hubblesite.org/api/v3/image/{}"
    collection_api = "http://hubblesite.org/api/v3/images/{}"
    target_dir = create_dir_for_img()
    try:
        for image_id in get_img_ids(collection_api.format(parsed.collection)):
            link = get_img_link(image_api, image_id)
            extension = get_img_extension(link)
            download_img(target_dir, f"hubble_{image_id}{extension}", link)
    except requests.HTTPError as error:
        logger.exception(error)
def format_msg_state(self, data, name):
    """Build the case-summary message and flag image for a Brazilian state.

    When *data* is a dict, returns a tuple of the Portuguese summary text and
    the downloaded flag; otherwise returns the not-found message with a None
    flag. (*name* is accepted but immediately overwritten with a slug derived
    from the state's own name.)
    """
    result = Messages.NOT_FOUND_STATE.value
    # BUG FIX: 'flag' was previously only bound inside the isinstance branch,
    # so a non-dict 'data' raised UnboundLocalError at the return statement.
    flag = None
    if isinstance(data, dict):
        state = ParseDictAsObj(data)
        # Slug for the flag URL: accent-stripped, lowercase, hyphenated.
        name = strip_accents(state.state).lower().replace(' ', '-')
        flag = download_img(
            UrlFlag.URL_FLAG_STATE.value.format(
                # The São Paulo flag asset is hosted under 'sao-paulo1'.
                state=name if name != 'sao-paulo' else name + '1'))
        result = (f"UF: {state.uf}\n"
                  f"Estado: {state.state}\n"
                  f"Confirmados: {state.cases}\n"
                  f"Suspeitos: {state.suspects}\n"
                  f"Casos descartados: {state.refuses}\n"
                  f"Mortes: {state.deaths}")
    return result, flag
# Flatten the stored face data into parallel lists of encodings and names
# (plus a per-name count dict) for matching below.
candidate_encodings, candidate_names, cnt_dict = flatten_encodings(data)
# Photo-matching path: only runs when neither a video file nor the camera
# was requested. NOTE(review): nesting reconstructed from mangled source —
# the download loop is assumed to sit inside this branch; confirm upstream.
if args.video is None and not args.camera:
    if args.match is not None:
        # Use given photos to do face matching
        urls = get_url_path_list(args.match)
    else:
        # Search for photos
        if args.batch:
            # Batch mode cannot prompt interactively, so bail out.
            stop("No photos provided!")
        # Interactive search; also yields the search term and download dir.
        urls, term, img_download_path = interact_get_match_photos(
            term, MATCH_FACE_IMG_PATH, KEY_FILE, key_file)
    for url in urls:
        if is_url(url):
            try:
                print("\n Downloading the photo from\n {}".format(url))
                path, _ = download_img(url, img_download_path, term)
                print(" into\n {}".format(path))
            # Network hiccups skip this URL rather than aborting the loop.
            except (ConnectionResetError, urllib.error.URLError):
                continue
        else:
            # Not a URL: treat the entry as a local file path.
            path = url
        if not args.batch:
            # Show the photo and let the user accept or reject it;
            # rejected downloads are deleted from disk.
            rgb = show_image(path)
            yes = mlutils.yes_or_no("\n Do you want to use this photo",
                                    yes=True)
            if not yes:
                os.remove(path)
                continue
        else:
            # Batch mode: load the image without displaying it.
            rgb = show_image(path, show=False)
# Apply delay function on Image Post Time and Image Original User Last Post Time # best_photos_sorted features = [ "caption_length", "english_content_length", "english_content_ratio", "hashtag_count", "hashtag_total_length", "hashtag_avg_length", "hashtag_caption_ratio", "mention_count", "year", "month", "weekday", "week", "hour", "followers", "following", "follower_following_ratio" ] photos_ranking = utils.rank_photo(df, features, model_path, current_time) # photos_ranking = [e for e in range(len(df))] # random.shuffle(photos_ranking) # Download Photo downloaded_photo_path, row_num, image_id = utils.download_img( photos_ranking, df, downloaded_img_folder_path, current_time) print(f"Selected Photo:{image_id} is downloaded") # Generate Caption original_caption = df.loc[row_num, "caption"] print("Original caption is extracted") # 1. Get Author UserName author_id = df.loc[row_num, "user_id"] print("Author ID is extracted") author_name = utils.get_image_author(cnxn, author_id) print("Author Name is retrieved") # 2. Pick Hashtags (picking 30 hashtags and preserve 30% hashtags of original post by defaule) english_hashtags = utils.get_english_hashtags(original_caption)
# Build the ID-photo API request: read the portrait, base64-encode it, post
# the layout parameters, and save the returned photo with a timestamped name.
# (NOTE(review): 'apikey' and 'url' are defined earlier, outside this view.)
headers = {'APIKEY': apikey, "Content-type": "application/json"}
file = "input/idphoto.jpg"
with open(file, mode='rb') as f:
    # raw base64 bytes of the image
    base64_binary = base64.b64encode(f.read())
    # decode the base64 bytes into a utf-8 string
    base64_str = base64_binary.decode(encoding="utf-8")
    # print(base64Str)
data = {
    "base64": base64_str,  # base64 of the portrait image file
    "bgColor": "438EDB",  # ID-photo background color, hex RGB, e.g. 3557FF
    "dpi": 300,  # print DPI of the ID photo, usually 300
    "mmHeight": 35,  # physical height of the ID photo, in millimetres
    "mmWidth": 25,  # physical width of the ID photo, in millimetres
    "printBgColor": "FFFFFF",  # print-layout background color, hex RGB, e.g. FFFCF9
    "printMmHeight": 152,  # print-layout height in mm; if 0 or smaller than the photo, no layout sheet is made — a single photo is output
    "printMmWidth": 102,  # print-layout width in mm; same rule as printMmHeight
    "dress": "man8",  # optional outfit swap, type + index: man1 = men's outfit 1, woman3 = women's outfit 3, child5 = child outfit 5; costs one extra credit
    "printMarginMm": 5  # optional outer margin reserved on the print layout
}
response = requests.post(url=url, headers=headers, json=data)
content = response.content.decode(encoding="utf-8")
print(content)
json_result = json.loads(content)
# The service signals success with code == 0.
if json_result["code"] == 0:
    image_url = json_result["data"]["idPhotoImage"]
    file_name = './output/idphoto' + time.strftime(
        "%Y.%m.%d.%H.%M.%S", time.localtime()) + ".jpg"
    utils.download_img(image_url, file_name)