def process_recipe_url(url: str) -> dict:
    """Scrape a recipe from *url* and normalize it into the Mealie schema.

    Args:
        url: The web address of the recipe page to scrape.

    Returns:
        The scraped recipe dict augmented with Mealie bookkeeping keys
        (``slug``, ``orgURL``, empty ``categories``/``tags``/``notes``/``extras``,
        ``dateAdded=None``). Returns the string ``"fail"`` when scraping yields
        nothing — callers currently rely on this sentinel.
        TODO: Return Better Error Here (the ``-> dict`` annotation is violated
        by the ``"fail"`` sentinel).
    """
    new_recipe: dict = scrape_url(url, python_objects=True)[0]
    logger.info(f"Recipe Scraped From Web: {new_recipe}")

    if not new_recipe:
        return "fail"  # TODO: Return Better Error Here

    slug = slugify(new_recipe["name"])
    mealie_tags = {
        "slug": slug,
        "orgURL": url,
        "categories": [],
        "tags": [],
        "dateAdded": None,
        "notes": [],
        "extras": [],
    }
    new_recipe.update(mealie_tags)

    try:
        img_path = scrape_image(new_recipe.get("image"), slug)
        new_recipe["image"] = img_path.name
    except Exception:
        # Image download is best-effort: a failed image must not abort the
        # whole import. Narrowed from a bare ``except:`` so that
        # KeyboardInterrupt/SystemExit still propagate; log for diagnosis.
        logger.exception("Failed to scrape image for recipe %s", slug)
        new_recipe["image"] = None

    return new_recipe
def download_image_for_recipe(recipe: dict) -> dict:
    """Download a recipe's image and store its local filename on the recipe.

    Args:
        recipe: Recipe dict; the ``image`` key (source URL, presumably) and the
            ``slug`` key are read — TODO confirm the expected schema against callers.

    Returns:
        The same dict, mutated in place: ``recipe["image"]`` becomes the
        downloaded file's name on success, or ``None`` if the download fails.
    """
    try:
        img_path = scrape_image(recipe.get("image"), recipe.get("slug"))
        recipe["image"] = img_path.name
    except Exception:
        # Best-effort download: never let a missing/broken image raise.
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # still propagate; log the cause instead of silently swallowing it.
        logger.exception("Failed to download image for recipe %s", recipe.get("slug"))
        recipe["image"] = None
    return recipe
def process_recipe_url(url: str) -> dict:
    """Scrape a recipe from *url*, normalize it, and attach its scraped image.

    NOTE(review): this redefines ``process_recipe_url`` and shadows the earlier
    definition in this module — confirm which version is intended and delete
    the other.

    Args:
        url: The web address of the recipe page to scrape.

    Returns:
        The processed recipe dict with ``image`` set to the downloaded file's
        name (or ``None`` on failure). Returns the string ``"fail"`` when
        scraping yields nothing — callers currently rely on this sentinel.
        TODO: Return Better Error Here.
    """
    new_recipe: dict = scrape_url(url, python_objects=True)[0]
    logger.info(f"Recipe Scraped From Web: {new_recipe}")

    if not new_recipe:
        return "fail"  # TODO: Return Better Error Here

    new_recipe = process_recipe_data(new_recipe, url)

    try:
        img_path = scrape_image(
            normalize_image_url(new_recipe.get("image")), new_recipe.get("slug")
        )
        new_recipe["image"] = img_path.name
    except Exception:
        # Image download is best-effort: a failed image must not abort the
        # import. Narrowed from a bare ``except:`` so that
        # KeyboardInterrupt/SystemExit still propagate; log for diagnosis.
        logger.exception("Failed to scrape image for recipe %s", new_recipe.get("slug"))
        new_recipe["image"] = None

    return new_recipe