def _get_ingredient_primary_category(ingredient):
    """Return the display name of the ingredient's primary category.

    The primary category is the first mapped category whose position is
    5 or greater. When no mapping qualifies, an error is logged and None
    is returned implicitly.

    :param ingredient: Ingredient model instance (uses ``id`` and
        ``canonical_name``).
    :return: str display name, or None when no primary category exists.
    """
    category_mappings = IngredientCategoryMappingModel.query.filter(
        IngredientCategoryMappingModel.ingredient_id == ingredient.id)
    # Iterate the query directly; no need to materialize the id list first.
    for mapping in category_mappings:
        category = IngredientCategoryModel.query.get(mapping.category_id)
        # A falsy position (None or 0) never qualifies as primary.
        if category.position and category.position >= 5:
            return category.display_name
    LogService.error(
        "Could not find category for %s" % ingredient.canonical_name)
def get(self, path):
    """
    Fetch the value from the configuration store for the given key.

    :param path: Normalized path in the hierarchy to the key.
    :return: str value stored at the node, decoded as UTF-8.
    :raises KeyError: when the node does not exist.
    """
    self._connect()
    try:
        data, stat = self.zk.get(path)
        return data.decode("utf-8")
    except NoNodeError:
        raise KeyError("%s does not exist." % path)
    except Exception as e:
        # Previously this logged and fell through, silently returning
        # None; re-raise so callers see the real failure instead of a
        # missing value.
        LogService.error(e.__class__)
        LogService.error(e)
        raise
def get_recipes():
    """Scrape recipes from Upneat across the configured character range.

    Failures on individual slugs are logged and skipped so one bad page
    does not abort the whole crawl.

    :return: list of raw recipe dicts successfully scraped.
    """
    # Full crawl would be digits plus A-Z; currently narrowed to a single
    # letter ("C") — presumably left over from testing. TODO confirm.
    # character_list = list(range(0, 10)) + list(string.ascii_uppercase)
    character_list = string.ascii_uppercase[2:3]
    raw_recipes = []
    for char in character_list:
        slugs = UpneatConnector._get_recipes_alpha(char)
        for slug in slugs:
            try:
                raw_recipes.append(UpneatConnector.scrape_recipe(slug))
            except Exception:
                # A bare except here would also swallow KeyboardInterrupt
                # and SystemExit; only trap real errors.
                LogService.error("ERROR WITH %s " % slug)
    return raw_recipes
def create(self, args):
    """
    Create resources from the YAML data at ``self.path``.

    When ``args.slug`` is anything other than 'all', delegate to the
    generic ``Resource.create`` for that single item. Otherwise POST
    every item to the endpoint, queueing failures for exactly one retry
    pass before giving up.

    This needs to be more reliable and do other things.

    :param args: Parsed CLI arguments (uses ``args.slug``).
    :return: None
    """
    if args.slug != 'all':
        Resource.create(self.endpoint, args, self.path)
        return

    LogService.info('Creating all ingredients')
    data = london.util.load_yaml_data_from_path(self.path)
    LogService.info("Found %i items." % len(data))

    success = 0
    retries = []
    # First pass: anything that errors is queued for one retry later.
    for item in data:
        try:
            result = requests.post(self.endpoint,
                                   json=london.util.to_json(item))
            result.raise_for_status()
            success += 1
        except requests.exceptions.RequestException:
            retries.append(item)

    LogService.info("Succeeded with %i items." % success)
    LogService.info("Retrying with %i items. " % len(retries))

    # Second pass: iterate a copy so successful items can be removed
    # from the retry list in place.
    for item in list(retries):
        try:
            result = requests.post(self.endpoint,
                                   json=london.util.to_json(item))
            result.raise_for_status()
            # raise_for_status() already succeeded, so count directly
            # rather than re-checking through Resource._handle_error().
            success += 1
            retries.remove(item)
        except requests.exceptions.RequestException as e:
            LogService.error("Encountered error (%s). No more retries." % e)
            LogService.error(item)

    LogService.info("Succeeded with %i items." % success)

    # Refresh all indexes
    # There seems to be a problem where I hit ElasticSearch too quickly
    # and don't get all of the indexes in time. Manifested as 9 indexes
    # instead of 11.
    sleep(2)
    self._refresh_indexes()
def _kibana_settings():
    """
    I am pedantic and want dark mode enabled on the Kibana instance.
    This code serves no useful purpose within the app.

    :return: None
    """
    headers = {'kbn-version': '7.5.0', 'Content-Type': 'application/json'}
    data = '{"changes":{"theme:darkMode":true}}'
    kibana_host = os.getenv('AMARI_KIBANA_HOST', default='localhost')
    try:
        # A timeout keeps this cosmetic call from hanging indefinitely
        # when Kibana is unreachable.
        resp = requests.post(
            "http://%s:5601/api/kibana/settings" % kibana_host,
            headers=headers, data=data, timeout=10)
    except requests.exceptions.RequestException as e:
        # Best-effort only: a down Kibana must not break the app.
        LogService.error("Error setting dark mode: %s" % e)
        return
    if resp.status_code == 200:
        LogService.info("Kibana set to dark mode.")
    else:
        LogService.error("Error setting dark mode: %s" % resp.text)
def import_(self, filepath):
    """Import cocktail recipes from a data file into the database.

    When the file holds more than one recipe, all existing cocktails are
    deleted first; each recipe is then [re]created, validated, indexed,
    and finally the cocktail scan cache is invalidated.

    :param filepath: Path understood by
        RecipeImporter._fetch_data_from_path.
    """
    dicts_to_import = RecipeImporter._fetch_data_from_path(filepath)

    # Bulk import: wipe everything up front. A single-recipe file only
    # replaces that one recipe via self.delete(cocktail=c) below.
    if len(dicts_to_import) > 1:
        self.delete(delete_all=True)

    for cocktail_dict in dicts_to_import:
        try:
            slug = Slug(cocktail_dict['display_name'])
            LogService.info("Working %s" % slug)
            c = CocktailFactory.raw_to_obj(cocktail_dict, slug)
        except KeyError as e:
            # Malformed record: log and skip rather than abort the import.
            LogService.error("Something has bad data!")
            LogService.error(cocktail_dict)
            LogService.error(e)
            continue

        # Remove any existing copy of this cocktail before re-creating it.
        self.delete(cocktail=c)

        db_obj = CocktailModel(**ObjectSerializer.serialize(c, 'dict'))
        with self.pgconn.get_session() as session:
            session.add(db_obj)
            LogService.info("Successfully [re]created %s" % c.slug)
            # Non-fatal validation: problems are logged, import continues.
            ObjectValidator.validate(db_obj, session=session, fatal=False)

        Indexers.get_indexer(c).index(c)

    # Stored scan results are now stale.
    CocktailScanCache.invalidate()
def _handle_error(result):
    """
    Parse an HTTP request for success/failure. Return a count of the
    success.

    :param result: requests Response object to inspect.
    :return: Integer count of success (THIS IS NOT A RETURN CODE!)
    """
    try:
        result.raise_for_status()
        LogService.debug('Success!')
        return 1
    except requests.exceptions.RequestException:
        LogService.error("Error handling URL: %i" % result.status_code)
        LogService.error(result.request.body)
        # Parse the body exactly once, and don't let a non-JSON error
        # page raise from inside this handler and mask the real failure.
        try:
            body = result.json()
        except ValueError:
            body = {}
        LogService.error(body.get('message'))
        LogService.error(body.get('details'))
        return 0
def import_(self, filepath): data = IngredientImporter._fetch_data_from_path(filepath) # Delete old data self.delete() LogService.info("Starting import") for ingredient in data: i = Ingredient(**ingredient) db_obj = IngredientModel(**ObjectSerializer.serialize(i, 'dict')) # Test for existing with self.pgconn.get_session() as session: # existing = IngredientModel.query.get(i.slug) existing = session.query(IngredientModel).get(i.slug) if existing: if existing.kind == IngredientKinds( 'category' ).value or existing.kind == IngredientKinds( 'family').value: if i.kind is IngredientKinds('ingredient'): LogService.error( "Skipping %s (t:%s) since a broader entry exists (%s)" % (i.slug, i.kind.value, existing.kind)) else: LogService.error( "%s (p:%s) already exists as a %s (p:%s)" % (i.slug, i.parent, existing.kind, existing.parent)) else: LogService.error( "%s (p:%s) already exists as a %s (p:%s)" % (i.slug, i.parent, existing.kind, existing.parent)) else: session.add(db_obj) Indexers.get_indexer(i).index(i) LogService.info("Validating") with self.pgconn.get_session() as session: objects = session.query(IngredientModel).all() for db_obj in objects: # Validate ObjectValidator.validate(db_obj, session=session, fatal=False) # Invalidate the cache IngredientTreeCache.invalidate()
def fail(self, message):
    """Record a validation failure; escalate when this validator is fatal.

    :param message: Human-readable description of the failure.
    :raises ValidationException: when self.fatal is truthy.
    """
    LogService.error(message)
    if not self.fatal:
        return
    raise ValidationException(message)