def clean_word(self, word):
    """Lower-case and singularize *word*, filtering out stop words.

    :param word: a single string, or a list of strings.
    :return: the cleaned string (or ``None`` if it is a stop word), or a
        list of cleaned strings with stop words removed.
    :raises ValueError: if *word* is neither a str nor a list.
    """
    if isinstance(word, str):
        # compute the singularized form once instead of twice
        cleaned = singularize(word.lower())
        return cleaned if cleaned not in self.stop_words else None
    elif isinstance(word, list):
        # NOTE(review): unlike the str branch, the stop-word test here uses
        # the raw token, not the singularized form — confirm this is intended.
        return [singularize(w.lower()) for w in word if w not in self.stop_words]
    else:
        raise ValueError('word must be a str or a list of str')
def before_request():
    # Nag authenticated users on the core page: roughly half the time remind
    # them to update an incomplete biography; otherwise flash a reminder for
    # the first incomplete survey.
    if current_user.is_authenticated:
        if request.endpoint == 'web.show_core':
            if 1 == randint(1, 2):  # ~50% chance: bio reminder branch
                if not current_user.bio.research_interests or not current_user.bio.research_experience:
                    flash(Markup(
                        'Your biography is not up to date. '
                        '<a href="{0}" class="alert-link">Update my biography</a>.'.format(
                            url_for('show_user', user_id=current_user.id))),
                        category='warning')
            else:
                for user_survey in current_user.surveys.values():
                    if not user_survey.completed_on:
                        parent_type, parent_id = user_survey.survey.parent_type, user_survey.survey.parent_id
                        if parent_type and parent_id:
                            # link to the survey's parent entity page,
                            # e.g. 'web.show_project' with project_id=...
                            endpoint = 'web.show_{0}'.format(singularize(parent_type))
                            view_args = {'{0}_id'.format(singularize(parent_type)): parent_id}
                            url = url_for(endpoint, **view_args)
                        else:
                            # no parent: fall back to the user's own page
                            url = url_for('show_user', user_id=current_user.id)
                        flash(Markup(
                            'You have not completed the survey. Please complete the '
                            '<a href="{href}#survey" class="alert-link">{title}</a>.'.format(
                                href=url, title=user_survey.title
                            )), category='warning')
                        # only nag about the first incomplete survey
                        break
def put(self, id_object, resource=None, **kwargs):
    """Update a remote object via PUT and return the refreshed instance.

    :param id_object: id of the object to update.
    :param resource: optional resource name; defaults to ``self.resource``.
    :param kwargs: fields to update, sent under the singular resource key.
    :raises RequestException: if the server does not answer 200.
    """
    resource = resource or self.resource
    url = "{}/{}.json".format(resource, id_object)
    # compute the singular form once and reuse it for request and response
    singular_resource = singularize(resource)
    data = {singular_resource: kwargs}
    resp = self.base._request(url, 'PUT', **data)
    if resp.status_code != 200:
        content = safe_get_json(resp)
        raise RequestException(resp.status_code, content=content)
    return self.class_object(**resp.json().get(singular_resource))
def create(self, resource=None, **kwargs):
    """Create a remote object via POST and return the new instance.

    :param resource: optional resource name; defaults to ``self.resource``.
    :param kwargs: fields of the new object, sent under the singular key.
    :raises RequestException: if the server does not answer 201.
    """
    resource = resource or self.resource
    url = "{}.json".format(resource)
    singular_resource = singularize(resource)
    data = {
        singular_resource: kwargs,
    }
    resp = self.base._request(url, 'POST', **data)
    if resp.status_code != 201:
        # BUG FIX: ``getattr(resp, 'json')`` returned the bound method
        # (always truthy) and raised AttributeError when the attribute was
        # missing, so the {} fallback was unreachable. Decode defensively.
        try:
            content = resp.json()
        except (AttributeError, ValueError):
            content = {}
        raise RequestException(resp.status_code, content=content)
    return self.class_object(**resp.json().get(singular_resource))
def compute_metadata_score(self, stream_info, query_str):
    # Score a stream's metadata against a query: count query words that
    # appear (singularized, lower-cased, substring match) in the title,
    # description and keywords; rating/viewcount/likes/dislikes are scored
    # via get_type_score but only viewcount is returned alongside the score.
    TYPE = 1
    metadata_features = {'title': 0, 'rating': -1, 'description': 0, 'viewcount': -1, 'dislikes':-1, 'likes': -1, 'keywords': 0}
    query_words = query_str.split()
    #
    # title words are underscore-separated in 'ftitle'
    words = stream_info['ftitle'].split('_')
    for q_w in query_words:
        for w in words:
            w = inflection.singularize(w.lower())
            if w.find(q_w) >= 0:
                metadata_features['title'] += 1
                break
    #
    metadata_features['rating'] = self.get_type_score(stream_info['rating'], 'rating', TYPE)
    #
    words = stream_info['description'].split()
    for q_w in query_words:
        for w in words:
            w = inflection.singularize(w.lower())
            if w.find(q_w) >= 0:
                metadata_features['description'] += 1
                break
    #
    metadata_features['viewcount'] = self.get_type_score(stream_info['viewcount'], 'viewcount', TYPE)
    #
    metadata_features['dislikes'] = self.get_type_score(stream_info['dislikes'], 'dislikes', TYPE)
    #
    metadata_features['likes'] = self.get_type_score(stream_info['likes'], 'likes', TYPE)
    #
    for q_w in query_words:
        for w in stream_info['keywords']:
            w = inflection.singularize(w.lower())
            if w.find(q_w) >= 0:
                metadata_features['keywords'] += 1
                break
    # compute score
    score = 0
    if metadata_features['title'] or metadata_features['description'] or metadata_features['keywords']:
        score = metadata_features['title'] + metadata_features['description'] + metadata_features['keywords']
    #for key in metadata_features.keys():
    #    score += metadata_features[key]
    return score, metadata_features['viewcount']
def test_uncountable_word_is_not_greedy():
    """A custom uncountable must not affect words that merely contain it."""
    uncountable_word = "ors"
    countable_word = "sponsor"
    inflection.UNCOUNTABLES.add(uncountable_word)
    try:
        # the uncountable itself is a fixed point of both transforms
        assert inflection.singularize(uncountable_word) == uncountable_word
        assert inflection.pluralize(uncountable_word) == uncountable_word
        assert inflection.pluralize(uncountable_word) == inflection.singularize(uncountable_word)
        # 'sponsor' ends with the uncountable's letters but stays countable
        assert inflection.singularize(countable_word) == "sponsor"
        assert inflection.pluralize(countable_word) == "sponsors"
        assert inflection.singularize(inflection.pluralize(countable_word)) == "sponsor"
    finally:
        inflection.UNCOUNTABLES.remove(uncountable_word)
def generate_model_name(name):
    """Derive a singular CamelCase model name from a field or route name.

    :param name: String representing a field or route name.
    :return: the singularized, camelized model name.
    """
    stripped = name.strip('/')
    camelized = inflection.camelize(stripped)
    return inflection.singularize(camelized)
def handle(self):
    # Create a new model file from the stub template; optionally also
    # queue a create-table migration via the make:migration command.
    name = self.argument('name')
    # tableize -> snake_case plural; singularize gives the module name
    singular = inflection.singularize(inflection.tableize(name))
    directory = self._get_path()
    filepath = self._get_path(singular + '.py')
    if os.path.exists(filepath):
        raise RuntimeError('The model file already exists.')
    mkdir_p(directory)
    # make sure the target directory is an importable package
    parent = os.path.join(directory, '__init__.py')
    if not os.path.exists(parent):
        with open(parent, 'w'):
            pass
    stub = self._get_stub()
    stub = self._populate_stub(name, stub)
    with open(filepath, 'w') as f:
        f.write(stub)
    self.info('Model <comment>%s</> successfully created.' % name)
    if self.option('migration'):
        table = inflection.tableize(name)
        self.call(
            'make:migration',
            [
                ('name', 'create_%s_table' % table),
                ('--table', table),
                ('--create', True)
            ]
        )
def gen_db_model(self, schema_name, table_name):
    # Generate model-class source for one table from information_schema
    # column metadata and print it via the 'model.tpl' template. (Python 2.)
    sql = """
        SELECT column_name, column_default, data_type, column_key, is_nullable, character_maximum_length, column_comment
        FROM information_schema.columns
        WHERE TABLE_SCHEMA = :schema_name AND TABLE_NAME = :table_name
        ORDER BY ORDINAL_POSITION"""
    with self.create_session(self._default_db) as session:
        entities = session.execute(sql, dict(schema_name=schema_name, table_name=table_name))
        # 'pkid' appears to be an internal surrogate key; it is skipped
        metas = [GenMeta(entity) for entity in entities if entity[0] != 'pkid']
        model_meta = DbModelMeta()
        model_meta.table_metas = metas
        # flag well-known columns so the template can emit matching mixins
        for meta in metas:
            if model_meta.has_id is False:
                model_meta.has_id = meta.column_name == 'id'
                model_meta.is_string_id = meta.column_name == 'id' and \
                    u'varchar' == meta.data_type
            if model_meta.has_created_at is False:
                model_meta.has_created_at = meta.column_name == 'created_at'
            if model_meta.has_updated_at is False:
                model_meta.has_updated_at = meta.column_name == 'updated_at'
            if model_meta.has_is_deleted is False:
                model_meta.has_is_deleted = meta.column_name == 'is_deleted'
        # e.g. 'user_accounts' -> 'UserAccount'
        model_meta.class_name = inflection.singularize(inflection.camelize(table_name))
        template_path = os.path.join(os.path.dirname(__file__))
        t = Loader(template_path, **{}).load('model.tpl')
        print t.generate(**model_meta.__dict__)
def _resolve_obj_from_path(self, path_str, params):
    """
    Resolve a mock graph object from a path.

    :param path_str: stripped (no extra slashes)
    :param params: request params forwarded to ``_get_meta``
    :return: new obj
    :raises NoSuchPathException: if the path cannot be resolved.
    """
    obj = {}
    if path_str == '0':
        obj['name'] = 'jonathan'
        self._set_cached_obj(path_str, '0', obj)
        # add proper fields to obj and assign path and id globally to it
        obj.update(self._get_meta(params, ['name', 'id', 'birthday'], ['photos', 'feed', 'albums'], 'user'))
        return obj
    # BUG FIX: '[0|me]' was a character class matching any single one of the
    # characters '0', '|', 'm', 'e' — not the alternatives "0" or "me".
    # NOTE(review): also added the '/' separator before the numeric id,
    # assuming paths look like '0/photos/123' — confirm against callers.
    m = re.search(r'(?:0|me)/(photos|albums|feed)/([0-9]+)$', path_str)
    if m:
        # avoid shadowing the builtins 'type' and 'id'
        obj_type = singularize(m.group(1))
        obj_id = m.group(2)
        if int(obj_id) < 100:
            raise NoSuchPathException(path_str)
        obj['type'] = obj_type
        self._set_cached_obj(path_str, str(obj_id), obj)
        if obj_type == 'photo':
            fields = ['id', 'created_time']
            connections = ['comments', 'likes']
        elif obj_type == 'album':
            fields = ['id', 'cover_photo']
            connections = ['comments', 'photos']
        elif obj_type == 'feed':
            fields = ['id', 'message', 'story']
            connections = ['comments', 'attachments']
        obj.update(self._get_meta(params, fields, connections, obj_type))
        return obj
    raise NoSuchPathException(path_str)
def get_response_query_template(service, operation):
    """refers to definition of API in botocore, and autogenerates template
    Assume that response format is xml when protocol is query
    You can see example of elbv2 from link below.
    https://github.com/boto/botocore/blob/develop/botocore/data/elbv2/2015-12-01/service-2.json
    """
    client = boto3.client(service)
    aws_operation_name = to_upper_camel_case(operation)
    op_model = client._service_model.operation_model(aws_operation_name)
    result_wrapper = op_model.output_shape.serialization['resultWrapper']
    response_wrapper = result_wrapper.replace('Result', 'Response')
    metadata = op_model.metadata
    xml_namespace = metadata['xmlNamespace']

    # build xml tree
    t_root = etree.Element(response_wrapper, xmlns=xml_namespace)

    # build metadata
    t_metadata = etree.Element('ResponseMetadata')
    t_request_id = etree.Element('RequestId')
    t_request_id.text = '1549581b-12b7-11e3-895e-1334aEXAMPLE'
    t_metadata.append(t_request_id)
    t_root.append(t_metadata)

    # build result
    t_result = etree.Element(result_wrapper)
    outputs = op_model.output_shape.members
    replace_list = []
    for output_name, output_shape in outputs.items():
        t_result.append(_get_subtree(output_name, output_shape, replace_list))
    t_root.append(t_result)
    xml_body = etree.tostring(t_root, pretty_print=True).decode('utf-8')
    xml_body_lines = xml_body.splitlines()
    # wrap every repeated element in a Jinja for-loop
    for replace in replace_list:
        name = replace[0]
        prefix = replace[1]
        singular_name = singularize(name)
        start_tag = '<%s>' % name
        iter_name = '{}.{}'.format(prefix[-1], name.lower()) if prefix else name.lower()
        loop_start = '{%% for %s in %s %%}' % (singular_name.lower(), iter_name)
        end_tag = '</%s>' % name
        # BUG FIX: the loop terminator was emitted as '{{ endfor }}' — a Jinja
        # variable expression — instead of the block tag '{% endfor %}',
        # producing templates that fail to render.
        loop_end = '{% endfor %}'
        start_tag_indexes = [i for i, l in enumerate(xml_body_lines) if start_tag in l]
        if len(start_tag_indexes) != 1:
            raise Exception('tag %s not found in response body' % start_tag)
        start_tag_index = start_tag_indexes[0]
        xml_body_lines.insert(start_tag_index + 1, loop_start)
        end_tag_indexes = [i for i, l in enumerate(xml_body_lines) if end_tag in l]
        if len(end_tag_indexes) != 1:
            raise Exception('tag %s not found in response body' % end_tag)
        end_tag_index = end_tag_indexes[0]
        xml_body_lines.insert(end_tag_index, loop_end)
    xml_body = '\n'.join(xml_body_lines)
    body = '\n{}_TEMPLATE = """{}"""'.format(operation.upper(), xml_body)
    return body
def __init__(self, resource, **kwargs):
    """Build a resource wrapper from raw API attributes.

    :param resource: plural resource name (e.g. ``'tickets'``).
    :param kwargs: raw attributes from the API; the ``<singular>_fields``
        entry is extracted into ``self.CustomFields`` and the rest become
        instance attributes.
    """
    self.save = self.__save
    self.resource = resource
    singular_resource = singularize(resource)
    name_custom_fields = '{}_fields'.format(singular_resource)
    # BUG FIX: the original assigned ``self.__dict__ = kwargs``, which
    # clobbered the ``save`` and ``resource`` attributes set above; the
    # trailing ``map(setattr, ...)`` is also lazy on Python 3, so those
    # setattr calls never executed. Assign attributes explicitly instead.
    self.CustomFields = CustomFields(**kwargs.pop(name_custom_fields, {}))
    for key, value in kwargs.items():
        setattr(self, key, value)
def __add_resource(self, resource):
    """ Add a vanilla resource. """
    singular = inflection.singularize(resource)
    # build a fresh item via the CLI factory, then persist it via the DAO
    new_item = getattr(self.cli, 'new_' + singular)()
    creator = getattr(self.dao, 'create_' + singular)
    creator(new_item)
def longflag(param):
    # Build the long CLI flag ('--name') for a parameter.
    # NOTE(review): if len(param.name) <= 1 neither x nor y is bound and the
    # final return raises NameError — presumably callers only pass long
    # names here; confirm.
    if len(param.name) > 1:
        x = param.name.replace('_', '-')
        if param.annotation in {list, COUNT}:
            # repeatable flags get the singular form (e.g. --files -> --file)
            y = singularize(x)
        else:
            y = x
    return '--' + y
def reduce_flatten_dto(dto, known_dto, config, suggested_name=None):
    # Recursively reduce a raw JSON-like value into DTO descriptions.
    # Lists return the common element-type name (or the generic object type
    # when heterogeneous); dicts register a new DtoDescription in known_dto
    # and return it; anything else returns known_dto unchanged.
    if isinstance(dto, list):
        elements_type = None
        for item in dto:
            if isinstance(item, dict):
                new_dto = reduce_flatten_dto(item, known_dto, config, suggested_name)
                element_type = new_dto.name
            else:
                element_type = get_type_name(item)
            if element_type:
                if not elements_type:
                    elements_type = element_type
                elif elements_type != element_type:
                    # mixed element types: fall back to the generic object type
                    elements_type = get_type_name(object)
        return elements_type
    elif isinstance(dto, dict):
        fields = []
        dto_name = suggested_name
        for key, value in dto.items():
            if value is not None:
                field = None
                if isinstance(value, dict):
                    element_name = key
                    # parse special attribute to get dto name
                    if all(d.name != element_name for d in known_dto):
                        new_dto = reduce_flatten_dto(value, known_dto, config, element_name)
                        element_name = new_dto.name
                    field = FieldDescription(key, element_name)
                elif isinstance(value, list):
                    # list fields are typed by their singularized key
                    element_name = inflection.singularize(key)
                    elements_type = reduce_flatten_dto(value, known_dto, config, element_name)
                    if not elements_type:
                        elements_type = inflection.singularize(key)
                    field = FieldDescription(key, get_type_name(value), elements_type)
                else:
                    if key == "__name__":
                        # '__name__' is a meta attribute naming the DTO itself
                        dto_name = value
                    else:
                        field = FieldDescription(key, get_type_name(value))
                if field is not None:
                    fields.append(field)
        new_dto = DtoDescription(dto_name, fields)
        update_dtos(known_dto, new_dto)
        return new_dto
    return known_dto
def metadata_Breed(data, breed_label):
    """One-hot encode which breed names appear in each row's metadata words.

    :param data: DataFrame with a 'metadata_annots_top_desc' column of
        space-separated annotation words.
    :param breed_label: DataFrame with a 'BreedName' column.
    :return: ``data`` with one 'metadata_breed_<i>' indicator column per breed.
    """
    # hoist the per-breed normalization out of the per-row loop (it was
    # recomputed for every row) and size the array from the actual number
    # of breeds instead of the hard-coded 370
    breed_names = [
        inflection.singularize(x).lower() for x in breed_label.BreedName.values
    ]
    breed_array = np.zeros((len(data), len(breed_names)))
    for i in range(len(data)):
        word_list2 = set(
            inflection.singularize(x)
            for x in data['metadata_annots_top_desc'][i].split(' ')
        )
        for c, bn in enumerate(breed_names):
            breed_array[i, c] = int(bn in word_list2)
    metadata_breed = pd.DataFrame(breed_array)
    metadata_breed.columns = [
        'metadata_breed_' + str(x) for x in range(breed_array.shape[1])
    ]
    data = pd.concat([data, metadata_breed], axis=1)
    return data
def inflect_given_cardinality(word, num_items):
    """
    Return the singular form of the word if `num_items` is 1.
    Otherwise, return the plural form of the word.
    """
    transform = singularize if num_items == 1 else pluralize
    return transform(word)
def create(cls, zendesk, **kwargs):
    """POST a new object to the API and return it wrapped in ``cls``.

    :raises RequestException: if the server does not answer 201.
    """
    endpoint = "{}.json".format(cls.resource)
    singular_resource = singularize(cls.resource)
    payload = {singular_resource: kwargs}
    resp = zendesk._request(endpoint, 'POST', **payload)
    if resp.status_code != 201:
        from exceptions import RequestException
        raise RequestException(resp.status_code)
    return cls(**resp.json().get(singular_resource))
def singular(self, text: str) -> str:
    """Return the singular form of *text*, memoized per instance."""
    cache = self.__singular_cache
    if text not in cache:
        cache[text] = inflection.singularize(text)
    return cache[text]
def generate_model_name(raml_resource):
    """
    Generate model name.

    :param raml_resource: Instance of ramlfications.raml.ResourceNode.
    """
    resource_uri = get_resource_uri(raml_resource).strip('/')
    # raw string: a plain '\W' literal is an invalid escape sequence
    # (DeprecationWarning, SyntaxError in future Python versions)
    resource_uri = re.sub(r'\W', ' ', resource_uri)
    model_name = inflection.titleize(resource_uri)
    return inflection.singularize(model_name).replace(' ', '')
def type(string: str):
    """Return the (prefixed) singular CamelCase type name for *string*."""
    try:
        prefix, resulting_type = Naming.pop_prefix(string)
        prefix += Naming.TYPE_PREFIX
    except IndexError:
        # no prefix to pop: use the whole string
        prefix = ""
        resulting_type = string
    if Naming._pluralize(resulting_type):
        resulting_type = inflection.singularize(resulting_type)
    return prefix + inflection.camelize(resulting_type)
def names(word, format=None):
    """Derive table/model/class/controller names from *word*.

    :param word: base word to inflect.
    :param format: optional list of keys ('table', 'model', 'class',
        'crud_class') selecting which names to return; a single key returns
        the bare value, several return a list, none returns the full dict.
    """
    # FIX: the original used a mutable default argument (format=[]);
    # None is the safe equivalent and is falsy just like [].
    ret = {}
    ret["table"] = inflection.tableize(word)
    ret["model"] = inflection.singularize(ret["table"])
    ret["class"] = inflection.camelize(ret["model"])
    ret["crud_class"] = inflection.camelize(ret["table"] + "_controller")
    if not format:
        return ret
    ret = [ret[k] for k in format]
    return ret[0] if len(ret) == 1 else ret
def __init__(self, data):
    """Tokenize *data*, POS-tag it, and keep singularized non-stop-words."""
    self.initDatabaseDictionaries()
    self.words = word_tokenize(data.lower())
    self.wordsUnFiltered = nltk.pos_tag(self.words)
    self.wordsTagged = []
    # keep (singular_token, tag) pairs for tokens outside the stop list;
    # unpacking avoids shadowing the builtin 'tuple'
    for token, tag in self.wordsUnFiltered:
        if token not in self.stopWords:
            self.wordsTagged.append((singularize(token), tag))
def check_object(job, activity, obj):
    """Return True when *obj* exists; otherwise mark the job and activity
    as failed (status 3), record end timestamps, and commit."""
    if obj:
        return True
    # missing definition: flag both records as failed and persist
    activity.end_date = datetime.datetime.now()
    activity.status = 3
    job.overall_status = 3
    job.status_message = "%s Definition not found" % inflection.singularize(job.activity_type).title()
    job.end_date = datetime.datetime.now()
    db.session.commit()
    return False
def preprocess(self, text):
    # Normalize raw text: ASCII-fold, replace numeral words, strip
    # irrelevant patterns and phone numbers, then singularize each token
    # of each line.
    # NOTE(review): encode() returns bytes on Python 3, so the string
    # methods/regexes below imply this runs on Python 2 — confirm.
    text = text.encode('ascii', 'ignore').lower()
    text = ZEPreprocessor.replace_numeral_words(text)
    text = ZEPreprocessor.re_irrelation.sub(' ', text)
    # NOTE(review): re_single_space is replaced with '' (deleting the match)
    # rather than collapsing to ' ' — verify that is the intended behavior.
    text = ZEPreprocessor.re_single_space.sub('', text)
    text = ZEPreprocessor.re_phone_number.sub('', text)
    text = text.split('\n')
    # per line: singularize every space-separated token and rejoin
    text = [' '.join([inflection.singularize(token) for token in _.split(' ')]) for _ in text]
    return text
def __delete_resource(self, resource, resource_id=None):
    """ Delete a vanilla resource. """
    singular = inflection.singularize(resource)
    # without an explicit id, look the item up first and use its id
    if not resource_id:
        found = getattr(self.cli, 'search_' + singular)()
        resource_id = found.id
    getattr(self.dao, 'delete_' + singular)(resource_id)
def serialise(self):
    """Hex-encode a canonical JSON (type, dict) tuple for this object."""
    import inflection
    import binascii
    import simplejson
    # 'proposal', 'superblock', etc.
    obj_type = inflection.singularize(self._meta.name)
    payload = simplejson.dumps((obj_type, self.get_dict()), sort_keys=True)
    return binascii.hexlify(payload.encode('utf-8')).decode('utf-8')
def standardize(ingredients):
    # Normalize free-text ingredient lines into cleaned ingredient names.
    # Returns the deduplicated list; also appends each kept name to the
    # module-level ``ingredients_set`` list as a side effect.
    recipe = []
    for ing in ingredients:
        temp = ''
        ing = ing.lower()
        ing = unidecode.unidecode(ing)
        # keep only the first alternative ('butter or margarine' -> 'butter')
        ing = ing.split(" or ")[0]
        # drop parenthesized/bracketed notes, quantities and punctuation
        ing = re.sub(r"[\(\[].*?[\)\]]", " ", ing)
        ing = re.sub(r"([0-9]*-ounces)+", " ", ing)
        ing = re.sub(r"([0-9]*-ounce)+", " ", ing)
        ing = re.sub(r"([0-9]*-inches)+", " ", ing)
        ing = re.sub(r"([0-9]*-inch)+", " ", ing)
        ing = re.sub(r"([0-9])+", " ", ing)
        ing = re.sub(r"(-)+", " ", ing)
        ing = re.sub(r"(/)+", " ", ing)
        ing = ing.split(",")[0]
        ing = ing.replace("*", " ")
        ing = ing.replace("%", " ")
        ing = ing.replace("+", " ")
        ing = ing.replace("-", " ")
        ing = ing.replace(".", " ")
        ing = ing.replace(":", " ")
        ing = ing.replace("(", " ")
        # strip any trailing URL
        ing = ing.split("http")[0]
        wi = TextBlob(ing)
        tagbag = wi.tags
        # keep words with an allowed POS tag, or that are whitelisted,
        # unless the singular form is a stop word
        for pos in tagbag:
            if (pos[1] in tags) or (singularize(pos[0]) in allow):
                if singularize(pos[0]) not in stopWords:
                    temp = temp + " " + singularize(pos[0])
        temp = temp.strip()
        if temp:
            recipe.append(temp)
            ingredients_set.append(temp)
    return list(set(recipe))
def register_blueprints(flask, package):
    """Register every blueprint module found in *package* on *flask*.

    Modules must be named ``*_<singular(package)>.py`` and expose a
    module-level ``blueprint`` attribute.
    """
    package_dir = path.join(getcwd(), __name__, package)
    module_suffix = '_' + singularize(package) + '.py'
    # raw string: a plain '\.py$' literal is an invalid escape sequence
    module_names = [sub(r'\.py$', '', c) for c in listdir(package_dir)
                    if c.endswith(module_suffix)]
    for module_name in module_names:
        module = import_module(__name__ + '.%s.%s' % (package, module_name))
        flask.register_blueprint(module.blueprint)
def standardize(ingredients):
    # Normalize (flag, ingredient-text) pairs into (flag, cleaned-name)
    # pairs, dropping entries that clean down to nothing.
    recipe = []
    for flag, ing in ingredients:
        temp = ''
        # print(ing)
        # ing = '1 1/4 cups all-purpose flour (about 5 1/2 ounces)'
        ing = ing.lower()
        ing = unidecode.unidecode(ing)
        # keep only the first alternative ('butter or margarine' -> 'butter')
        ing = ing.split(" or ")[0]
        # drop parenthesized notes, quantities and punctuation
        ing = re.sub(r" ?\([^)]+\)", "", ing)
        ing = re.sub(r"([0-9]*-ounces)+", "", ing)
        ing = re.sub(r"([0-9]*-ounce)+", "", ing)
        ing = re.sub(r"([0-9]*-inches)+", "", ing)
        ing = re.sub(r"([0-9]*-inch)+", "", ing)
        ing = re.sub(r"([0-9]*)+", "", ing)
        ing = re.sub(r"(-)+", "", ing)
        ing = re.sub(r"(/)+", "", ing)
        commasplit = re.split(r"\,", ing)
        ing = commasplit[0]
        ing = ing.replace("*", "")
        ing = ing.replace("+", "")
        ing = ing.replace("-", "")
        ing = ing.replace(".", "")
        ing = ing.replace(":", "")
        ing = ing.replace("(", "")
        # strip any trailing URL
        ing = ing.split("http")[0]
        wi = TextBlob(ing)
        tagbag = wi.tags
        # keep words with an allowed POS tag, or that are whitelisted,
        # unless the singular form is a stop word
        for pos in tagbag:
            if (pos[1] in tags) or (singularize(pos[0]) in allow):
                if singularize(pos[0]) not in stopWords:
                    temp = temp + " " + singularize(pos[0])
        temp = re.sub(r"^\s+", "", temp)
        temp = re.sub(r"\s+$", "", temp)
        if temp != "":
            recipe.append((flag, temp))
    return recipe
def navigate_to_project_page(self, entity_type=''):
    """Navigates to a project entity query page"""
    # normalize the entity type, e.g. 'shot_types' -> 'ShotType'
    entity_type = inflection.singularize(inflection.camelize(entity_type))
    # Form the url...
    url = '%s/page/project_default?entity_type=%s&project_id=%s' % (
        self.baseUrl, entity_type, self.project_id)
    # Get the url...
    self.get(url)
    # Ensure the page is loaded...
    self.wait_for_page_to_load()
def type(string: str):
    """Return the (prefixed) singular CamelCase type name for *string*."""
    try:
        prefix, base = Naming.pop_prefix(string)
        prefix += Naming.TYPE_PREFIX
    except IndexError:
        # nothing to pop: keep the whole string, no prefix
        prefix = ''
        base = string
    if Naming._pluralize(base):
        base = inflection.singularize(base)
    return prefix + inflection.camelize(base)
def get_model(self, resource_name):
    """Returns the model :class:`flask.ext.sqlalchemy.Model` for a resource.

    :param resource_name: name of resource
    :raises UnableToProcess: if no model is registered for the resource.
    """
    key = singularize(resource_name)
    try:
        return self._model_for_resources[key]
    except KeyError:
        raise UnableToProcess('Resource Error',
                              'Resource [%s] not found' % key)
def get_cfg(self, resource_name):
    """Returns the :class:`flask_resteasy.configs.APIConfig` for a resource.

    :param resource_name: name of resource
    :raises UnableToProcess: if no config is registered for the resource.
    """
    key = singularize(resource_name)
    try:
        return self._cfg_for_resources[key]
    except KeyError:
        raise UnableToProcess('Resource Error',
                              'Resource [%s] not found' % key)
def next(self, token): token = inflection.singularize(token) # Fixing nonsensical singularization if token in ['ha', 'contain', 'shoe']: token += 's' self.state = self.state.next(token) if type(self.state) is UnexpectedState: print "UnexpectedState!"
def type(string: str):
    """Return the (prefixed) singular CamelCase type name for *string*."""
    try:
        prefix, name = Naming.pop_prefix(string)
        prefix += Naming.TYPE_PREFIX
    except IndexError:
        # no prefix present: use the input as-is
        prefix = ''
        name = string
    if Naming._pluralize(name):
        name = singularize(name)
    # camelize does not convert '-' but '_'
    name = name.replace('-', '_')
    return prefix + camelize(name)
def to_singular_pascal_case(name: str):
    """Make any string into a singular "PascalCased" string.

    Args:
        name (str): The string to convert.

    Returns:
        str: The converted string.
    """
    titled = inflection.titleize(name)
    snake = inflection.parameterize(titled, separator="_")
    camel = inflection.camelize(snake)
    return inflection.singularize(camel)
def augment_depends_on_one_get(self, attributes, name):
    # Register a getter for a one-to-one dependency called *name* on the
    # class being built, record it in the dependency set, and return the
    # getter's method name.
    _get_method = inflection.singularize(name)
    # dashed key under which this class's config stores the dependency name
    _key = inflection.dasherize(
        inflection.underscore(
            inflection.singularize(self.klass)))
    _get_config_method = inflection.pluralize(name)
    if 'dependencies' not in attributes:
        attributes['dependencies'] = set()
    attributes['dependencies'].add(_get_method)

    def get(this):
        # NOTE(review): this local 'name' is assigned but never used below —
        # confirm whether it was meant to feed the lookup.
        name = inflection.dasherize(inflection.underscore(this.__class__.__name__))
        if _key not in this.config:
            return None
        # find the configured dependency whose .name matches our config entry
        return next(val for key, val in this.env.config.get(_get_config_method).iteritems()
                    if (val.name == this.config[_key]))

    attributes[_get_method] = get
    return _get_method
def folder_has_items(folder_id, folder_type): sql = "select count(*) from %s where folder_id = %s" % ( inflection.singularize(folder_type), folder_id) try: result = db.engine.execute(sql) for row in result: if int(row[0]) > 0: return True return False except Exception as ex: print ex return True
def singularize_token(tkn, lookup_table, micro_status):
    # Return the singular lemma for *tkn*, appending a treatment note to
    # micro_status whenever singularization actually changed the token.
    lemma = tkn
    if (tkn.endswith("us") or tkn.endswith("ia") or tkn.endswith("ta")):
        # for inflection exception in general-takes into account both lower
        # and upper case (apart from some inflection-exception list used
        # also in next
        lemma = tkn
    elif (tkn not in lookup_table["inflection_exceptions"]):
        # Further Inflection Exception list is taken into account
        lemma = inflection.singularize(tkn)
    if (tkn != lemma):
        # Only in case when inflection makes some changes in lemma
        micro_status.append("Inflection (Plural) Treatment: " + tkn)
    return lemma
def censor(input_text):
    """ Returns input_text with any profane words censored """
    # NOTE(review): this function references ``self`` but does not take it
    # as a parameter — it only works when defined inside a class body;
    # confirm the intended home of this function.
    bad_words = self.get_profane_words()
    spl = input_text.split(' ')
    # BUG FIX: the original iterated ``for index, value in spl`` which tries
    # to unpack each word (a str) and raises ValueError; enumerate() is the
    # correct way to get (index, word) pairs.
    for index, value in enumerate(spl):
        word = singularize(value)
        if word in bad_words:
            # mask the word with one '*' per original character
            spl[index] = '*' * len(value)
    return ' '.join(spl)
def generatePair(opinionList, featureList):
    """Map each business_id to (feature, adjective) pairs whose opinion
    noun (singularized) matches the feature."""
    pair = {}
    for business_id, group in featureList.groupby("business_id"):
        opinion = opinionList[business_id]
        pair[business_id] = []
        features = list(group["featureList"])[0]
        for feat in features:
            # adjectives whose associated noun singularizes to this feature
            matching = [jj for (nn, jj) in opinion if singularize(nn) == feat]
            if matching:
                pair[business_id].extend((feat, adj) for adj in matching)
    return pair
def singularize_rule(self):
    """Retry keyword matching with singularized claim tokens when no
    prioritized docids were found; returns self for chaining."""
    item = self.item
    if len(item['prioritized_docids']) < 1:
        singular_tokens = [inflection.singularize(tok) for tok in item['claim_tokens']]
        claim = ' '.join(singular_tokens)
        fkd_new, fk_new = self._keyword_match(claim, raw_set=True)
        item['prioritized_docids'] = list(fk_new)
        item['structured_docids'] = fkd_new
    return self
def test_uncountable_word_is_not_greedy():
    """Adding 'ors' as uncountable must leave 'sponsor' fully countable."""
    frozen = "ors"
    normal = "sponsor"
    inflection.UNCOUNTABLES.add(frozen)
    try:
        # the uncountable is a fixed point of both transforms
        assert inflection.singularize(frozen) == frozen
        assert inflection.pluralize(frozen) == frozen
        assert (
            inflection.pluralize(frozen)
            == inflection.singularize(frozen)
        )
        # a word merely ending in similar letters is unaffected
        assert inflection.singularize(normal) == "sponsor"
        assert inflection.pluralize(normal) == "sponsors"
        assert (
            inflection.singularize(inflection.pluralize(normal)) == "sponsor"
        )
    finally:
        inflection.UNCOUNTABLES.remove(frozen)
def cleaning_data_function_new(text):
    # Tokenize *text* line-by-line, strip the ProgRoman pattern, and keep
    # singularized words longer than 2 characters that are not in the
    # module-level stoplist.
    # NOTE(review): results are appended to the module-level list ``new1``
    # and that list is returned, so repeated calls accumulate — confirm
    # this is intended.
    text = text.lower()
    words = re.split(r'\n', text)
    for word in words:
        text1 = ProgRoman.sub(' ', word)
        split = re.split(r' ', text1)
        msg = [
            inflection.singularize(text2) for text2 in split
            if (text2 not in stoplist) and (len(text2) > 2)
        ]
        new1.append(msg)
    return new1
def from_json(cls, name: str, data: str, skip_fields_with_errors=False):
    """Parse a JSON string and build an instance via ``from_dict``.

    A top-level array is wrapped in a dict under a pluralized key.
    """
    parsed = json.loads(data)
    # TODO: More useful support for lists
    # TODO: Bubble up a warning that we ignored everything except the first nonlist item
    if isinstance(parsed, list):
        if not parsed:
            raise NotImplementedError("Top-level array cannot be an empty list")
        items_name = pluralize(name)
        if singularize(items_name) == name:
            items_name = "items"
        return cls.from_dict(name, {items_name: parsed}, skip_fields_with_errors)
    return cls.from_dict(name, parsed, skip_fields_with_errors)
def sanitize_name(name):
    """Return (singular, plural, ClassName) for *name*, uniquifying the
    plural against any existing ``<plural>.py`` file in the cwd."""
    singular = inflection.singularize(name)
    # if *name* was already singular, derive the plural; otherwise keep it
    plural = inflection.pluralize(name) if singular == name else name
    suffix = 2
    while os.path.exists(plural + '.py'):
        plural = plural + str(suffix)
        suffix += 1
    class_name = inflection.camelize(singular)
    return singular, plural, class_name
def singularize_rule(self):
    """Singularize words """
    item = self.item
    if len(item['prioritized_docids']) < 1:
        # retry the keyword match on the singularized claim
        tokens = [inflection.singularize(tok) for tok in item['claim_tokens']]
        fk_new = self._keyword_match(' '.join(tokens))
        item['prioritized_docids'] = list(fk_new)
    return self
def augment(self, attributes, name):
    # Wire a one-to-one dependency into *attributes*: install its getter,
    # then wrap 'create' so the dependency is created first if needed.
    _get_method = self.augment_depends_on_one_get(attributes, name)
    # preserve any previously-installed create (or the parents' chain)
    _create = call_parents('create')
    if 'create' in attributes:
        _create = attributes['create']

    def create(self):
        # create the dependency (when present and not yet created), then
        # fall through to the original create
        item = getattr(self, _get_method)()
        if item and (not item.created()):
            item.create()
        _create(self)

    attributes['create'] = create
    return {inflection.singularize(name): None}
def __get_raw_data__(self):
    # Lazily fetch and cache the raw API payload for this object.
    if self._raw_data:
        return self._raw_data
    cls = self.__class__
    params = {'id': str(self.code)}
    options = Util.merge_options('params', params, **self.options)
    path = Util.constructed_path(cls.get_path(), options['params'])
    r = Connection.request('get', path, **options)
    response_data = r.json()
    # convert date strings in the payload in-place
    Util.convert_to_dates(response_data)
    # the payload is wrapped under the singular form of the lookup key
    self._raw_data = response_data[singularize(cls.lookup_key())]
    return self._raw_data
def inflect(that, word):
    """
    :param that: some object; if it has a length, we use that
    :param word: this word will be made singular or plural depending on the
        singularity or plurality of that
    :return: inflected word
    """
    try:
        cnt = len(that)
    except TypeError:
        # not sized: treat as a single item
        cnt = 1
    form = pluralize(word) if cnt != 1 else singularize(word)
    return f"{cnt} {form}"
def singularize(word):
    '''
    Return the singular form of the given word.

    TODO: Check that word is a noun (or an adjective or at any rate can be
        sensibly used as a noun) before calling inflection.singularize?
    FIXME BUGS: inflection returns many wrong answers by pattern:
        *aves -> *afe
        It uses incomplete special case matching (octopus), and does not
        recognize many other pairs such as: (locus, loci)
    NB: pattern3.en is not yet functional (2017.07.10)
    '''
    # work around the '*aves -> *afe' inflection bug by stripping the 's'
    if word.lower().endswith('aves'):
        return word.rstrip('sS')
    return inflection.singularize(word)
def __init__(self, klass, other, rel_column=None, backref=None, **kwargs):
    # Install a has-many relationship from *other* to *klass*: derives the
    # collection property name, creates the foreign-key column on *klass*,
    # and installs the reverse has-one accessor. (Python 2.)
    self.klass = klass
    self.other = other
    if rel_column:
        # explicit column: derive names from the given backref/column
        prop_name = inflection.pluralize(backref)  # phonenumbers
        backref_id = rel_column  # owner_id
        backref = backref_id.split('_id')[0]  # parent
    else:
        # derive everything from the class names:
        # user.projects
        prop_name = inflection.pluralize(self.klass.__name__).lower()
        # project.user_id
        backref_id = '%s_id' % inflection.singularize(self.other.__name__).lower()
        # project.user
        backref = inflection.singularize(self.other.__name__).lower()
    setattr(other, prop_name, self)
    self.rel_column = backref_id
    column = Column(ObjectId)
    column.name = backref_id
    klass.__columns__[backref_id] = column
    setattr(klass, backref_id, column)
    setattr(klass, backref, RelationshipHasOne(klass, other, backref_id))
    print ">>>", backref
def name_or_flag(has_keyword_only, param):
    # Derive the CLI name/flag for *param*: bare name for positionals,
    # '-x' for one-letter options, '--long-name' otherwise; list/Count
    # (repeatable) parameters are singularized.
    def _name_or_flag(p):
        name = p.name.replace('_', '-')
        if p.default == p.empty or (has_keyword_only and p.kind == p.POSITIONAL_OR_KEYWORD):
            return p.name
        elif len(name) == 1:
            return '-' + name
        else:
            return '--' + name
    try:
        if issubclass(param.annotation, (list, Count)):
            # NOTE(review): singularize() receives the flag string including
            # its '-'/'--' prefix — confirm that is intended.
            return singularize(_name_or_flag(param))
    except TypeError:
        # annotation is not a class (e.g. a typing construct): fall through
        pass
    return _name_or_flag(param)
def find_related_fields_to_rename(fields, prefix=[]):
    """
    Find all related fields on a Serializer or NestedBoundField.

    Args:
        fields: an object whose `.__iter__()` method returns instances of
            `BoundField` (e.g. a `Serializer` or `NestedBoundField`)
        prefix: a list of parent keys that map to the current list of fields.
            (e.g. `{'one': {'two': {'three': 'value'}}}` will have
            `prefix=['one', 'two'] when `{'three': 'value'})` is passed into
            the function.

    Returns:
        list: list of RelatedFieldRename instances.
    """
    ret = []
    if isinstance(fields, ListSerializer):
        fields = fields.child.__class__()
    # this is selfish hack to support something I am doing
    # hopefully I can factor this out soon
    if isinstance(fields, BoundField):
        fields = fields._field
    # iterate over each field to determine if it is related
    for field in fields:
        assert isinstance(field, BoundField), (
            'Fields must be an iterator that returns `BoundField` instances.'
        )
        key = prefix + field.name.split('.')
        # iterate over any nested lists
        if field._proxy_class is ListSerializer:
            ret.extend(
                find_related_fields_to_rename(field.child, prefix=key)
            )
        # any nested dicts should be iterated over
        # this could check if `field` is an instance of `NestedBoundField`
        # but that would break my hack above
        elif hasattr(field, 'fields'):
            ret.extend(find_related_fields_to_rename(field, prefix=key))
        # the field is a list of pks and should be renamed
        elif field._proxy_class is ManyRelatedField:
            new_name = singularize(key[-1]) + '_ids'
            ret.append(RelatedFieldRename(key, new_name))
        # the field is a single pk and should be renamed
        elif field._proxy_class is PrimaryKeyRelatedField:
            new_name = key[-1] + '_id'
            ret.append(RelatedFieldRename(key, new_name))
    return ret
def is_match_splabel(labels, label_to_trace, target_list, gt):
    """Return True when any label is correct against the ground-truth words."""
    # collect every singularized ground-truth word, skipping 'none' entries
    all_gt_words = []
    for row in gt:
        for entry in row:
            if entry.find('none') < 0:
                all_gt_words.append(inflection.singularize(entry.split('-')[-1]))
    return any(
        is_label_correct(label, label_to_trace, target_list, all_gt_words)
        for label in labels
    )
def stemming(word):
    """Reduce *word* to a base form — currently just noun singularization.

    Earlier revisions used the ``en`` package to also map be-forms to 'be',
    normalize verbs to present tense, and spell-correct; that path was
    commented out and is removed here as dead code.
    """
    return inflection.singularize(word)