def get(self):
    """Fetch the championship JSON feed and populate teams, round, games
    and standings.

    The remote endpoint wraps the JSON payload in a JS callback, so the
    response text is sliced before parsing.
    """
    try:
        self._response = requests.get(self.URL)
        # Strip the JSONP wrapper: 22 leading chars, 3 trailing chars.
        self._json = json.loads(self._response.text[22:-3])
        self.nome_completo = self._json['nome-completo']
        for team_id, team_data in self._json['equipes'].items():
            self.equipes[team_id] = Equipe(team_data)
        # NOTE(review): '2528' looks like a hard-coded phase id -- confirm
        # it is stable across seasons.
        fases = self._json['fases']['2528']
        self.rodada = Rodada(fases['rodada'])
        # Attach per-team standings data as attributes on each Equipe.
        for team_id, details in fases['classificacao']['equipe'].items():
            for raw_key, raw_value in details.items():
                attr = underscore(snake_string(raw_key))
                if isinstance(raw_value, dict):
                    setattr(self.equipes[team_id], attr, Base(raw_value))
                else:
                    setattr(self.equipes[team_id], attr, snake_string(raw_value))
        for game_id, game_data in fases['jogos']['id'].items():
            self.jogos[game_id] = Jogo(game_data, self.equipes)
        for grupo in fases['classificacao']['grupo'].values():
            for team_id in grupo:
                self.classificacao.append(self.equipes[team_id])
    except Exception:
        # Catch Exception rather than a bare ``except:`` so SystemExit and
        # KeyboardInterrupt still propagate; report and carry on.
        traceback.print_exc(file=sys.stdout)
def format_keys(obj, format_type=None):
    """
    Takes either a dict or list and returns it with camelized keys only if
    JSON_API_FORMAT_KEYS is set.

    :format_type: Either 'dasherize', 'camelize' or 'underscore'
    """
    if format_type is None:
        format_type = getattr(settings, "JSON_API_FORMAT_KEYS", False)

    # Map each supported format to a key-transforming callable.
    key_formatters = {
        # inflection can't dasherize camelCase, so underscore first
        "dasherize": lambda k: inflection.dasherize(inflection.underscore(k)),
        "camelize": lambda k: inflection.camelize(k, False),
        "capitalize": lambda k: inflection.camelize(k),
        "underscore": lambda k: inflection.underscore(k),
    }

    formatter = key_formatters.get(format_type)
    if formatter is None:
        # Unknown/disabled format: return the object untouched.
        return obj

    if isinstance(obj, dict):
        return OrderedDict(
            (formatter(key), format_keys(value, format_type))
            for key, value in obj.items()
        )
    if isinstance(obj, list):
        return [format_keys(item, format_type) for item in obj]
    return obj
    def parse(self, stream, media_type=None, parser_context=None):
        """Convert JSONAPI data to JSON data.

        Flattens the JSON API envelope (attributes, relationships, id)
        into a plain dict keyed by underscored field names.
        """
        # Let the base JSON parser deserialise the raw stream first.
        content = super(JsonApiParser, self).parse(stream, media_type=media_type, parser_context=parser_context)

        # The view is needed to look up the serializer's declared fields.
        self.view = parser_context.get("view", None)

        if "data" in content:
            serializer = self.view.get_serializer(instance=None)
            fields = serializer.get_fields()

            # Copy JSON API "attributes" into a flat dict with
            # underscored (snake_case) keys.
            resource_data = {}
            for attr_name, val in six.iteritems(content["data"].pop("attributes", {})):
                resource_data[underscore(attr_name)] = val
            relationships = content["data"].pop("relationships", {})
            if content["data"].get("id"):
                resource_data.update(id=content["data"].pop("id"))

            # Match each serializer relation field against the incoming
            # "relationships" payload (keys arrive dasherized).
            for field_name, field in six.iteritems(fields):
                if dasherize(field_name) not in relationships:
                    continue

                related_field = get_serializer(field)

                if isinstance(related_field, (RelatedField, BaseSerializer)):
                    # Reduce relationship objects to bare id(s): a list of
                    # ids for to-many, a single id for to-one.
                    rel_data = relationships[dasherize(field_name)].get("data")
                    if rel_data:
                        if isinstance(rel_data, list):
                            rel_data = [data.get("id") for data in rel_data]
                        else:
                            rel_data = rel_data.get("id")
                    resource_data[underscore(field_name)] = rel_data
            return resource_data
        # No "data" member: nothing to convert.
        return {}
Example #4
0
    def create(self, name):
        """Create a new, empty upgrade step directory.

        :param name: a human readable sentence, or a CamelCase /
            underscored identifier; used for the step directory name,
            the generated class name and its docstring.
        :returns: path of the created step directory.
        """
        docstring = name.rstrip('.')
        # Raw strings: '\W' / '\w' are invalid escape sequences in plain
        # string literals (DeprecationWarning now, an error in future
        # Python versions).
        name = re.sub(r'\W', '_', name.rstrip('.'))

        if re.match(r'^\w*$', docstring):
            # docstring seems to not be a sentence but a camelcase classname
            # or a underscored upgrade directory name.
            # Lets make it human readable for use as docstring.
            docstring = inflection.humanize(
                inflection.underscore(name))

        # Step directories are timestamped so they sort chronologically.
        step_name = '{0}_{1}'.format(
            datetime.now().strftime(DATETIME_FORMAT),
            inflection.underscore(name))
        step_directory = os.path.join(self.upgrades_directory, step_name)
        os.mkdir(step_directory)

        # Make the step directory a package.
        Path(step_directory).joinpath('__init__.py').touch()

        code_path = os.path.join(step_directory, 'upgrade.py')
        with open(code_path, 'w+') as code_file:
            code_file.write(
                PYTHON_TEMPLATE.format(
                    classname=inflection.camelize(name),
                    docstring=docstring))

        return step_directory
    def _load_from_xml(self, xml):
        """Populate this object from an XML element.

        Scalar children are coerced (int/float, and datetimes for keys
        containing 'date') and stored in ``self._data``; complex children
        are instantiated as the matching class from this module, either as
        a single attribute or as a list of items.
        """
        # NOTE(review): Element.getchildren() was removed from stdlib
        # ElementTree in Python 3.9 -- presumably lxml here; confirm.
        self._id = xml.get('id')
        self._code = xml.get('code')

        for child in xml.getchildren():
            if len(child.getchildren()) > 0:
                # Determine if children are all of the same type, e.g. Items
                # has many Item children, or if the current child should be
                # instantiated as a class
                is_list = len(set([i.tag for i in child.getchildren()])) == 1
                if not is_list:
                    # Single object
                    try:
                        cls = getattr(sys.modules[__name__],
                                      inflection.camelize(child.tag))
                        setattr(self, inflection.underscore(child.tag),
                                cls.from_xml(child, parent=self))
                    except AttributeError:
                        # No class for this object, ignore it
                        pass
                    continue
                else:
                    # Homogeneous children: collect them into a list
                    # attribute named after the container tag.
                    setattr(self, inflection.underscore(child.tag), [])
                    for item_xml in child.getchildren():
                        try:
                            cls = getattr(sys.modules[__name__],
                                          inflection.camelize(item_xml.tag))
                            attr = getattr(self,
                                           inflection.underscore(child.tag))
                            attr.append(cls.from_xml(item_xml, parent=self))
                        except AttributeError:
                            # Give up, remaining items are all the same and
                            # there is no class for them
                            break
                    continue

            # Leaf node: coerce the text value where possible.
            key = inflection.underscore(child.tag)
            value = child.text
            if value is not None:
                # Parse numeric types
                if re.match(r'^[\d]+$', value):
                    value = int(value)
                elif re.match(r'^[\d.]+$', value):
                    value = float(value)
                # Parse datetimes, use naive detection of key to avoid trying
                # to parse every field
                elif 'date' in key:
                    try:
                        value = arrow.get(value).datetime
                    except Exception:
                        pass
            self._data[key] = value

        # Reset dirty data because all data should now be clean
        self._to_persist = {}
Example #6
0
    def success_response_structure_test(self, response, status, relationship_keys=None):
        """
        Assert that *response* is a structurally valid JSON API success
        response with the given *status* code.

        This can be extended in the future to cover stricter validation of the
        response as follows:

        * Top level response MUST contain AT LEAST ONE of ['data', 'meta']
        * Top level response MUST contain ['links']  # Our requirement
            - Top level links MUST contain ['self']; MAY contain ['related']

        * Resource object MUST contain ['id', 'type']
        * Resource object MUST contain AT LEAST ONE of ['attributes', 'relationships']
        * Resource object MAY contain ['links', 'meta']

        * Relationship object MUST contain AT LEAST ONE of ['links', 'data', 'meta']
            - Relationship links object MUST contain AT LEAST ONE of ['self', 'related']
        """
        self.assertEqual(response.status_code, status)

        response_content = self.load_json(response)

        # All required top-level keys must be present.
        self.assertTrue(
            all(key in response_content for key in self.SUCCESS_HIGH_LEVEL_KEYS))

        for data in self.convert_to_list(response_content['data']):
            self.assertTrue(all(key in data for key in self.SUCCESS_DATA_KEYS))

            if relationship_keys:
                self.assertTrue('relationships' in data)
                relationships = data['relationships']
                # Relationship names appear camelCased in the payload.
                self.assertTrue(
                    all(camelize(key, False) in relationships for key in relationship_keys))

                # NOTE(review): dict.iteritems() is Python 2 only; this
                # helper breaks on Python 3 -- confirm target version.
                for relationship_name, relationship in relationships.iteritems():
                    self.assertTrue(
                        all(key in relationship for key in ['data', 'links']))

                    for relationship_data in self.convert_to_list(relationship['data']):
                        self.assertTrue(
                            all(key in relationship_data for key in ['type', 'id']))

                    links = relationship['links']

                    # Fall back to a wildcard pattern when no concrete
                    # resource instance is attached to the test case.
                    resource_pk = self.resource.pk if hasattr(self, 'resource') else '[0-9A-Za-z]*'

                    self.assertRegexpMatches(links['self'], r'^https?://.*/{}/{}/relationships/{}'.format(
                        self.resource_name, resource_pk, underscore(relationship_name)))

                    if hasattr(self, 'remote_relationship_keys') \
                            and relationship_name in self.remote_relationship_keys:
                        self.assertRegexpMatches(links['related'], r'^https?://.*/{}/\w'.format(
                            pluralize(underscore(self.get_remote_relationship_name(relationship_name)))))
                    else:
                        self.assertRegexpMatches(links['related'], r'^https?://.*/{}/{}/{}'.format(
                            self.resource_name, resource_pk, underscore(relationship_name)))
def model_to_command_args(Model):
    '''Take a model class and return a list of args that will create an
    equivalent model in Ember.'''

    # ember type -> list of Django fields that map onto it
    fields_to_generate = {}

    # Walk the model's fields and bucket each one by its Ember type.
    for field in Model._meta.get_fields():

        # Ember generates the id field automatically; never include it.
        if field.name == 'id':
            continue

        matched = False
        for ember_type, django_field_classes in FIELD_TYPE_MAP.items():
            if field.__class__ in django_field_classes:
                fields_to_generate.setdefault(ember_type, []).append(field)
                matched = True

        if not matched:
            # 'string' is the default type when nothing else matched.
            fields_to_generate.setdefault('string', []).append(field)

    field_args = []
    for ember_type, model_fields in fields_to_generate.items():
        for field in model_fields:
            emberized_field_name = camelize(
                underscore(field.name), uppercase_first_letter=False)

            field_arg = emberized_field_name + ':' + ember_type

            if ember_type in ('belongs-to', 'has-many'):
                # Relation fields also specify what they're related to.
                relation_emberized = dasherize(underscore(
                    field.rel.to.__name__))
                field_arg += ':' + relation_emberized

            field_args.append(field_arg)

    full_args = ['generate', 'model', dasherize(underscore(Model.__name__))]
    full_args += field_args

    return full_args
 def __getattr__(self, key):
     """Attribute lookup backed by the object's data dictionaries.

     Resolution order: the real ``id``/``code`` slots, private or already
     materialised attributes, pending (dirty) values, then clean data.
     """
     if key in ['id', 'code']:
         return self.__dict__['_{}'.format(key)]
     elif key[0] == '_' or key in self.__dict__:
         return self.__dict__[key]
     # Stored keys are underscored, so normalise the requested name once
     # and use it for both the membership test and the lookup (the
     # original tested the raw key but indexed the underscored one, which
     # could raise KeyError for camelCase names).
     ukey = inflection.underscore(key)
     if ukey in self._to_persist:
         return self._to_persist[ukey]
     elif ukey in self._data:
         return self._data[ukey]
     # Python 3 compatible raise (original used Python 2 comma syntax).
     raise AttributeError('Key {} does not exist'.format(key))
Example #9
0
def subject_alias(subject):
    """Return an underscored alias for *subject*.

    Accepts a name string, a class, a homogeneous list of instances, or a
    plain instance; an empty list maps to ``'_empty'``.
    """
    # ``str`` replaces ``basestring``, which exists only on Python 2 and
    # raised NameError on Python 3.
    if isinstance(subject, str):
        return underscore(subject)
    elif isinstance(subject, type):
        return underscore(subject.__name__)
    elif isinstance(subject, list) and all(type(item) == type(subject[0]) for item in subject):
        # The all() guard is vacuously true for [], so the length check
        # below still handles the empty case.
        if len(subject) > 0:
            return underscore(type(subject[0]).__name__)
        else:
            return '_empty'
    else:
        return underscore(type(subject).__name__)
Example #10
0
 def under(self, data, many=None):
     """Return *data* with every key converted to snake_case.

     Handles a single mapping or, when ``many`` is truthy, a sequence of
     mappings; values are left untouched.
     """
     def snake_keys(mapping):
         # Rebuild the mapping with underscored keys.
         return {underscore(key): value for key, value in mapping.items()}

     if many:
         return [snake_keys(entry) for entry in data]
     return snake_keys(data)
Example #11
0
def flatten(d, parent_key='', sep='_'):
    """Flatten a nested mapping into a single-level OrderedDict.

    Keys are underscored and joined with *sep*; the result is sorted by
    key.
    """
    flat_items = []
    for key, value in d.items():
        if parent_key:
            compound_key = underscore(parent_key) + sep + underscore(key)
        else:
            compound_key = underscore(key)
        if isinstance(value, MutableMapping):
            # Recurse into nested mappings, prefixing with the compound key.
            flat_items.extend(flatten(value, compound_key, sep=sep).items())
        else:
            flat_items.append((compound_key, value))
    return OrderedDict(sorted(flat_items))
def format_keys(obj, format_type=None):
    """
    .. warning::

        `format_keys` function and `JSON_API_FORMAT_KEYS` setting are deprecated and will be
        removed in the future.
        Use `format_field_names` and `JSON_API_FORMAT_FIELD_NAMES` instead. Be aware that
        `format_field_names` only formats keys and preserves value.

    Takes either a dict or list and returns it with camelized keys only if
    JSON_API_FORMAT_KEYS is set.

    :format_type: Either 'dasherize', 'camelize', 'capitalize' or 'underscore'
    """
    warnings.warn(
        "`format_keys` function and `JSON_API_FORMAT_KEYS` setting are deprecated and will be "
        "removed in the future. "
        "Use `format_field_names` and `JSON_API_FORMAT_FIELD_NAMES` instead. Be aware that "
        "`format_field_names` only formats keys and preserves value.",
        DeprecationWarning
    )

    if format_type is None:
        format_type = json_api_settings.FORMAT_KEYS

    # Per-format key transformations; unknown formats leave obj untouched.
    transforms = {
        # inflection can't dasherize camelCase directly, underscore first
        'dasherize': lambda k: inflection.dasherize(inflection.underscore(k)),
        'camelize': lambda k: inflection.camelize(k, False),
        'capitalize': lambda k: inflection.camelize(k),
        'underscore': lambda k: inflection.underscore(k),
    }
    transform = transforms.get(format_type)
    if transform is None:
        return obj

    if isinstance(obj, dict):
        formatted = OrderedDict()
        for key, value in obj.items():
            formatted[transform(key)] = format_keys(value, format_type)
        return formatted
    if isinstance(obj, list):
        return [format_keys(item, format_type) for item in obj]
    return obj
def format_value(value, format_type=None):
    """Apply the configured string formatting to *value*."""
    if format_type is None:
        format_type = json_api_settings.format_type

    if format_type == 'dasherize':
        # inflection can't dasherize camelCase, so underscore first
        return inflection.dasherize(inflection.underscore(value))
    if format_type == 'camelize':
        return inflection.camelize(value, False)
    if format_type == 'capitalize':
        return inflection.camelize(value)
    if format_type == 'underscore':
        return inflection.underscore(value)
    # Unknown format: return the value unchanged.
    return value
 def __update__(self, equipes=None):
     """Copy the raw payload onto attributes and link both teams.

     ``equipes`` maps team ids to team objects; when omitted an empty
     mapping is used, so the team links are simply skipped.
     """
     equipes = equipes or {}
     for raw_key, raw_value in self.obj.items():
         attr_name = underscore(snake_string(raw_key))
         if isinstance(raw_value, dict):
             setattr(self, attr_name, Base(raw_value))
         else:
             setattr(self, attr_name, snake_string(raw_value))
     # Replace the raw team ids with the actual team objects when set.
     time1_id = self.obj['time1']
     time2_id = self.obj['time2']
     if time1_id:
         self.time1 = equipes[time1_id]
     if time2_id:
         self.time2 = equipes[time2_id]
def format_value(value, format_type=None):
    """Format *value* per *format_type* (or the configured default)."""
    if format_type is None:
        format_type = getattr(settings, 'JSON_API_FORMAT_KEYS', False)

    if format_type == 'camelize':
        value = inflection.camelize(value, False)
    elif format_type == 'capitalize':
        value = inflection.camelize(value)
    elif format_type == 'underscore':
        value = inflection.underscore(value)
    elif format_type == 'dasherize':
        # inflection can't dasherize camelCase; underscore first
        value = inflection.dasherize(inflection.underscore(value))
    return value
Example #16
0
def from_request_get_feed_params(request):
    """Given the request, return an object that contains the parameters.

    :param request: The request obj
    :type request: Flask request
    :return: Select parameters passed in through headers or the url
    :rtype: object
    """
    args = request.args
    username, user_id = current_user.identifier, current_user.id

    feed_params = {
        "filters": args.get('filters'),
        "authors": args.get('authors'),
        "start": int(args.get('start', 0)),
        "results": int(args.get('results', 10)),
        # normalise the sort field name to snake_case
        "sort_by": inflection.underscore(args.get('sort_by', 'updated_at')),
        "sort_desc": not bool(args.get('sort_asc', '')),
        "username": username,
        "user_id": user_id,
    }

    user_obj = (db_session.query(User)
                          .filter(User.id == user_id)
                          .first())

    if user_obj:
        feed_params["subscriptions"] = user_obj.subscriptions
    return feed_params
Example #17
0
    def __init__(self, name, bases, dict):
        """Metaclass initialiser: register the experiment subclass.

        Derives a snake_case slug from the class name, validates it, and
        records the class in the module-level EXPERIMENTS registry.
        """
        super(ExperimentMeta, self).__init__(name, bases, dict)

        # Special case: don't do experiment processing on the base class
        if (
            name == 'Experiment' and
            self.__module__ == ExperimentMeta.__module__
        ):
            return

        slug = inflection.underscore(name)

        # Slugs are capped at 48 characters -- presumably a storage or
        # display constraint; not visible from this file.
        if len(slug) > 48:
            raise ValueError("Experiment name too long")

        # Duplicate registration is a programming error; report both
        # definitions so the clash is easy to locate.
        if slug in EXPERIMENTS:
            raise AssertionError(
                "Experiment %s defined multiple times (as %s.%s and %s.%s)" % (
                    slug,
                    dict['__module__'],
                    dict['__qualname__'],
                    EXPERIMENTS[slug].__module__,
                    EXPERIMENTS[slug].__qualname__,
                ),
            )

        validate_experiment(self)

        self.slug = slug
        EXPERIMENTS[slug] = self
Example #18
0
def format_keys(obj, format_type=None):
    """
    Takes either a dict or list and returns it with camelized keys only if
    REST_EMBER_FORMAT_KEYS is set.

    :format_type: Either 'camelize' or 'underscore'
    """
    enabled = getattr(settings, 'REST_EMBER_FORMAT_KEYS', False)
    if not enabled or format_type not in ('camelize', 'underscore'):
        # Formatting disabled or unsupported: pass through unchanged.
        return obj

    if isinstance(obj, dict):
        if format_type == 'camelize':
            transform = lambda k: inflection.camelize(k, False)
        else:
            transform = inflection.underscore
        return {
            transform(key): format_keys(value, format_type)
            for key, value in obj.items()
        }
    if isinstance(obj, list):
        return [format_keys(item, format_type) for item in obj]
    return obj
    def __init__(self, field):
        """Wrap a schema *field* and derive XSD naming metadata.

        Computes ``name`` (dasherized), ``typename`` (CamelCase) and the
        occurrence indicators from the field's name/title and ``required``
        flag.
        """
        self.field = field

        # Try to name this XSD type

        # Note: should we use field.context.key ?
        # The problem is that when dealing with List/Dict fields, the value_type
        # field also inherits the parent's context

        name = field.getName() or str(inflection.parameterize(field.title))
        if not name:
            # Fall back to an explicitly tagged name, if any.
            try:
                name = field.getTaggedValue("name")
            except KeyError:
                pass

        assert name, "The field should be named"

        # Normalise once, then derive the two public spellings from it.
        name = inflection.underscore(name)
        self.typename = inflection.camelize(name)
        self.name = inflection.dasherize(name)

        # Compute occurence indicators

        self.min_occurs = 1 if field.required else 0
        self.max_occurs = 1
Example #20
0
    def _configure_sideloads(self, meta):
        """
        Assemble configuration for each sideload.

        Each entry in ``Meta.sideloads`` is validated, matched against the
        model's relations and turned into a ``Sideload`` instance on
        ``self.sideloads``.
        """
        self.sideloads = []
        configs = []
        for conf in getattr(meta, 'sideloads', []):
            assert isinstance(conf, tuple) and len(conf) >= 2 \
                and len(conf) <= 3, (
                '`Meta.sideloads` must be a list of tuples in the following '
                'format: (<model class>, <serializer class>, '
                '<queryset instance (optional)>)'
            )
            model, serializer = conf[:2]
            # Default queryset: all objects of the sideloaded model.
            queryset = conf[0].objects.all() if (len(conf) == 2) else conf[2]
            configs.append((model, serializer, queryset))

        relations = get_field_info(self.model).relations
        # list() the values: dict views are not indexable, so the original
        # `fields[...]` lookup raised TypeError on Python 3.
        fields = list(self.base_serializer.fields.values())
        for field_name, info in relations.items():
            try:
                conf = configs[[t[0] for t in configs].index(info.related)]
            except ValueError:
                # No sideload configured for this related model.
                continue
            field = fields[[f.source for f in fields].index(field_name)]
            key_name = getattr(conf[1].Meta, 'base_key',
                               underscore(conf[0].__name__))
            self.sideloads.append(Sideload(
                field=field, model=conf[0], serializer=conf[1],
                queryset=conf[2], key_name=pluralize(key_name)
            ))
    def get_template_list(self):
        """Retrives a list of template names to use for when rendering the
        template.

        When no
        :attr:`~flask_generic_views.core.TemplateResponseMixin.template_name`
        is set then the following will be provided instead:

        * A template based on :attr:`template_name_suffix`, the model, and the
          current blueprint. The model ``BlogArticle`` in  blueprint
          ``blogging`` would generate the template name
          ``blogging/blog_article_list.html``, no blueprint would generate
          ``blog_article_list.html``

        :returns: list of template names
        :rtype: list

        """
        try:
            # Prefer the names supplied by the base implementation.
            return super(MultipleObjectTemplateResponseMixin,
                         self).get_template_list()
        except NotImplementedError:
            # Derive a name from the model when the base class has none.
            names = []
            model = self.get_model()
            if model:
                template = self._format_template_name(
                    underscore(model.__name__))
                names.append(template)
            if not names:
                # Nothing could be derived either; re-raise the original.
                raise
            return names
Example #22
0
 def __new__(cls, name, bases, namespace, **kwds):
     """Metaclass hook: build the element mapping for a new model class.

     Collects property descriptors from the bases and the class body,
     determines the element type (vertex/edge/vertex property), assigns a
     default ``__label__`` and attaches the computed mapping/properties.
     """
     props = {}
     if name == 'VertexProperty':
         element_type = name.lower()
     elif bases:
         # Inherit the element type from the first base and merge the
         # property maps from all bases.
         element_type = bases[0].__type__
         if element_type not in ['vertex', 'edge']:
             element_type = bases[0].__name__.lower()
         for base in bases:
             base_props = getattr(base, '__properties__', {})
             props.update(base_props)
     else:
         element_type = name.lower()
     namespace['__type__'] = element_type
     if not namespace.get('__label__', None):
         # Default label: snake_cased class name.
         namespace['__label__'] = inflection.underscore(name)
     new_namespace = {}
     # Drop any inherited 'id' property -- presumably backend-managed;
     # confirm.
     props.pop('id', None)
     for k, v in namespace.items():
         if isinstance(v, abc.BaseProperty):
             if element_type == 'edge' and hasattr(v, 'cardinality'):
                 raise exception.MappingError(
                     'Edge property cannot have set/list cardinality')
             props[k] = v
             # Replace the raw property with its descriptor on the class.
             v = v.__descriptor__(k, v)
         new_namespace[k] = v
     new_namespace['__mapping__'] = mapper.create_mapping(namespace,
                                                          props)
     new_namespace['__properties__'] = props
     result = type.__new__(cls, name, bases, new_namespace)
     return result
Example #23
0
def to_url_case(string):
    ''' Converts input to what a url should look like.
        Right now, it's snake case
        className -> class_name
        '''

    return inflection.underscore(string)
Example #24
0
def get_resource_name(view):
    """
    Return the name of a resource
    """
    try:
        # is the resource name set directly on the view?
        resource_name = view.resource_name
    except AttributeError:
        try:
            # was it set in the serializer Meta class?
            resource_name = view.serializer_class.Meta.resource_name
        except AttributeError:
            # camelCase the name of the model if it hasn't been set
            # in either of the other places
            try:
                name = view.serializer_class.Meta.model.__name__
            except AttributeError:
                try:
                    name = view.model.__name__
                except AttributeError:
                    name = view.__class__.__name__
            resource_name = name[:1].lower() + name[1:]

    # List endpoints report the pluralised resource name.
    if hasattr(view, 'action') and view.action == 'list':
        resource_name = inflection.pluralize(resource_name)
    return inflection.underscore(resource_name)
    def _render_relationships(self, resource):
        """Render the resource's relationships."""
        primary_key_val = getattr(resource, self.primary_key)
        related_models = resource.__mapper__.relationships.keys()

        # Optionally dasherize the relationship names for the payload.
        if self.dasherize:
            mapped = {name: dasherize(underscore(name))
                      for name in related_models}
        else:
            mapped = {name: name for name in related_models}

        base = '/{}/{}'.format(resource.__tablename__, primary_key_val)
        relationships = {}
        for model in related_models:
            rel = mapped[model]
            relationships[rel] = {
                'links': {
                    'self': '{}/relationships/{}'.format(base, rel),
                    'related': '{}/{}'.format(base, rel),
                }
            }
        return relationships
Example #26
0
def translate_remote(config, setting):
    """
        Translate config names from server to equivalents usable
        in mycroft-core.

        Args:
                config:     base config to populate
                setting:    remote settings to be translated
    """
    IGNORED_SETTINGS = ["uuid", "@type", "active", "user", "device"]

    for k, v in iteritems(setting):
        if k not in IGNORED_SETTINGS:
            # Translate the CamelCase values stored remotely into the
            # Python-style names used within mycroft-core.
            key = inflection.underscore(re.sub(r"Setting(s)?", "", k))
            if isinstance(v, dict):
                # Nested settings group: merge recursively into config.
                config[key] = config.get(key, {})
                translate_remote(config[key], v)
            elif isinstance(v, list):
                if is_remote_list(v):
                    # Lists of remote setting objects become nested dicts.
                    if key not in config:
                        config[key] = {}
                    translate_list(config[key], v)
                else:
                    config[key] = v
            else:
                config[key] = v
    def _render_attributes(self, resource):
        """Render the resources's attributes.

        Raises AttributeError when a configured field is a foreign key or
        relationship, which must not appear in `attributes`.
        """
        attributes = {}
        attrs_to_ignore = set()

        # Collect foreign-key column names and relationship keys -- per
        # the json-api spec they cannot be rendered as attributes.
        for key, relationship in resource.__mapper__.relationships.items():
            attrs_to_ignore.update(set(
                [column.name for column in relationship.local_columns]).union(
                    {key}))

        if self.dasherize:
            mapped_fields = {x: dasherize(underscore(x)) for x in self.fields}
        else:
            mapped_fields = {x: x for x in self.fields}

        for attribute in self.fields:
            if attribute == self.primary_key:
                continue
            # Per json-api spec, we cannot render foreign keys
            # or relationsips in attributes.
            if attribute in attrs_to_ignore:
                raise AttributeError
            # The original wrapped the lines below in a try/except
            # AttributeError that only re-raised -- a no-op, removed.
            value = getattr(resource, attribute)
            if isinstance(value, datetime.datetime):
                # Serialise datetimes as ISO 8601 strings.
                attributes[mapped_fields[attribute]] = value.isoformat()
            else:
                attributes[mapped_fields[attribute]] = value

        return attributes
Example #28
0
def _add_enums(root_object, target_object):
    """
    Look for enums in the given object to create string constants <ENUM_CLASSNAME>_<ENUM_VALUE>.

    :param dict root_object: the object which may contain enums
    :param object target_object: The target object where to add the string constants to
    """
    for i in root_object.keys():
        enum = None
        propinfo = root_object.propinfo(i)
        if 'enum' in propinfo:
            enum = propinfo
        # .get() avoids a KeyError when a property has no 'type' entry.
        if propinfo.get('type') == 'array':
            if 'enum' in propinfo['items']:
                enum = propinfo['items']
        if not enum:
            continue

        # Constant prefix: the enum title (underscored) when present,
        # otherwise the property name, always upper-cased.
        enum_class = str(i).upper()
        if 'title' in enum:
            enum_class = str(inflection.underscore(enum['title'])).upper()
        enum_class += "_"
        for val in enum['enum']:
            enum_value = enum_class + inflection.parameterize(val, '_').upper()
            setattr(target_object, enum_value, val)
Example #29
0
    def expose(self, model, route='/api', access_control=None, resource_class=Resource, **kwargs):
        """
        this adds methods for updating/adding the objects defined by model

        eg: if you expose User(MongoModel) class this will add

            POST /api/users => create
            PUT /api/users/:id: => update
            PATCH /api/user/:id: => update
            DELETE /api/user/:id: => delete
            GET /api/user/:id: => returns user
            GET /api/users => returns all users (you can use ?limit=... )

        """
        endpoint_path = route + '/' + inflection.pluralize(inflection.underscore(model.__name__))
        endpoint = endpoint_path
        item_path = '%s/<id>' % endpoint_path
        # Honour the resource_class parameter -- the original always
        # instantiated Resource, silently ignoring custom classes.
        resource = resource_class(model=model, access_control=access_control)
        self._add_api_method(endpoint_path, resource.list_,
                             methods=['GET'], endpoint=endpoint + '/list')
        self._add_api_method(item_path, resource.get_,
                             methods=['GET'], endpoint=endpoint + '/get')

        # NOTE(review): PUT is registered on the collection path while the
        # docstring documents /:id: -- confirm which is intended.
        self._add_api_method(endpoint_path, resource.put_,
                             methods=['PUT'], endpoint=endpoint + '/put')

        self._add_api_method(item_path, resource.delete_,
                             methods=['DELETE'], endpoint=endpoint + '/delete')

        # '/post' and '/patch': the original dropped the '/' separator,
        # producing endpoint names like '/api/userspost'.
        self._add_api_method(endpoint_path, resource.post_,
                             methods=['POST'], endpoint=endpoint + '/post')

        self._add_api_method(item_path, resource.patch_,
                             methods=['PATCH'], endpoint=endpoint + '/patch')
Example #30
0
def normalize_doc_meta(raw_meta):
    """Normalize raw doc metadata: snake_case each key, join each
    value list into a single newline-separated string."""
    return {
        inflection.underscore(name): '\n'.join(lines)
        for name, lines in raw_meta.items()
    }
Example #31
0
def get_id(x) -> str:
    """Return the snake_case identifier for *x* (based on its str() form)."""
    text = str(x)
    return inflection.underscore(text)
Example #32
0
 def name(self):
     """Return the snake_case name derived from the concrete class's name."""
     cls_name = self.__class__.__name__
     return inflection.underscore(cls_name)
Example #33
0
# Touch one attribute first -- presumably this forces a lazy loader to fully
# populate gl.__dict__ before we iterate it (TODO confirm against the gl pkg).
gl.GL_QUADS
content = gl.__dict__
if '_module' in content:
    # some builds wrap the real module; unwrap to get the full namespace
    content = content['_module'].__dict__

# Re-export every GL name into this module without its prefix:
#   GL_QUADS   -> QUADS
#   GLsizei    -> sizei
#   glVertex3f -> vertex3f (snake_cased)
# GLU* / glu* names and PFNGL* function-pointer typedefs are skipped.
for name, value in content.items():

    if name.startswith('GLU_'):
        continue
    if name.startswith('GLU'):
        continue
    elif len(name) > 4 and name[3].isupper() and name.startswith('glu'):
        continue

    elif name.startswith('GL_'):
        new_name = name[3:]
    elif name.startswith('GL'):
        new_name = name[2:]
    elif len(name) > 3 and name[2].isupper() and name.startswith('gl'):
        new_name = inflection.underscore(name[2:])

    elif name.startswith('PFNGL'):
        continue
    else:
        continue

    # `used` is maintained elsewhere in this file; the assert guards against
    # two GL names collapsing to the same stripped alias
    assert new_name not in used, name
    used.add(new_name)
    globals()[new_name] = value
Example #34
0
    def data_cleaning(self, df1):
        """Clean the raw sales dataframe and return it.

        Steps: snake_case the column names, parse ``date``, fill missing
        values with business-rule defaults (large distance for absent
        competitors, the row's own date parts for the *_since_* columns),
        derive ``month_map``/``is_promo``, and cast the *_since_* columns
        to int.

        :param df1: raw dataframe with the original CamelCase columns.
        :return: the same dataframe object, mutated in place.
        """
        ### 1.2 Rename Columns
        cols_old = [
            'Store', 'DayOfWeek', 'Date', 'Open', 'Promo', 'StateHoliday',
            'SchoolHoliday', 'StoreType', 'Assortment', 'CompetitionDistance',
            'CompetitionOpenSinceMonth', 'CompetitionOpenSinceYear', 'Promo2',
            'Promo2SinceWeek', 'Promo2SinceYear', 'PromoInterval'
        ]

        #using the snakecase type
        snakecase = lambda x: inflection.underscore(x)
        cols_new = list(map(snakecase, cols_old))
        df1.columns = cols_new

        ### 1.4 Data Types
        #changing the date type from object to datetime
        df1['date'] = pd.to_datetime(df1['date'])

        ### 1.6 Fillout missing values
        # competition_distance
        # for complete the NaN elements, just apply a lambda function with a value > df1['competition_distance'].max()
        df1['competition_distance'] = df1['competition_distance'].apply(
            lambda x: 2000000.0 if math.isnan(x) else x)

        # competition_open_since_month
        # change the NaN elements to date
        df1['competition_open_since_month'] = df1.apply(
            lambda x: x['date'].month
            if math.isnan(x['competition_open_since_month']) else x[
                'competition_open_since_month'],
            axis=1)

        # competition_open_since_year
        # equal to competition_since_month
        df1['competition_open_since_year'] = df1.apply(
            lambda x: x['date'].year
            if math.isnan(x['competition_open_since_year']) else x[
                'competition_open_since_year'],
            axis=1)

        # promo2_since_week
        df1['promo2_since_week'] = df1.apply(
            lambda x: x['date'].week
            if math.isnan(x['promo2_since_week']) else x['promo2_since_week'],
            axis=1)

        # promo2_since_year
        df1['promo2_since_year'] = df1.apply(
            lambda x: x['date'].year
            if math.isnan(x['promo2_since_year']) else x['promo2_since_year'],
            axis=1)

        #promo_interval
        #Changing names of month to numbers makes the manipulation easier
        # NOTE(review): month 2 maps to 'Fev' (Portuguese); if the source
        # promo_interval strings use 'Feb', February promos will never match
        # in the is_promo check below -- verify against the raw data.
        month_map = {
            1: 'Jan',
            2: 'Fev',
            3: 'Mar',
            4: 'Apr',
            5: 'May',
            6: 'Jun',
            7: 'Jul',
            8: 'Aug',
            9: 'Sep',
            10: 'Oct',
            11: 'Nov',
            12: 'Dec'
        }

        df1['promo_interval'].fillna(0, inplace=True)

        #there's a list, use .map instead of .apply
        df1['month_map'] = df1['date'].dt.month.map(month_map)

        # is_promo = 1 only when the sale month appears in the comma-separated
        # promo_interval string; rows with no promo interval (filled 0) get 0
        df1['is_promo'] = df1[['promo_interval', 'month_map']].apply(
            lambda x: 0 if x['promo_interval'] == 0 else 1
            if x['month_map'] in x['promo_interval'].split(',') else 0,
            axis=1)

        ### 1.7 Change Types
        #converting competition_open_since_month, competition_open_since_year, promo2_since_week, promo2_since_year  from float to int
        df1['competition_open_since_month'] = df1[
            'competition_open_since_month'].astype(int)
        df1['competition_open_since_year'] = df1[
            'competition_open_since_year'].astype(int)

        df1['promo2_since_week'] = df1['promo2_since_week'].astype(int)
        df1['promo2_since_year'] = df1['promo2_since_year'].astype(int)

        return df1
Example #35
0
    def extract_included(cls, fields, resource, resource_instance,
                         included_resources, included_cache):
        """
        Adds related data to the top level included key when the request includes
        ?include=example,example_field2

        Recurses through the requested include paths ('a.b.c'), serializing
        each related object and storing it in ``included_cache`` keyed by
        (type, id). ``included_cache`` is mutated in place.
        """
        # this function may be called with an empty record (example: Browsable Interface)
        if not resource_instance:
            return

        current_serializer = fields.serializer
        context = current_serializer.context
        included_serializers = utils.get_included_serializers(
            current_serializer)
        # work on a copy so the caller's include list is not consumed,
        # and normalize camelCase include names to snake_case field names
        included_resources = copy.copy(included_resources)
        included_resources = [
            inflection.underscore(value) for value in included_resources
        ]

        for field_name, field in iter(fields.items()):
            # Skip URL field
            if field_name == api_settings.URL_FIELD_NAME:
                continue

            # Skip fields without relations or serialized data
            if not isinstance(field,
                              (relations.RelatedField,
                               relations.ManyRelatedField, BaseSerializer)):
                continue

            try:
                included_resources.remove(field_name)
            except ValueError:
                # Skip fields not in requested included resources
                # If no child field, directly continue with the next field
                if field_name not in [
                        node.split('.')[0] for node in included_resources
                ]:
                    continue

            relation_instance = cls.extract_relation_instance(
                field, resource_instance)
            if isinstance(relation_instance, Manager):
                # resolve Django related managers to their queryset
                relation_instance = relation_instance.all()

            serializer_data = resource.get(field_name)

            if isinstance(field, relations.ManyRelatedField):
                # re-serialize the to-many relation with its full
                # included serializer (the field itself only holds ids)
                serializer_class = included_serializers[field_name]
                field = serializer_class(relation_instance,
                                         many=True,
                                         context=context)
                serializer_data = field.data

            if isinstance(field, relations.RelatedField):
                if relation_instance is None or not serializer_data:
                    continue

                many = field._kwargs.get('child_relation', None) is not None

                if isinstance(field, ResourceRelatedField) and not many:
                    # skip objects already present in the included cache
                    already_included = serializer_data['type'] in included_cache and \
                        serializer_data['id'] in included_cache[serializer_data['type']]

                    if already_included:
                        continue

                serializer_class = included_serializers[field_name]
                field = serializer_class(relation_instance,
                                         many=many,
                                         context=context)
                serializer_data = field.data

            # strip this field's prefix from nested include paths
            # ('author.bio' -> 'bio') for the recursive call
            new_included_resources = [
                key.replace('%s.' % field_name, '', 1)
                for key in included_resources
                if field_name == key.split('.')[0]
            ]

            if isinstance(field, ListSerializer):
                serializer = field.child
                relation_type = utils.get_resource_type_from_serializer(
                    serializer)
                relation_queryset = list(relation_instance)

                if serializer_data:
                    # serializer_data and relation_queryset are positionally
                    # aligned: pair each serialized dict with its instance
                    for position in range(len(serializer_data)):
                        serializer_resource = serializer_data[position]
                        nested_resource_instance = relation_queryset[position]
                        resource_type = (relation_type or
                                         utils.get_resource_type_from_instance(
                                             nested_resource_instance))
                        serializer_fields = utils.get_serializer_fields(
                            serializer.__class__(nested_resource_instance,
                                                 context=serializer.context))
                        new_item = cls.build_json_resource_obj(
                            serializer_fields, serializer_resource,
                            nested_resource_instance, resource_type,
                            getattr(serializer, '_poly_force_type_resolution',
                                    False))
                        included_cache[new_item['type']][new_item['id']] = \
                            utils.format_field_names(new_item)
                        # recurse for deeper include paths on this item
                        cls.extract_included(
                            serializer_fields,
                            serializer_resource,
                            nested_resource_instance,
                            new_included_resources,
                            included_cache,
                        )

            if isinstance(field, Serializer):
                relation_type = utils.get_resource_type_from_serializer(field)

                # Get the serializer fields
                serializer_fields = utils.get_serializer_fields(field)
                if serializer_data:
                    new_item = cls.build_json_resource_obj(
                        serializer_fields, serializer_data, relation_instance,
                        relation_type,
                        getattr(field, '_poly_force_type_resolution', False))
                    included_cache[new_item['type']][
                        new_item['id']] = utils.format_field_names(new_item)
                    # recurse for deeper include paths on the to-one relation
                    cls.extract_included(
                        serializer_fields,
                        serializer_data,
                        relation_instance,
                        new_included_resources,
                        included_cache,
                    )
 def __get__(self, obj, type):
     """Descriptor: lazily derive __tablename__ from the owner class name
     (snake_cased) and cache it on the class for subsequent lookups."""
     cached = type.__dict__.get('__tablename__')
     if cached:
         return cached
     derived = inflection.underscore(type.__name__)
     setattr(type, '__tablename__', derived)
     return derived
Example #37
0
    def data_cleaning(self, df1):
        """Clean the raw sales dataframe and return it.

        Renames columns to snake_case, parses ``date``, fills NA values
        with documented business-rule defaults, derives
        ``month_map``/``is_promo``, and casts the *_since_* columns to int.

        :param df1: raw dataframe with the original CamelCase columns.
        :return: the same dataframe object, mutated in place.
        """

        cols_old = [
            'Store', 'DayOfWeek', 'Date', 'Open', 'Promo', 'StateHoliday',
            'SchoolHoliday', 'StoreType', 'Assortment', 'CompetitionDistance',
            'CompetitionOpenSinceMonth', 'CompetitionOpenSinceYear', 'Promo2',
            'Promo2SinceWeek', 'Promo2SinceYear', 'PromoInterval'
        ]

        # Changing write pattern to snakecase

        snakecase = lambda x: inflection.underscore(x)

        cols_new = list(map(snakecase, cols_old))

        # Renaming
        df1.columns = cols_new

        ## 1.3. Data Types

        df1['date'] = pd.to_datetime(df1['date'])

        ## 1.5. Fillout NA

        # First, we must analyze what every variable and data with NA values

        # competition_distance
        ## Assumption: if it is NA, maybe it is because the store doesnt have an near competitor
        ## What has been done: CONSIDER AN EXTREME DISTANT RANGE FROM NEAR COMPETITOR

        df1['competition_distance'] = df1['competition_distance'].apply(
            lambda x: 100000 if math.isnan(x) else x)

        # competition_open_since_month
        ## Assumption: there are two main reasons that this data is NA: (i) the store doesnt have a near competitor or
        ## (ii) the store has an near competitor, but it the opening data is unknown, either it is older than the store or data is unavailable
        ## What has been done: CONSIDER THE SAME MONTH THAT THE STORE HAS BEEN OPEN (because it maybe older than the store)

        # Error: EDIT Solved
        df1['competition_open_since_month'] = df1.apply(
            lambda x: x['date'].month
            if math.isnan(x['competition_open_since_month']) else x[
                'competition_open_since_month'],
            axis=1)
        #Alternative:
        #df1.competition_open_since_month.fillna(df1.date.dt.month, inplace=True)

        # competition_open_since_year
        ## Same ideia from variable above

        #Error: EDIT: Solved
        df1['competition_open_since_year'] = df1.apply(
            lambda x: x['date'].year
            if math.isnan(x['competition_open_since_year']) else x[
                'competition_open_since_year'],
            axis=1)
        #Alternative:
        #df1.competition_open_since_year.fillna(df1.date.dt.month, inplace=True)

        # promo2
        ## Doesnt have any NA

        # promo2_since_week
        ## Assumption: it is possible that the NA values are due to lack of participation/extension of any promotions.
        ## What I think should have been done: ALL NA VALUES ARE CONSIDERED "0", AS THE STORE IS NOT EXTENDING PROMOTIONS
        ## What has actually been done: CONSIDER THE SAME VALUE AS THE DATE
        ## NOTE(review): this fills with x['date'].month, not the ISO week --
        ## the other data_cleaning variants use x['date'].week here; verify intent.
        df1['promo2_since_week'] = df1.apply(
            lambda x: x['date'].month
            if math.isnan(x['promo2_since_week']) else x['promo2_since_week'],
            axis=1)

        # promo2_since_year
        ## Same logic as above
        df1['promo2_since_year'] = df1.apply(
            lambda x: x['date'].year
            if math.isnan(x['promo2_since_year']) else x['promo2_since_year'],
            axis=1)

        # promo_interval
        ## The problem here is that, it is hard to understand the way it has been inserted.
        ## What has been done: (i) Analyze the interval of the promo; (ii) Check if sale month is in promo_interval
        ## if it is, (iii) apply value 1 to new column is_promo, else 0.
        ## This way, it will be easy to check if sale is inside a promotion interval.

        # NOTE(review): month 2 maps to 'Fev' (Portuguese); if the raw
        # promo_interval strings use 'Feb', February promos will never match.
        month_map = {
            1: 'Jan',
            2: 'Fev',
            3: 'Mar',
            4: 'Apr',
            5: 'May',
            6: 'Jun',
            7: 'Jul',
            8: 'Aug',
            9: 'Sep',
            10: 'Oct',
            11: 'Nov',
            12: 'Dec',
        }

        df1['promo_interval'].fillna(0, inplace=True)
        df1['month_map'] = df1['date'].dt.month.map(month_map)

        df1['is_promo'] = df1[['promo_interval', 'month_map']].apply(
            lambda x: 0 if x['promo_interval'] == 0 else 1
            if x['month_map'] in x['promo_interval'].split(',') else 0,
            axis=1)

        # leftover notebook inspection calls; results are discarded
        df1.isna().sum()

        df1.sample(5).T

        ## 1.6. Change Types

        # Competion and promos since are portrayed as float types, while it should be int type.

        df1['competition_open_since_month'] = df1[
            'competition_open_since_month'].astype(int)
        df1['competition_open_since_year'] = df1[
            'competition_open_since_year'].astype(int)
        df1['promo2_since_week'] = df1['promo2_since_week'].astype(int)
        df1['promo2_since_year'] = df1['promo2_since_year'].astype(int)

        return df1
Example #38
0
def test_camel_to_snake():
    """Check CamelCase -> snake_case conversion for both helpers."""
    # The original comparisons were bare expressions whose results were
    # discarded, so this test could never fail; assert them explicitly.
    assert camel_to_snake("HelloWorld") == "hello_world"
    assert underscore("HelloWorld") == "hello_world"
Example #39
0
    def data_cleaning(self, df1):
        """Clean the raw sales dataframe and return it.

        Renames columns to snake_case, parses ``date``, fills NA values
        (large distance for absent competitors, the row's own date parts
        for the *_since_* columns), derives ``month_map``/``is_promo``,
        and casts the *_since_* columns to int.

        :param df1: raw dataframe with the original CamelCase columns.
        :return: the same dataframe object, mutated in place.
        """

        ## 1.1. Rename Columns
        cols_old = [
            'Store', 'DayOfWeek', 'Date', 'Open', 'Promo', 'StateHoliday',
            'SchoolHoliday', 'StoreType', 'Assortment', 'CompetitionDistance',
            'CompetitionOpenSinceMonth', 'CompetitionOpenSinceYear', 'Promo2',
            'Promo2SinceWeek', 'Promo2SinceYear', 'PromoInterval'
        ]

        snakecase = lambda x: inflection.underscore(x)

        cols_new = list(map(snakecase, cols_old))

        # rename
        df1.columns = cols_new

        ## 1.3. Data Types
        df1['date'] = pd.to_datetime(df1['date'])

        ## 1.5. Fillout NA
        #competition_distance
        # NA means no nearby competitor: fill with a value far beyond any real distance
        df1['competition_distance'] = df1['competition_distance'].apply(
            lambda x: 200000.0 if math.isnan(x) else x)

        #competition_open_since_month
        # NA filled with the sale row's own month
        df1['competition_open_since_month'] = df1.apply(
            lambda x: x['date'].month
            if math.isnan(x['competition_open_since_month']) else x[
                'competition_open_since_month'],
            axis=1)

        #competition_open_since_year
        # NA filled with the sale row's own year
        df1['competition_open_since_year'] = df1.apply(
            lambda x: x['date'].year
            if math.isnan(x['competition_open_since_year']) else x[
                'competition_open_since_year'],
            axis=1)

        #promo2_since_week
        df1['promo2_since_week'] = df1.apply(
            lambda x: x['date'].week
            if math.isnan(x['promo2_since_week']) else x['promo2_since_week'],
            axis=1)

        #promo2_since_year
        df1['promo2_since_year'] = df1.apply(
            lambda x: x['date'].year
            if math.isnan(x['promo2_since_year']) else x['promo2_since_year'],
            axis=1)

        #promo_interval
        # NOTE(review): month 2 maps to 'Fev' (Portuguese); if the raw
        # promo_interval strings use 'Feb', February promos will never match.
        month_map = {
            1: 'Jan',
            2: 'Fev',
            3: 'Mar',
            4: 'Apr',
            5: 'May',
            6: 'Jun',
            7: 'Jul',
            8: 'Aug',
            9: 'Sep',
            10: 'Oct',
            11: 'Nov',
            12: 'Dec'
        }

        df1['promo_interval'].fillna(0, inplace=True)

        df1['month_map'] = df1['date'].dt.month.map(month_map)

        # is_promo = 1 only when the sale month appears in the comma-separated
        # promo_interval string; rows with no promo interval (filled 0) get 0
        df1['is_promo'] = df1[['promo_interval', 'month_map']].apply(
            lambda x: 0 if x['promo_interval'] == 0 else 1
            if x['month_map'] in x['promo_interval'].split(',') else 0,
            axis=1)

        ## 1.6. Change Data Types
        # competiton
        df1['competition_open_since_month'] = df1[
            'competition_open_since_month'].astype(int)
        df1['competition_open_since_year'] = df1[
            'competition_open_since_year'].astype(int)

        # promo2
        df1['promo2_since_week'] = df1['promo2_since_week'].astype(int)
        df1['promo2_since_year'] = df1['promo2_since_year'].astype(int)

        return df1
Example #40
0
 def from_json(cls, js: Dict, *, document: Document):
     """Build an instance from a JSON dict.

     Keys are snake_cased; 'parent' and 'format' are discarded (they are
     not constructor arguments) before delegating to the constructor.
     """
     normalized = {}
     for raw_key, value in js.items():
         normalized[inflection.underscore(raw_key)] = value
     # these keys may appear in the payload but are not accepted by cls()
     normalized.pop("parent", None)
     normalized.pop("format", None)
     return cls(**normalized, document=document)
Example #41
0
 def unserialize_key(self, key):
     """Map an external key to its internal field name.

     The key is snake_cased; if the result is not a declared field in
     self._fields, a trailing underscore is appended.
     """
     snake = inflection.underscore(key)
     return snake if snake in self._fields else snake + '_'
Example #42
0
def create_example_files(example_input_directory, example_output_directory):
    """Render example YAML inputs as markdown documentation plus an index.

    For each category subdirectory of *example_input_directory*, every
    ``*.yaml`` file is converted into a markdown page (Jekyll front matter,
    the file's leading ``#`` comment block as prose, then the yaml body in
    a fenced block), and an index file linking every generated page is
    written to the output directory.

    :param example_input_directory: Path containing one subdirectory per
        example category.
    :param example_output_directory: Path to write output to; defaults to
        ``<input parent>/output_examples`` when None.
    """
    if example_output_directory is None:
        example_output_directory = example_input_directory.parent / 'output_examples'
    example_output_directory.mkdir(parents=True, exist_ok=True)

    # store a list of categories and examples
    categories_meta_data = dict()

    # march over each example directory for categories
    for example_directory in example_input_directory.iterdir():
        if example_directory.is_dir():
            example_category_title = inflection.humanize(
                inflection.underscore(example_directory.stem))

            # keep a list of example metas to build a table
            example_meta_datas = []

            # make sure that there are input files here
            # (materialize the glob: a generator is always truthy, so the
            # original `if yaml_files:` check could never skip empty dirs)
            yaml_files = list(example_directory.glob("*.yaml"))
            if yaml_files:
                # create the directory
                output_category_directory = example_output_directory / example_directory.stem
                output_category_directory.mkdir(parents=True, exist_ok=True)

                for yaml_file in yaml_files:
                    # compute the metadata
                    example_title = inflection.humanize(
                        inflection.underscore(yaml_file.stem))
                    example_doc_url = f'./{example_output_directory.name}/{example_directory.name}/{yaml_file.stem}.html'
                    example_file_url = BaseExampleUrl + example_directory.name + "/" + yaml_file.name
                    example_meta_datas.append(
                        ExampleMetaData(example_title, example_doc_url,
                                        example_file_url))

                    # copy over the input as markdown
                    header_region = True
                    with open(yaml_file, 'r') as yaml_file_input:
                        with open(
                                output_category_directory /
                            (yaml_file.stem + '.md'), 'w') as markdown_file:
                            # output the header information
                            markdown_file.write('---\n')
                            markdown_file.write('layout: default\n')
                            markdown_file.write(f'title: {example_title}\n')
                            markdown_file.write(f'parent: {ExamplesTitle}\n')
                            markdown_file.write(
                                f'grand_parent: {SimulationsTitle}\n')
                            markdown_file.write('nav_exclude: true\n')
                            markdown_file.write('---\n')

                            # copy over each row
                            for row in yaml_file_input:
                                if header_region:
                                    # check to see if we are done with header region
                                    if row.startswith('---'):
                                        header_region = False
                                        markdown_file.write(
                                            f'\n[{example_directory.name + "/" + yaml_file.name}]({example_file_url})\n'
                                        )
                                        markdown_file.write("```yaml\n")

                                if header_region:
                                    # strip the leading '#' and one space so
                                    # the comment reads as plain markdown
                                    markdown_file.write(
                                        row.replace("#", "",
                                                    1).replace(" ", "", 1))
                                else:
                                    markdown_file.write(row)

                            # close off input
                            markdown_file.write("\n```")

            if example_meta_datas:
                categories_meta_data[
                    example_category_title] = example_meta_datas

    # output an index file
    with open(example_output_directory / ExamplesIndex, 'w') as index_file:
        for category, meta_data_list in categories_meta_data.items():
            # write the header
            index_file.write(f'## {category}\n')
            for meta_data in meta_data_list:
                index_file.write(
                    f'- [{meta_data.title}]({meta_data.doc_url}) [📁]({meta_data.file_url})\n'
                )
            index_file.write('\n')
Example #43
0
def _make_slug(name):
    """Turn *name* into a dash-separated slug.

    Whitespace, '&' and '?' become underscores, the result is snake_cased,
    then all underscores are replaced with dashes.
    """
    cleaned = re.sub(r"[\s&?]", "_", name).strip("_")
    snake = inflection.underscore(cleaned)
    return snake.replace('_', '-')
Example #44
0
    def create(self, model, **kwargs):
        """Generate a Django view file (and its import) for *model*.

        Renders either a function-based view or, when ``class_type`` is
        passed in *kwargs* ('list', 'detail', 'form', 'update', 'create',
        'template', ...), a class-based view template, then writes the
        file and registers the import.

        :param model: model name (noun); validated and sanitized first.
        :param kwargs: optional ``class_type`` selecting the CBV flavor.
        """
        model = self.check_noun(model)
        model = sanitized_string(model)
        classname = inflection.camelize(model)
        singular = inflection.singularize(classname)
        plural = inflection.pluralize(classname)
        class_type = kwargs.get('class_type', None)
        # extra template-context entries accumulated per class_type below
        extra = {}

        # defaults for a function-based view
        filename = f"{model.lower()}.py"
        template = 'view-function.tpl'
        template_import = 'view-function-import.tpl'
        view_name = inflection.underscore(singular)
        route_name = f'{inflection.underscore(model)}/'
        template_name = f'{model.lower()}.html'

        if class_type is not None:
            # class-based view: switch templates and specialize names
            filename = f"{model.lower()}_{class_type}.py"
            template = 'view-class.tpl'
            template_import = 'view-class-import.tpl'

            if class_type not in ['template']:
                view_name = inflection.underscore(singular) + f'-{class_type}'
                template_name += f'_{class_type.lower()}.html'
                extra[
                    'object_name'] = plural if class_type == 'list' else singular

            if class_type in ['form', 'update', 'create']:
                extra['form_class'] = f'{classname}Form'

            if class_type in ['list']:
                route_name = f'{inflection.underscore(plural)}/'
                extra['pagination'] = True

            if class_type in ['detail', 'update']:
                # object views are addressed by slug
                route_name += '<slug:slug>'

        content = rendered_file_template(path=self.TEMPLATES_DIRECTORY,
                                         template=template,
                                         context={
                                             'model': model,
                                             'classname': classname,
                                             'class_type': class_type,
                                             'route_name': route_name,
                                             'view_name': view_name,
                                             'template_name': template_name,
                                             **extra,
                                         })

        import_content = rendered_file_template(path=self.TEMPLATES_DIRECTORY,
                                                template=template_import,
                                                context={
                                                    'model': model,
                                                    'classname': classname,
                                                    'class_type': class_type,
                                                })

        self.add_import(template=template_import, content=import_content)

        if self.create_file(path=self.cwd, filename=filename, content=content):

            resource = f"{model}_view."
            if class_type:
                resource = f"{classname}{class_type.capitalize()}View."
            log_success(DEFAULT_CREATE_MESSAGE.format(filename, resource))
Example #45
0
def test_underscore(camel, underscore):
    """Parametrized check: underscore(camel) yields the expected form."""
    result = inflection.underscore(camel)
    assert result == underscore
Example #46
0
def normalize_id(id: str):
    """Normalize *id* to dash-separated word characters."""
    dashed = inflection.dasherize(inflection.underscore(id))
    dashed = dashed.replace(' ', '-')
    # drop every character that is neither a word character nor a dash
    return re.sub(r'[^\w-]', '', dashed)
Example #47
0
def enum_name(verb_name, attr_name, value):
    """Compose the dotted enum identifier '<verb>.<CamelAttr>.<UPPER_VALUE>'."""
    attr_part = camelize(attr_name)
    value_part = underscore(value).upper().replace('.', '_')
    return '.'.join([verb_name, attr_part, value_part])
Example #48
0
def _get_table_name(classname):
    """Derive a plural snake_case table name from a class name."""
    snake = inflection.underscore(classname)
    return inflection.pluralize(snake)
# -*- coding: utf-8 -*-
"""
Created by: abibeka, edited by [email protected]
Purpose: Read WMATA schedule data; Schedule_082719-201718.mdb
"""
# https://stackoverflow.com/questions/39835770/read-data-from-pyodbc-to-pandas
import pandas as pd, os, inflection, numpy as np

# location of the Excel exports produced from the .mdb file
mdb_to_excel_file_loc = r'C:\Users\e043868\Documents\RawNav'

stop_file = os.path.join(mdb_to_excel_file_loc, '082719-201718 Stop.xlsx')
stop_dat = pd.read_excel(stop_file)
# drop any column that contains a NaN
stop_dat = stop_dat.dropna(axis=1)
# snake_case the column names
stop_dat.columns = [
    inflection.underscore(col_nm) for col_nm in stop_dat.columns
]
# align coordinate/heading columns with the stop_* naming used downstream
stop_dat.rename(columns={
    'longitude': 'stop_lon',
    'latitude': 'stop_lat',
    'heading': 'stop_heading'
},
                inplace=True)

pattern_file = os.path.join(mdb_to_excel_file_loc,
                            '082719-201718 Pattern.xlsx')
pattern_dat = pd.read_excel(pattern_file)
pattern_dat = pattern_dat[[
    'PatternID', 'TARoute', 'PatternName', 'Direction', 'Distance', 'CDRoute',
    'CDVariation', 'PatternDestination', 'RouteText', 'RouteKey',
    'PatternDestination2', 'RouteText2', 'Direction2', 'PatternName2',
    'TARoute2', 'PubRouteDir', 'PatternNotes', 'DirectionID'
Example #50
0
 def get_type(self):
     """Return the snake_case name of this object's concrete class."""
     cls = type(self)
     return underscore(cls.__name__)
Example #51
0
 def __getattr__(self, item):
     """Resolve *item* via its snake_case spelling when that names a
     declared property; otherwise fall back to normal attribute lookup."""
     snake = inflection.underscore(item)
     # use __getattribute__ directly to avoid re-entering this hook
     if snake in super().__getattribute__('properties')():
         return super().__getattribute__(snake)
     return super().__getattribute__(item)
Example #52
0
def to_python_name(x: str) -> str:
    """Snake_case *x* so it reads like a Python identifier."""
    converted = inflection.underscore(x)
    return converted
Example #53
0
def parse(filename):
    """Generate JSON-LD identity and vocabulary files from an ontology.

    Reads include/exclude settings from CONF_NAME, processes every
    pot:Class in *filename* (writing into result/pot/...), then processes
    the dli_include classes from the published DLI ontology (writing into
    result/dli/...), logging any requested DLI class that was not found.
    """

    classes_to_parse = []
    classes_to_exclude = []
    # load include/exclude configuration
    with open(CONF_NAME) as f:
        data = f.read()
    try:
        settings = json.loads(data)
    except (SyntaxError, json.decoder.JSONDecodeError):
        print('Settings conf file syntax error')
        exit()
    vocabulary_prefix = settings.get('vocabulary_prefix', '')
    # expand the 'dli:'/'pot:' CURIE prefixes into full URIRefs
    for c in settings.get('dli_include', []):
        classes_to_parse.append(
            URIRef(
                c.replace('dli:',
                          '{}ontologies/dli.jsonld#'.format(DLI_BASE))))
    for c in settings.get('pot_exclude', []):
        classes_to_exclude.append(
            URIRef(
                c.replace('pot:',
                          '{}ontologies/pot.jsonld#'.format(POT_BASE))))
    with open(filename) as f:
        data = f.read()

    # POT pass: one identity + one vocabulary file per pot:Class
    graph = Graph().parse(data=data, format='json-ld')
    class_triples = graph.triples(
        (None, URIRef('http://www.w3.org/1999/02/22-rdf-syntax-ns#type'),
         URIRef('{}ontologies/pot.jsonld#Class'.format(POT_BASE))))
    for class_triplet in map(Triplet._make, list(class_triples)):
        vocabulary_dict, vocabulary, exclude = build_vocabulary(
            graph,
            class_triplet,
            excludes=classes_to_exclude,
            vocabulary_prefix=vocabulary_prefix)
        if exclude:
            continue
        identity_dict = build_identity(graph, class_triplet, vocabulary)

        with open(
                'result/pot/identities/identity-{}.jsonld'.format(
                    underscore(class_triplet.subject.split('#')[1])),
                'w') as f:
            f.write(
                json.dumps({'@context': identity_dict},
                           indent=4,
                           separators=(',', ': ')))
        with open(
                'result/pot/vocabularies/{}{}.jsonld'.format(
                    vocabulary_prefix,
                    underscore(class_triplet.subject.split('#')[1])),
                'w') as f:
            f.write(
                json.dumps(vocabulary_dict, indent=4, separators=(',', ': ')))

    # DLI pass: only the classes explicitly listed in dli_include
    graph = Graph().parse(
        'https://digitalliving.github.io/standards/ontologies/dli.jsonld',
        format='json-ld')

    class_triples = graph.triples(
        (None, URIRef('http://www.w3.org/1999/02/22-rdf-syntax-ns#type'),
         URIRef('{}ontologies/dli.jsonld#Class'.format(DLI_BASE))))
    found_classes = []
    for class_triplet in map(Triplet._make, list(class_triples)):
        if not class_triplet.subject in classes_to_parse:
            continue
        found_classes.append(class_triplet.subject)
        vocabulary_dict, vocabulary, exclude = build_vocabulary(
            graph,
            class_triplet,
            PATH_BASE=POT_BASE,
            BASE_VOCABULARY=BASE_VOCABULARY_DLI,
            context_key='dli',
            excludes=[])
        identity_dict = build_identity(graph,
                                       class_triplet,
                                       vocabulary,
                                       BASE_IDENTITY=BASE_IDENTITY_DLI,
                                       context_key='dli')

        with open(
                'result/dli/identities/identity-{}.jsonld'.format(
                    underscore(class_triplet.subject.split('#')[1])),
                'w') as f:
            f.write(
                json.dumps({'@context': identity_dict},
                           indent=4,
                           separators=(',', ': ')))
        with open(
                'result/dli/vocabularies/{}{}.jsonld'.format(
                    vocabulary_prefix,
                    underscore(class_triplet.subject.split('#')[1])),
                'w') as f:
            f.write(
                json.dumps(vocabulary_dict, indent=4, separators=(',', ': ')))
    # log any requested DLI classes that were not found in the ontology
    with open('error.log', 'a+') as el:
        for class_to_parse in classes_to_parse:
            if class_to_parse not in found_classes:
                el.write('[{}] Class not found in DLI vocab: '.format(
                    datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")) +
                         str(class_to_parse) + '\n')
Example #54
0
def build_vocabulary(graph,
                     class_triplet,
                     PATH_BASE=POT_BASE,
                     BASE_VOCABULARY=BASE_VOCABULARY_POT,
                     context_key='pot',
                     excludes=None,
                     vocabulary_prefix=''):
    """Build the JSON-LD vocabulary document for one ontology class.

    :param graph: rdflib graph holding the ontology triples.
    :param class_triplet: Triplet whose ``subject`` is the class URI.
    :param PATH_BASE: base URL used to construct the vocabulary ``@id``.
    :param BASE_VOCABULARY: template dict deep-copied as the document skeleton.
    :param context_key: context prefix used for generated terms (e.g. 'pot').
    :param excludes: iterable of class URIs to skip (None = exclude nothing).
    :param vocabulary_prefix: optional filename prefix for the vocabulary.
    :returns: ``(vocabulary_dict, vocabulary_url, excluded)``; the first two
        are ``None`` when the class or any of its ancestors is excluded.
    """
    # BUGFIX: the default for ``excludes`` is None, but the membership tests
    # below require an iterable ("x in None" raises TypeError). Normalize so
    # callers may omit the argument.
    if excludes is None:
        excludes = []
    if class_triplet.subject in excludes:
        return None, None, True
    vocabulary_dict = deepcopy(BASE_VOCABULARY)
    class_key = class_triplet.subject.split('#')[1]
    vocabulary = '{}vocabularies/{}{}.jsonld#'.format(PATH_BASE,
                                                      vocabulary_prefix,
                                                      underscore(class_key))
    vocabulary_dict['@context']['vocab'] = vocabulary
    vocabulary_dict['@id'] = vocabulary[:-1]  # @id is the URL minus the '#'
    title, description = get_title_and_description(class_triplet.subject,
                                                   graph)
    total_attributes = []
    # Walk the rdfs:subClassOf chain upwards, collecting every attribute
    # whose rdfs:domain is one of this class' ancestors.
    parents = list(
        map(
            Triplet._make,
            list(
                graph.triples(
                    (class_triplet.subject, URIRef(SUBCLASS_REF), None)))))
    while parents:
        next_parents = []
        for parent in parents:
            if parent.object in excludes:
                # If any parent is in excludes, we exclude the whole vocab.
                return None, None, True
            if parent.subject == parent.object:
                continue  # self-referencing subclass; avoid an endless walk
            total_attributes += map(
                Triplet._make,
                list(
                    graph.triples(
                        (None,
                         URIRef('http://www.w3.org/2000/01/rdf-schema#domain'),
                         parent.object))))
            next_parents += list(
                map(
                    Triplet._make,
                    list(
                        graph.triples(
                            (parent.object, URIRef(SUBCLASS_REF), None)))))
        parents = next_parents

    # Attributes whose rdfs:domain is the class itself.
    total_attributes += map(
        Triplet._make,
        list(
            graph.triples(
                (None, URIRef('http://www.w3.org/2000/01/rdf-schema#domain'),
                 class_triplet.subject))))
    supported_class = {
        "@id": "{}:{}".format(context_key, class_key),
        "@type": "{}:{}".format(context_key, class_key),
        "dli:title": title,
        "dli:description": description,
    }
    # Every class supports a mandatory "name" and "data" attribute.
    supported_attributes = [
        {
            "@type": "{}:SupportedAttribute".format(context_key),
            "dli:attribute": "{}:name".format(context_key),
            "dli:title": "name",
            "dli:description": "name",
            "dli:required": True
        },
        {
            "@type": "{}:SupportedAttribute".format(context_key),
            "dli:attribute": "dli:data",
            "dli:title": "data",
            "dli:description": "data",
            "dli:required": True,
            "dli:valueType": "xsd:object"
        },
    ]
    for domain in total_attributes:
        key = domain.subject.split('#')[1]
        if key.lower() == 'name':
            continue  # already covered by the mandatory "name" entry above
        title, description = get_title_and_description(domain.subject, graph)
        supported_types = get_supported_types(domain.subject, graph,
                                              context_key)
        supported_attribute = {
            "@type": "{}:SupportedAttribute".format(context_key),
            "dli:attribute": "{}:{}".format(context_key, key),
            "dli:title": title,
            "dli:description": description,
            "dli:required": False
        }
        if len(supported_types) > 0:
            supported_attribute['dli:valueType'] = supported_types
        # Deduplicate: only append if no attribute with this name exists yet.
        if not next((attribute for attribute in supported_attributes
                     if attribute["dli:attribute"] ==
                     supported_attribute["dli:attribute"]), None):
            supported_attributes.append(supported_attribute)
    supported_class['{}:supportedAttribute'.format(
        context_key)] = supported_attributes
    vocabulary_dict['{}:supportedClass'.format(context_key)] = [
        supported_class,
    ]

    return vocabulary_dict, vocabulary, False
def snake(text):
    """Return *text* rewritten in snake_case."""
    converted = inflection.underscore(text)
    return converted
Example #56
0
def handler(context, event):
    """Detect faces in an image via the Azure Face API and return a table.

    The event body is the UTF-8 image URL. Requires the FACE_API_KEY and
    FACE_API_BASE_URL environment variables.

    :param context: nuclio context (provides logger and response helpers).
    :param event: nuclio event whose ``body`` holds the image URL as bytes.
    :returns: ``_build_response`` with a fancy-grid table of per-face
        attributes (200), or an error response (400/503).
    """

    # extract the stuff we need
    image_url = event.body.decode('utf-8').strip()
    key = os.environ.get('FACE_API_KEY')
    base_url = os.environ.get('FACE_API_BASE_URL')

    if key is None:
        context.logger.warn('Face API key not set, cannot continue')
        return _build_response(context,
                               'Function misconfigured: Face API key not set',
                               503)

    if base_url is None:
        context.logger.warn('Face API base URL not set, cannot continue')
        return _build_response(
            context, 'Function misconfigured: Face API base URL not set', 503)

    if not image_url:
        context.logger.warn('No URL given in request body')
        return _build_response(context, 'Image URL required', 400)

    # configure cognitive face wrapper
    cf.Key.set(key)
    cf.BaseUrl.set(base_url)

    # attempt to request using the provided info
    try:
        context.logger.info(
            'Requesting detection from Face API: {0}'.format(image_url))
        detected_faces = cf.face.detect(
            image_url,
            face_id=False,
            attributes='age,gender,glasses,smile,emotion')
    except Exception as error:
        context.logger.warn('Face API error occurred: {0}'.format(error))
        return _build_response(context, 'Face API error occurred', 503)

    parsed_faces = []

    # determine the center point of each detected face and map it to its
    # attributes, as well as clean up the retrieved data for viewing comfort
    for face in detected_faces:
        coordinates = face['faceRectangle']
        attributes = face['faceAttributes']

        center_x = coordinates['left'] + coordinates['width'] / 2
        center_y = coordinates['top'] + coordinates['height'] / 2

        # primary emotion = the one with the highest weighting (ascending
        # sort by score, then take the name of the last pair)
        primary_emotion = sorted(attributes['emotion'].items(),
                                 key=lambda item: item[1])[-1][0]

        parsed_face = {
            'x': center_x,
            'y': center_y,
            'position': '({0},{1})'.format(int(center_x), int(center_y)),
            'gender': inflection.humanize(attributes['gender']),
            'age': int(attributes['age']),
            'glasses': inflection.humanize(
                inflection.underscore(attributes['glasses'])),
            'primary_emotion': inflection.humanize(primary_emotion),
            'smile': '{0:.1f}%'.format(attributes['smile'] * 100),
        }

        parsed_faces.append(parsed_face)

    # sort according to center point, first x then y
    parsed_faces.sort(key=lambda face: (face['x'], face['y']))

    # prepare the data for tabulation: header row of positions, then one
    # row per attribute with the humanized attribute name as row label
    first_row = ('', ) + tuple(face['position'] for face in parsed_faces)

    def make_row(name):
        # PEP 8 (E731): named function instead of a lambda assignment
        return ((inflection.humanize(name), ) +
                tuple(face[name] for face in parsed_faces))

    other_rows = [
        make_row(name)
        for name in ['gender', 'age', 'primary_emotion', 'glasses', 'smile']
    ]

    # return the human-readable face data in a neat table format
    return _build_response(
        context,
        tabulate.tabulate([first_row] + other_rows,
                          headers='firstrow',
                          tablefmt='fancy_grid',
                          numalign='center',
                          stralign='center'), 200)
    def __init__(self, base, prefix=''):
        """
        Initialize the serializer.

        :param base: Declarative base instance
        :param prefix: Prefix prepended to API endpoint paths
        """
        self.base = base
        self.prefix = prefix
        self.models = {}
        for name, model in base._decl_class_registry.items():
            if name.startswith('_'):
                # Skip SQLAlchemy-internal registry entries.
                continue

            prepped_name = self._api_type_for_model(model)
            api_type = getattr(model, '__jsonapi_type_override__',
                               prepped_name)

            model_keys = set(model.__mapper__.all_orm_descriptors.keys())
            model_keys |= set(model.__mapper__.relationships.keys())

            model.__jsonapi_attribute_descriptors__ = {}
            model.__jsonapi_rel_desc__ = {}
            model.__jsonapi_permissions__ = {}
            model.__jsonapi_type__ = api_type
            # Bidirectional maps between API-facing (dasherized) and
            # Python-facing (snake_case) key names.
            model.__jsonapi_map_to_py__ = {
                dasherize(underscore(x)): x
                for x in model_keys
            }
            model.__jsonapi_map_to_api__ = {
                x: dasherize(underscore(x))
                for x in model_keys
            }

            for prop_name, prop_value in iterate_attributes(model):

                if hasattr(prop_value, '__jsonapi_desc_for_attrs__'):
                    defaults = {'get': None, 'set': None}
                    descriptors = model.__jsonapi_attribute_descriptors__
                    for attribute in prop_value.__jsonapi_desc_for_attrs__:
                        # BUGFIX: setdefault must receive a fresh copy per
                        # key; passing the same ``defaults`` object meant
                        # every attribute that missed shared ONE dict, so
                        # setting an action for one attribute silently set
                        # it for all of them.
                        descriptors.setdefault(attribute, dict(defaults))
                        attr_desc = descriptors[attribute]
                        for action in prop_value.__jsonapi_action__:
                            attr_desc[action] = prop_value

                if hasattr(prop_value, '__jsonapi_desc_for_rels__'):
                    defaults = {
                        'get': None,
                        'set': None,
                        'append': None,
                        'remove': None
                    }
                    rels_desc = model.__jsonapi_rel_desc__
                    for relationship in prop_value.__jsonapi_desc_for_rels__:
                        # Copy per key for the same aliasing reason as above.
                        rels_desc.setdefault(relationship, dict(defaults))
                        rel_desc = rels_desc[relationship]
                        for action in prop_value.__jsonapi_action__:
                            rel_desc[action] = prop_value

                if hasattr(prop_value, '__jsonapi_check_permission__'):
                    defaults = {
                        'view': [],
                        'create': [],
                        'edit': [],
                        'delete': [],
                        'remove': [],
                        'append': []
                    }
                    perm_obj = model.__jsonapi_permissions__
                    for check_for in prop_value.__jsonapi_chk_perm_for__:
                        # Copy the template AND its list values: a shallow
                        # dict() would still share the mutable lists.
                        perm_obj.setdefault(
                            check_for,
                            {k: list(v) for k, v in defaults.items()})
                        perm_idv = perm_obj[check_for]
                        check_perms = prop_value.__jsonapi_check_permission__
                        for check_perm in check_perms:
                            perm_idv[check_perm] = prop_value
            self.models[model.__jsonapi_type__] = model
Example #58
0
 def postprocess(self, path, key, data):
     """Rewrite *key* to snake_case; *data* passes through unchanged."""
     snake_key = inflection.underscore(key)
     return snake_key, data
Example #59
0
def to_snake_case(s: str) -> str:
    """Return *s* rewritten in snake_case."""
    result = inflection.underscore(s)
    return result
Example #60
0
 def __tablename__(self):
     """Derive the table name: pluralized, underscored class name."""
     plural_name = pluralize(self.__name__)
     return underscore(plural_name)