Example #1
 def load_field_info(self, field_info):
     field_datatype = field_info["Type"]
     is_key = field_info['Key'] in ('P', 'U')
     field_name = field_info['Field']
     if field_datatype == "NUMBER" and (field_info['Precision'] == 0
                                        or field_info['Precision'] == None):
         return IntField(field_name, is_key=is_key)
     elif field_datatype == 'NUMBER' and field_info['Precision'] > 0:
         return DecimalField(field_name, is_key=is_key)
     elif field_datatype == 'NVARCHAR2':
         return StringField(field_name, is_key=is_key)
     elif field_datatype == 'VARCHAR2':
         return StringField(field_name, is_key=is_key)
     elif field_datatype == 'FLOAT':
         return DecimalField(field_name, is_key=is_key)
     elif field_datatype == 'CHAR':
         return StringField(field_name, is_key=is_key)
     elif field_datatype == 'TIMESTAMP(6)':
         return DatetimeField(field_name, is_key=is_key)
     elif field_datatype == 'CLOB':
         return StringField(field_name, is_key=is_key)
     elif field_datatype == 'DATE':
         return DatetimeField(field_name, is_key=is_key)
     else:
         raise Exception('Unsupported type ' + field_datatype)
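
A minimal, self-contained sketch of the NUMBER/Precision branch above, which is the only non-obvious part of the dispatch. The stub field classes below are assumptions standing in for the project's real IntField/DecimalField:

class IntField:
    # Stub: only stores what the sketch needs.
    def __init__(self, name, is_key=False):
        self.name, self.is_key = name, is_key

class DecimalField(IntField):
    pass

def pick_number_field(field_info):
    # NUMBER with zero or missing precision maps to an integer column,
    # anything else to a decimal, mirroring the branch in the example.
    name = field_info['Field']
    is_key = field_info['Key'] in ('P', 'U')
    if not field_info.get('Precision'):
        return IntField(name, is_key=is_key)
    return DecimalField(name, is_key=is_key)

field = pick_number_field({'Field': 'EMP_ID', 'Type': 'NUMBER',
                           'Precision': 0, 'Key': 'P'})
print(type(field).__name__, field.is_key)  # IntField True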
Example #2
File: gui.py Project: JoselleAstrid/fgx-re
    def read_fields_from_spec(self):
        # Ordered field list. Determines the order we read in fields from
        # the input file, and the order we display widgets to view/edit
        # the field values.
        # Fields can have child fields too, so it's a tree structure.
        # The most common kind of parent-child relationship is when there's an
        # array, and each element in that array is a struct of multiple fields.
        self.fields = []

        # Read field specifications from CSV, one field per row.
        # We'll read in each field as a dict. The first row in the CSV file
        # specifies the dict keys.
        # Here we are automatically assuming a replay file, but maybe we'll
        # detect and support other GCI types like ghosts later.
        with open('replay_fields.csv', 'r') as f:

            for field_spec in csv.DictReader(f):

                try:
                    if field_spec['data_type'] == '':
                        raise FieldSpecError(
                            "One of the fields is missing a data_type.")
                    field_type = field_spec['data_type']

                    if field_type == 'array':
                        field = ArrayField(field_spec, QComboBoxWidget())
                    elif field_type == 'dict':
                        field = DictField(field_spec)
                    elif field_type == 'float':
                        field = FloatField(field_spec, QLineEditWidget())
                    elif field_type == 'hex':
                        field = HexField(field_spec, QLineEditWidget())
                    elif field_type == 'hex_long_read_only':
                        # We don't support editable QTextEdits, because
                        # QTextEdit doesn't have a callback that only responds
                        # to user edits; it responds to any function that edits
                        # the text content. This results in an infinite loop
                        # during the text -> value -> text back-and-forth
                        # writing that we do to ensure consistent text format.
                        field = LongHexField(field_spec,
                                             QTextEditReadOnlyWidget())
                    elif field_type == 'int':
                        field = IntField(field_spec, QLineEditWidget())
                    else:
                        raise FieldSpecError(
                            "One of the fields has an unsupported data_type:"
                            f" {field_type}")

                except FieldSpecError as e:
                    self.main_gui.display_error(f"Fields CSV error: {e}")
                    return False

                # Add top-level fields to self.fields
                if not field.parent:
                    self.fields.append(field)

        return True
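
As the comments above note, csv.DictReader keys each row by the header line, so the spec file is self-describing. A standalone illustration of that pattern; apart from data_type, the column names below are invented for the sketch and are not taken from the real replay_fields.csv:

import csv
import io

# Stand-in for replay_fields.csv: the first line names the dict keys.
fake_spec = io.StringIO(
    'label,data_type,offset\n'
    'course_id,int,0x04\n'
    'racer_array,array,0x08\n'
)

for field_spec in csv.DictReader(fake_spec):
    print(field_spec['data_type'], dict(field_spec))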
Example #3
File: model.py Project: frolkin28/my_orm
    def __new__(cls, name, parents, props):
        props['pk'] = IntField(primary_key=True)

        for field_name, field in props.items():
            if type(field) in [
                    IntField, VarcharField, FloatField, TextField, BooleanField
            ]:
                field._name = field_name  # for each Field object add _name

        if '__tablename__' in props.keys():
            props['query'] = Query(props['__tablename__'])

        return type.__new__(cls, name, parents, props)
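
A self-contained sketch of what a metaclass like this buys a model definition: the implicit pk field, the _name bookkeeping, and the query object. IntField and Query below are stubs, since my_orm's own classes are not imported here:

class IntField:
    def __init__(self, primary_key=False, unique=False):
        self.primary_key, self.unique = primary_key, unique

class Query:
    def __init__(self, tablename):
        self.tablename = tablename

class ModelMeta(type):
    def __new__(cls, name, parents, props):
        props['pk'] = IntField(primary_key=True)   # implicit primary key
        for field_name, field in props.items():
            if isinstance(field, IntField):
                field._name = field_name           # remember the attribute name
        if '__tablename__' in props:
            props['query'] = Query(props['__tablename__'])
        return type.__new__(cls, name, parents, props)

class Demo(metaclass=ModelMeta):
    __tablename__ = 'demo'
    a = IntField(unique=True)

print(Demo.pk.primary_key, Demo.a._name, Demo.query.tablename)  # True a demo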
Example #4
class User(Model):
    name = StringField(required=True)
    description = StringField()
    date_added = DateField()
    age = IntField()
    coins = FloatField()
    is_superuser = BooleanField()

    def __str__(self):
        return 'User {} ({})'.format(self.name, self.age)

    def __repr__(self):
        return '<User {} ({})>'.format(self.name, self.age)

    def update(self):
        pass

    class Meta:
        table_name = 'ormtable'
        order_by = ('name', )
Example #5
    def __new__(cls, name, bases, attrs):
        super_new = super(TopLevelDocumentMetaclass, cls).__new__
        # Classes defined in this package are abstract and should not have
        # their own metadata with DB collection, etc.
        # __metaclass__ is only set on the class with the __metaclass__
        # attribute (i.e. it is not set on subclasses). This differentiates
        # 'real' documents from the 'Document' class
        if attrs.get("__metaclass__") == TopLevelDocumentMetaclass:
            return super_new(cls, name, bases, attrs)

        collection = name.lower()

        id_field = None
        base_indexes = []
        base_meta = {}

        # Subclassed documents inherit collection from superclass
        for base in bases:
            if hasattr(base, "_meta") and "collection" in base._meta:
                collection = base._meta["collection"]

                # Propagate index options.
                for key in ("index_background", "index_drop_dups", "index_opts"):
                    if key in base._meta:
                        base_meta[key] = base._meta[key]

                id_field = id_field or base._meta.get("id_field")
                base_indexes += base._meta.get("indexes", [])

        meta = {
            "collection": collection,
            "max_documents": None,
            "max_size": None,
            "ordering": [],  # default ordering applied at runtime
            "indexes": [],  # indexes to be ensured at runtime
            "id_field": id_field,
            "index_background": True,
            "index_drop_dups": False,
            "index_opts": {},
            "queryset_class": QuerySet,
            "db_name": None,
            "force_insert": False,
            "hash_field": None,
            "hash_db_field": "_h",
            "sharded": True,
            "write_concern": 1,
        }
        meta.update(base_meta)

        # Apply document-defined meta options
        meta.update(attrs.get("meta", {}))
        attrs["_meta"] = meta

        # Set up collection manager, needs the class to have fields so use
        # DocumentMetaclass before instantiating CollectionManager object
        new_class = super_new(cls, name, bases, attrs)

        # Provide a default queryset unless one has been manually provided
        if not hasattr(new_class, "objects"):
            new_class.objects = QuerySetManager()

        user_indexes = [
            QuerySet._build_index_spec(new_class, spec)
            for spec in meta["indexes"]
        ] + base_indexes
        new_class._meta["indexes"] = user_indexes

        unique_indexes = []
        for field_name, field in new_class._fields.items():
            # Generate a list of indexes needed by uniqueness constraints
            if field.unique:
                field.required = True
                unique_fields = [field.db_field]

                # Add any unique_with fields to the back of the index spec
                if field.unique_with:
                    if isinstance(field.unique_with, basestring):
                        field.unique_with = [field.unique_with]

                    # Convert unique_with field names to real field names
                    unique_with = []
                    for other_name in field.unique_with:
                        parts = other_name.split(".")
                        # Lookup real name
                        parts = QuerySet._lookup_field(new_class, parts)
                        name_parts = [part.db_field for part in parts]
                        unique_with.append(".".join(name_parts))
                        # Unique field should be required
                        parts[-1].required = True
                    unique_fields += unique_with

                # Add the new index to the list
                index = [(f, pymongo.ASCENDING) for f in unique_fields]
                unique_indexes.append(index)

            # Check for custom primary key
            if field.primary_key:
                current_pk = new_class._meta["id_field"]
                if current_pk and current_pk != field_name:
                    raise ValueError("Cannot override primary key field")

                if not current_pk:
                    new_class._meta["id_field"] = field_name
                    # Make 'Document.id' an alias to the real primary key field
                    new_class.id = field

        new_class._meta["unique_indexes"] = unique_indexes

        if not new_class._meta["id_field"]:
            new_class._meta["id_field"] = "id"
            id_field = ObjectIdField(db_field="_id")
            id_field.name = "id"
            id_field.primary_key = True
            id_field.required = False
            new_class._fields["id"] = id_field
            new_class.id = new_class._fields["id"]

        if meta["hash_field"]:
            assert "shard_hash" not in new_class._fields, "You already have a shard hash"

            assert meta["hash_field"] in new_class._fields, "The field you want to hash doesn't exist"

            from fields import IntField

            field = IntField(db_field=meta["hash_db_field"], required=True)
            new_class._fields["shard_hash"] = field
            field.owner_document = new_class
            new_class.shard_hash = field

        return new_class
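
The unique/unique_with handling above ultimately reduces to building a compound index spec from db_field names. A tiny sketch of that last step; the field names are hypothetical and it only assumes pymongo is installed:

import pymongo

# db_field names collected from a unique field plus its unique_with companions.
unique_fields = ['email', 'tenant_id']
index = [(f, pymongo.ASCENDING) for f in unique_fields]
print(index)  # [('email', 1), ('tenant_id', 1)] -> appended to _meta['unique_indexes']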
Example #6
def IntInputRow(title, *args, **kw):
    return Row(
        (Label(title,
               tooltipText=kw.get('tooltipText')), IntField(*args, **kw)))
Example #7
    def __new__(cls, name, bases, attrs):
        super_new = super(TopLevelDocumentMetaclass, cls).__new__
        # Classes defined in this package are abstract and should not have
        # their own metadata with DB collection, etc.
        # __metaclass__ is only set on the class with the __metaclass__
        # attribute (i.e. it is not set on subclasses). This differentiates
        # 'real' documents from the 'Document' class
        if attrs.get('__metaclass__') == TopLevelDocumentMetaclass:
            return super_new(cls, name, bases, attrs)

        collection = name.lower()

        id_field = None
        base_indexes = []
        base_meta = {}

        # Subclassed documents inherit collection from superclass
        for base in bases:
            if hasattr(base, '_meta') and 'collection' in base._meta:
                collection = base._meta['collection']

                # Propagate index options.
                for key in ('index_background', 'index_drop_dups',
                            'index_opts'):
                    if key in base._meta:
                        base_meta[key] = base._meta[key]

                id_field = id_field or base._meta.get('id_field')
                base_indexes += base._meta.get('indexes', [])

        meta = {
            'collection': collection,
            'max_documents': None,
            'max_size': None,
            'ordering': [],  # default ordering applied at runtime
            'indexes': [],  # indexes to be ensured at runtime
            'id_field': id_field,
            'index_background': True,
            'index_drop_dups': False,
            'index_opts': {},
            'queryset_class': QuerySet,
            'db_name': None,
            'force_insert': False,
            'hash_field': None,
            'hash_db_field': '_h',
            'sharded': True,
            'write_concern': 1
        }
        meta.update(base_meta)

        # Apply document-defined meta options
        meta.update(attrs.get('meta', {}))
        attrs['_meta'] = meta

        # Set up collection manager, needs the class to have fields so use
        # DocumentMetaclass before instantiating CollectionManager object
        new_class = super_new(cls, name, bases, attrs)

        # Check if trying to use sweeper db; default to unsharded DB which has
        # unsharded shard as default primary to reduce load
        if hasattr(new_class, 'meta') and 'abstract' not in new_class.meta and\
                'shard_key' not in new_class.meta:
            import mongoengine.connection
            import inspect
            if mongoengine.connection._default_db == 'sweeper' and 'clroot/sweeper' in inspect.getfile(
                    new_class):
                new_class.meta['shard_key'] = False
                new_class._meta['db_name'] = 'sweeper-unsharded'

        # Provide a default queryset unless one has been manually provided
        if not hasattr(new_class, 'objects'):
            new_class.objects = QuerySetManager()

        user_indexes = [
            QuerySet._build_index_spec(new_class, spec)
            for spec in meta['indexes']
        ] + base_indexes
        new_class._meta['indexes'] = user_indexes

        unique_indexes = []
        for field_name, field in new_class._fields.items():
            # Generate a list of indexes needed by uniqueness constraints
            if field.unique:
                field.required = True
                unique_fields = [field.db_field]

                # Add any unique_with fields to the back of the index spec
                if field.unique_with:
                    if isinstance(field.unique_with, basestring):
                        field.unique_with = [field.unique_with]

                    # Convert unique_with field names to real field names
                    unique_with = []
                    for other_name in field.unique_with:
                        parts = other_name.split('.')
                        # Lookup real name
                        parts = QuerySet._lookup_field(new_class, parts)
                        name_parts = [part.db_field for part in parts]
                        unique_with.append('.'.join(name_parts))
                        # Unique field should be required
                        parts[-1].required = True
                    unique_fields += unique_with

                # Add the new index to the list
                index = [(f, pymongo.ASCENDING) for f in unique_fields]
                unique_indexes.append(index)

            # Check for custom primary key
            if field.primary_key:
                current_pk = new_class._meta['id_field']
                if current_pk and current_pk != field_name:
                    raise ValueError('Cannot override primary key field')

                if not current_pk:
                    new_class._meta['id_field'] = field_name
                    # Make 'Document.id' an alias to the real primary key field
                    new_class.id = field

        new_class._meta['unique_indexes'] = unique_indexes

        if not new_class._meta['id_field']:
            new_class._meta['id_field'] = 'id'
            id_field = ObjectIdField(db_field='_id')
            id_field.name = 'id'
            id_field.primary_key = True
            id_field.required = False
            new_class._fields['id'] = id_field
            new_class.id = new_class._fields['id']

        if meta['hash_field']:
            assert 'shard_hash' not in new_class._fields, \
                    "You already have a shard hash"

            assert meta['hash_field'] in new_class._fields, \
                    "The field you want to hash doesn't exist"

            from fields import IntField

            field = IntField(db_field=meta['hash_db_field'], required=True)
            new_class._fields['shard_hash'] = field
            field.owner_document = new_class
            new_class.shard_hash = field

        return new_class
Example #8
File: test.py Project: frolkin28/my_orm
class MyModel(Model):
    __tablename__ = 'test'
    a = IntField(unique=True)
    b = VarcharField(num_char=255)
    c = FloatField()
Example #9
    def __new__(cls, name, bases, attrs):
        super_new = super(TopLevelDocumentMetaclass, cls).__new__
        # Classes defined in this package are abstract and should not have
        # their own metadata with DB collection, etc.
        # __metaclass__ is only set on the class with the __metaclass__
        # attribute (i.e. it is not set on subclasses). This differentiates
        # 'real' documents from the 'Document' class
        if attrs.get('__metaclass__') == TopLevelDocumentMetaclass:
            return super_new(cls, name, bases, attrs)

        collection = name.lower()

        id_field = None
        base_indexes = []
        base_meta = {}

        # Subclassed documents inherit collection from superclass
        for base in bases:
            if hasattr(base, '_meta') and 'collection' in base._meta:
                collection = base._meta['collection']

                # Propagate index options.
                for key in ('index_background', 'index_drop_dups', 'index_opts'):
                    if key in base._meta:
                        base_meta[key] = base._meta[key]

                id_field = id_field or base._meta.get('id_field')
                base_indexes += base._meta.get('indexes', [])

        meta = {
            'collection': collection,
            'max_documents': None,
            'max_size': None,
            'ordering': [],  # default ordering applied at runtime
            'indexes': [],  # indexes to be ensured at runtime
            'id_field': id_field,
            'index_background': True,
            'index_drop_dups': False,
            'index_opts': {},
            'queryset_class': QuerySet,
            'db_name': None,
            'force_insert': False,
            'hash_field': None,
            'hash_db_field': '_h',
            'sharded': True,
            'write_concern': 1
        }
        meta.update(base_meta)

        # Apply document-defined meta options
        meta.update(attrs.get('meta', {}))
        attrs['_meta'] = meta

        # Set up collection manager, needs the class to have fields so use
        # DocumentMetaclass before instantiating CollectionManager object
        new_class = super_new(cls, name, bases, attrs)

        # Check if trying to use sweeper db; default to unsharded DB which has
        # unsharded shard as default primary to reduce load
        if hasattr(new_class, 'meta') and 'abstract' not in new_class.meta and\
                'shard_key' not in new_class.meta:
            import mongoengine.connection
            import inspect
            if mongoengine.connection._default_db == 'sweeper' and 'clroot/sweeper' in inspect.getfile(new_class):
                new_class.meta['shard_key'] = False
                new_class._meta['db_name'] = 'sweeper-unsharded'

        # Provide a default queryset unless one has been manually provided
        if not hasattr(new_class, 'objects'):
            new_class.objects = QuerySetManager()

        user_indexes = [QuerySet._build_index_spec(new_class, spec)
                        for spec in meta['indexes']] + base_indexes
        new_class._meta['indexes'] = user_indexes

        unique_indexes = []
        for field_name, field in new_class._fields.items():
            # Generate a list of indexes needed by uniqueness constraints
            if field.unique:
                field.required = True
                unique_fields = [field.db_field]

                # Add any unique_with fields to the back of the index spec
                if field.unique_with:
                    if isinstance(field.unique_with, basestring):
                        field.unique_with = [field.unique_with]

                    # Convert unique_with field names to real field names
                    unique_with = []
                    for other_name in field.unique_with:
                        parts = other_name.split('.')
                        # Lookup real name
                        parts = QuerySet._lookup_field(new_class, parts)
                        name_parts = [part.db_field for part in parts]
                        unique_with.append('.'.join(name_parts))
                        # Unique field should be required
                        parts[-1].required = True
                    unique_fields += unique_with

                # Add the new index to the list
                index = [(f, pymongo.ASCENDING) for f in unique_fields]
                unique_indexes.append(index)

            # Check for custom primary key
            if field.primary_key:
                current_pk = new_class._meta['id_field']
                if current_pk and current_pk != field_name:
                    raise ValueError('Cannot override primary key field')

                if not current_pk:
                    new_class._meta['id_field'] = field_name
                    # Make 'Document.id' an alias to the real primary key field
                    new_class.id = field

        new_class._meta['unique_indexes'] = unique_indexes

        if not new_class._meta['id_field']:
            new_class._meta['id_field'] = 'id'
            id_field = ObjectIdField(db_field='_id')
            id_field.name = 'id'
            id_field.primary_key = True
            id_field.required = False
            new_class._fields['id'] = id_field
            new_class.id = new_class._fields['id']

        if meta['hash_field']:
            assert 'shard_hash' not in new_class._fields, \
                    "You already have a shard hash"

            assert meta['hash_field'] in new_class._fields, \
                    "The field you want to hash doesn't exist"

            from fields import IntField

            field = IntField(db_field=meta['hash_db_field'], required=True)
            new_class._fields['shard_hash'] = field
            field.owner_document = new_class
            new_class.shard_hash = field

        return new_class
Example #10
    def __new__(cls, name, bases, attrs):
        super_new = super(TopLevelDocumentMetaclass, cls).__new__
        # Classes defined in this package are abstract and should not have
        # their own metadata with DB collection, etc.
        # __metaclass__ is only set on the class with the __metaclass__
        # attribute (i.e. it is not set on subclasses). This differentiates
        # 'real' documents from the 'Document' class
        if attrs.get('__metaclass__') == TopLevelDocumentMetaclass:
            return super_new(cls, name, bases, attrs)

        collection = name.lower()

        id_field = None
        base_indexes = []
        base_meta = {}

        # Subclassed documents inherit collection from superclass
        for base in bases:
            if hasattr(base, '_meta') and 'collection' in base._meta:
                collection = base._meta['collection']

                # Propagate index options.
                for key in ('index_background', 'index_drop_dups',
                            'index_opts'):
                    if key in base._meta:
                        base_meta[key] = base._meta[key]

                id_field = id_field or base._meta.get('id_field')
                base_indexes += base._meta.get('indexes', [])

        meta = {
            'collection': collection,
            'max_documents': None,
            'max_size': None,
            'ordering': [],  # default ordering applied at runtime
            'indexes': [],  # indexes to be ensured at runtime
            'id_field': id_field,
            'index_background': True,
            'index_drop_dups': False,
            'index_opts': {},
            'db_name': None,
            'force_insert': False,
            'hash_field': None,
            'hash_db_field': '_h',
            'sharded': True,
            'write_concern': 1
        }
        meta.update(base_meta)

        # Apply document-defined meta options
        meta.update(attrs.get('meta', {}))
        attrs['_meta'] = meta

        # Set up collection manager, needs the class to have fields so use
        # DocumentMetaclass before instantiating CollectionManager object
        new_class = super_new(cls, name, bases, attrs)

        # Check if trying to use sweeper db; default to unsharded DB which has
        # unsharded shard as default primary to reduce load
        if hasattr(new_class, 'meta') and 'abstract' not in new_class.meta and\
                'shard_key' not in new_class.meta:
            import mongoengine.connection
            import inspect
            if mongoengine.connection._default_db == 'sweeper' and 'clroot/sweeper' in inspect.getfile(
                    new_class):
                new_class.meta['shard_key'] = False
                new_class._meta['db_name'] = 'sweeper-unsharded'

        for field_name, field in new_class._fields.items():
            if field.primary_key:
                current_pk = new_class._meta['id_field']
                if current_pk and current_pk != field_name:
                    raise ValueError('Cannot override primary key field')

                if not current_pk:
                    new_class._meta['id_field'] = field_name
                    # Make 'Document.id' an alias to the real primary key field
                    new_class.id = field

        if not new_class._meta['id_field']:
            new_class._meta['id_field'] = 'id'
            id_field = ObjectIdField(db_field='_id')
            id_field.name = 'id'
            id_field.primary_key = True
            id_field.required = False
            new_class._fields['id'] = id_field
            new_class.id = new_class._fields['id']

        if meta['hash_field']:
            assert 'shard_hash' not in new_class._fields, \
                "You already have a shard hash"

            assert meta['hash_field'] in new_class._fields, \
                "The field you want to hash doesn't exist"

            from fields import IntField

            field = IntField(db_field=meta['hash_db_field'], required=True)
            new_class._fields['shard_hash'] = field
            field.owner_document = new_class
            new_class.shard_hash = field

        return new_class