Exemplo n.º 1
0
    def test_fk_copy(self):
        """Verify that copy() on both ForeignKey and ForeignKeyConstraint
        carries over every constructor keyword argument."""
        col_foo = Column('foo', Integer)
        col_bar = Column('bar', Integer)
        meta = MetaData()
        tbl = Table('t', meta, col_foo, col_bar)

        options = {
            'onupdate': "X",
            'ondelete': "Y",
            'use_alter': True,
            'name': 'f1',
            'deferrable': "Z",
            'initially': "Q",
            'link_to_name': True,
        }

        fk = ForeignKey(col_foo, **options)
        fk_constraint = ForeignKeyConstraint((col_foo,), (col_bar,), **options)

        tbl.append_constraint(fk_constraint)
        fk_copy = fk.copy()
        fk_constraint_copy = fk_constraint.copy()

        # Every keyword must survive the copy on both objects.
        for attr, expected in options.items():
            eq_(getattr(fk_copy, attr), expected)
            eq_(getattr(fk_constraint_copy, attr), expected)
Exemplo n.º 2
0
# Pre-built select over person-role rows that are still awaiting approval.
UnApprovedRolesSelect = PersonRolesTable.select(and_(
    PeopleTable.c.id==PersonRolesTable.c.person_id,
    PersonRolesTable.c.role_status!='approved')).alias('unapproved')

# The identity schema -- These must follow some conventions that TG
# understands and are shared with other Fedora services via the python-fedora
# module.

visits_table = Table('visit', metadata,
    Column('visit_key', String(40), primary_key=True),
    # BUG FIX: the default must be a callable.  The previous
    # ``default=datetime.now(pytz.utc)`` was evaluated once at import time,
    # stamping every visit row with the module-load timestamp.
    Column('created', DateTime, nullable=False,
           default=lambda: datetime.now(pytz.utc)),
    Column('expiry', DateTime)
)

visit_identity_table = Table('visit_identity', metadata,
    Column('visit_key', String(40), ForeignKey('visit.visit_key'),
        primary_key=True),
    Column('user_id', Integer, ForeignKey('people.id'), index=True),
    Column('ssl', Boolean)
)

serial_seq = Sequence('serial_seq')

#
# Mapped Classes
#

# Group names are configurable; these are the defaults.
admin_group = config.get('admingroup', 'accounts')
system_group = config.get('systemgroup', 'fas-system')
thirdparty_group = config.get('thirdpartygroup', 'thirdparty')
Exemplo n.º 3
0
class TableInformeDireccione(Base, GenericTable):
    """Association model linking a report (informe) to a user, storing the
    directory path for that pair."""
    __tablename__ = "TableInformeDirecciones"

    # Composite primary key: (idInforme, idUsuario).
    idInforme = Column(ForeignKey("TableInforme.id"), primary_key=True, nullable=False)
    idUsuario = Column(ForeignKey("TableUsuario.id"), primary_key=True, nullable=False)
    directorio = Column(Text)

    TableInforme = relationship("TableInforme")
    TableUsuario = relationship("TableUsuario")


# Association table between TableInforme and TableCausa (composite primary key).
t_TableRelInfCau = Table(
    "TableRelInfCau",
    metadata,
    Column("idInforme", ForeignKey("TableInforme.id"), primary_key=True, nullable=False),
    Column("idCausa", ForeignKey("TableCausa.idCausa"), primary_key=True, nullable=False),
)


class TableRelEscObj(Base, GenericTable):
    __tablename__ = "TableRelEscObj"

    idCausa = Column(
        ForeignKey("TableEscrito.idCausa"), primary_key=True, nullable=False
    )
    idEscrito = Column(
        ForeignKey("TableEscrito.idEscrito"), primary_key=True, nullable=False
Exemplo n.º 4
0
class SqlRun(Base):
    """
    DB model for :py:class:`mlflow.entities.Run`. These are recorded in ``runs`` table.
    """
    __tablename__ = 'runs'

    run_uuid = Column(String(32), nullable=False)
    """
    Run UUID: `String` (limit 32 characters). *Primary Key* for ``runs`` table.
    """
    name = Column(String(250))
    """
    Run name: `String` (limit 250 characters).
    """
    source_type = Column(String(20),
                         default=SourceType.to_string(SourceType.LOCAL))
    """
    Source Type: `String` (limit 20 characters). Can be one of ``NOTEBOOK``, ``JOB``, ``PROJECT``,
                 ``LOCAL`` (default), or ``UNKNOWN``.
    """
    source_name = Column(String(500))
    """
    Name of source recording the run: `String` (limit 500 characters).
    """
    entry_point_name = Column(String(50))
    """
    Entry-point name that launched the run: `String` (limit 50 characters).
    """
    user_id = Column(String(256), nullable=True, default=None)
    """
    User ID: `String` (limit 256 characters). Defaults to ``null``.
    """
    status = Column(String(20),
                    default=RunStatus.to_string(RunStatus.SCHEDULED))
    """
    Run Status: `String` (limit 20 characters). Can be one of ``RUNNING``, ``SCHEDULED`` (default),
                ``FINISHED``, ``FAILED``.
    """
    # BUG FIX: the default must be a callable.  ``default=int(time.time())``
    # was evaluated once at import time, so every run got the module-load
    # timestamp instead of its actual start time.
    start_time = Column(BigInteger, default=lambda: int(time.time()))
    """
    Run start time: `BigInteger`. Defaults to current system time.
    """
    end_time = Column(BigInteger, nullable=True, default=None)
    """
    Run end time: `BigInteger`.
    """
    source_version = Column(String(50))
    """
    Source version: `String` (limit 50 characters).
    """
    lifecycle_stage = Column(String(20), default=LifecycleStage.ACTIVE)
    """
    Lifecycle Stage of run: `String` (limit 20 characters).
                            Can be either ``active`` (default) or ``deleted``.
    """
    artifact_uri = Column(String(200), default=None)
    """
    Default artifact location for this run: `String` (limit 200 characters).
    """
    experiment_id = Column(Integer, ForeignKey('experiments.experiment_id'))
    """
    Experiment ID to which this run belongs to: *Foreign Key* into ``experiment`` table.
    """
    experiment = relationship('SqlExperiment',
                              backref=backref('runs', cascade='all'))
    """
    SQLAlchemy relationship (many:one) with :py:class:`mlflow.store.dbmodels.models.SqlExperiment`.
    """

    __table_args__ = (CheckConstraint(source_type.in_(SourceTypes),
                                      name='source_type'),
                      CheckConstraint(status.in_(RunStatusTypes),
                                      name='status'),
                      CheckConstraint(lifecycle_stage.in_(
                          LifecycleStage.view_type_to_stages(ViewType.ALL)),
                                      name='runs_lifecycle_stage'),
                      PrimaryKeyConstraint('run_uuid', name='run_pk'))

    @staticmethod
    def get_attribute_name(mlflow_attribute_name):
        """
        Resolves an MLflow attribute name to a `SqlRun` attribute name.
        """
        # Currently, MLflow Search attributes defined in `SearchUtils.VALID_SEARCH_ATTRIBUTE_KEYS`
        # share the same names as their corresponding `SqlRun` attributes. Therefore, this function
        # returns the same attribute name
        return mlflow_attribute_name

    def to_mlflow_entity(self):
        """
        Convert DB model to corresponding MLflow entity.

        :return: :py:class:`mlflow.entities.Run`.
        """
        run_info = RunInfo(run_uuid=self.run_uuid,
                           run_id=self.run_uuid,
                           experiment_id=str(self.experiment_id),
                           user_id=self.user_id,
                           status=self.status,
                           start_time=self.start_time,
                           end_time=self.end_time,
                           lifecycle_stage=self.lifecycle_stage,
                           artifact_uri=self.artifact_uri)

        run_data = RunData(
            metrics=[m.to_mlflow_entity() for m in self.latest_metrics],
            params=[p.to_mlflow_entity() for p in self.params],
            tags=[t.to_mlflow_entity() for t in self.tags])

        return Run(run_info=run_info, run_data=run_data)
class B(Base):
    """Child model: each row may point at one row of table ``a`` via ``a_id``."""
    __tablename__ = "b"
    id = Column(Integer, primary_key=True)
    a_id = Column(ForeignKey("a.id"))
    data = Column(String)
Exemplo n.º 6
0
def candidate_subclass(class_name, args, table_name=None, cardinality=None,
                       values=None):
    """
    Creates and returns a Candidate subclass with provided argument names,
    which are Context type. Creates the table in DB if does not exist yet.

    Import using:

    .. code-block:: python

        from smartmendr.ensemble_learner.models import candidate_subclass

    :param class_name: The name of the class, should be "camel case" e.g.
        NewCandidate
    :param args: A list of names of constituent arguments, which refer to the
        Contexts--representing mentions--that comprise the candidate
    :param table_name: The name of the corresponding table in DB; if not
        provided, is converted from camel case by default, e.g. new_candidate
    :param cardinality: The cardinality of the variable corresponding to the
        Candidate. By default is 2 i.e. is a binary value, e.g. is or is not
        a true mention.
    :param values: The list of values the candidate variable may take; if
        given together with ``cardinality``, their lengths must agree.
        Defaults to ``[True, False]`` (the binary case). Must not contain
        ``None`` or plain integers.
    """
    if table_name is None:
        table_name = camel_to_under(class_name)

    # If cardinality and values are None, default to binary classification
    if cardinality is None and values is None:
        values = [True, False]
        cardinality = 2

    # Else use values if present, and validate proper input
    elif values is not None:
        if cardinality is not None and len(values) != cardinality:
            raise ValueError("Number of values must match cardinality.")
        if None in values:
            raise ValueError("`None` is a protected value.")
        # Note that bools are instances of ints in Python...
        if any(isinstance(v, int) and not isinstance(v, bool) for v in values):
            raise ValueError("Default usage of values is consecutive integers. Leave values unset if attempting to define values as integers.")
        cardinality = len(values)

    # If cardinality is specified but not values, fill in with ints
    elif cardinality is not None:
        values = list(range(cardinality))

    class_spec = (args, table_name, cardinality, values)
    if class_name in candidate_subclasses:
        # Re-declaring an identical spec returns the cached class; a
        # conflicting spec is an error.
        if class_spec == candidate_subclasses[class_name][1]:
            return candidate_subclasses[class_name][0]
        else:
            raise ValueError('Candidate subclass ' + class_name + ' already exists in memory with incompatible ' +
                             'specification: ' + str(candidate_subclasses[class_name][1]))
    else:
        # Set the class attributes == the columns in the database
        class_attribs = {

            # Declares name for storage table
            '__tablename__' : table_name,

            # Connects candidate_subclass records to generic Candidate records
            'id' : Column(
                Integer,
                ForeignKey('candidate.id', ondelete='CASCADE'),
                primary_key=True
            ),

            # Store values & cardinality information in the class only
            'values' : values,
            'cardinality' : cardinality,

            # Polymorphism information for SQLAlchemy
            '__mapper_args__' : {'polymorphic_identity': table_name},

            # Helper method to get argument names
            '__argnames__' : args,
        }

        # Create named arguments, i.e. the entity mentions comprising the relation
        # mention
        # For each entity mention: id, cid ("canonical id"), and pointer to Context
        unique_args = []
        for arg in args:

            # Primary arguments are constituent Contexts, and their ids
            class_attribs[arg + '_id'] = Column(
                Integer, ForeignKey('context.id', ondelete='CASCADE'), index=True)
            class_attribs[arg] = relationship(
                'Context',
                backref=backref(
                    table_name + '_' + arg + 's',
                    cascade_backrefs=False,
                    cascade='all, delete-orphan'
                ),
                cascade_backrefs=False,
                foreign_keys=class_attribs[arg + '_id']
            )
            unique_args.append(class_attribs[arg + '_id'])

            # Canonical ids, to be set post-entity normalization stage
            class_attribs[arg + '_cid'] = Column(String)

        # Add unique constraints to the arguments
        class_attribs['__table_args__'] = (
            UniqueConstraint(*unique_args),
        )

        # Create class
        C = type(class_name, (Candidate,), class_attribs)

        # Create table in DB
        if not smartmendr_engine.dialect.has_table(smartmendr_engine, table_name):
            C.__table__.create(bind=smartmendr_engine)

        candidate_subclasses[class_name] = C, class_spec

        return C
Exemplo n.º 7
0
    college_id = Column(Integer, ForeignKey('college.id'), nullable=False)  # college
    college = relationship("College")
    department_id = Column(Integer,
                           ForeignKey('department.id'),
                           nullable=False)  # department
    department = relationship("Department")
    tel_num = Column(String(30), unique=True, nullable=False)  # phone number
    birthday = Column(Date)

    def __repr__(self):
        # NOTE(review): assumes ``self.name`` is always a non-None str — confirm.
        return self.name


# Association table for the many-to-many teacher <-> student relationship.
assoc_teacher_student = Table(
    'teacher_student',
    Model.metadata,
    Column('id', Integer, primary_key=True),
    Column('teacher_id', Integer, ForeignKey('teacher.id')),
    Column('student_id', Integer, ForeignKey('student.id')),
)


# 学生
class Student(Model):
    id = Column(Integer, primary_key=True)
    stu_num = Column(String(30), unique=True, nullable=False)  #学号
    name = Column(String(50), nullable=False)
    college_id = Column(Integer, ForeignKey('college.id'), nullable=False)
    college = relationship("College")
    major_id = Column(Integer, ForeignKey('major.id'), nullable=False)
    major = relationship("Major")
    mclass_id = Column(Integer, ForeignKey('mclass.id'), nullable=False)
    mclass = relationship("MClass")
    teachers = relationship('Teacher',
Exemplo n.º 8
0
from typing import Any

from sqlalchemy import Column, Integer, String, Text, ForeignKey, Table
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, backref, Session


Base = declarative_base()

# page <-> category association table backing the many-to-many
# WikiPage.categories / Category.pages relationship.
category_association = Table('category_association', Base.metadata,
                             Column('page_id', Integer, ForeignKey('pages.id')),
                             Column('category_id', Integer, ForeignKey('categories.id'))
                             )


class WikiPage(Base):
    __tablename__ = 'pages'
    id = Column(Integer, primary_key=True)
    revision_id = Column(Integer)
    latest_revision_online = Column(Integer)
    content = Column(Text)
    title = Column(String(64))
    redirect_to_id = Column(Integer, ForeignKey('pages.id'))
    redirect_from = relationship('WikiPage', backref=backref('redirect_to', remote_side=[id]))
    categories = relationship('Category', secondary=category_association, backref='pages')

    def __str__(self) -> str:
        return "(id={}, title='{}', revision_id={}, latest_revision_online={}, " \
               "redirect_from={}, " \
               "redirect_to_id={}, categories={}, " \
               "content='{}')" \
Exemplo n.º 9
0
Arquivo: db.py Projeto: theonlyx/xnote
__all__ = ['TAGS', 'NOTES', 'NOTE_TAGS', 'create_tables']

meta = MetaData()

TAGS = Table(
    'tags', meta,
    Column('id', Integer, primary_key=True, autoincrement=True),
    Column('tag', String, nullable=False),
    UniqueConstraint('tag')
)

NOTES = Table(
    'notes', meta,
    Column('id', Integer, primary_key=True, autoincrement=True),
    # BUG FIX: this column was also named 'time', colliding with the
    # timestamp column below; its ForeignKey('users.id') shows it is the
    # owning user's id.
    Column('user_id', Integer, ForeignKey('users.id'), nullable=False),
    Column('time', DateTime, nullable=False, default=func.now()),
    Column('title', String, nullable=False),
    Column('body', String, nullable=False)
)

NOTE_TAGS = Table(
    'note_tags', meta,
    Column('note_id', Integer, ForeignKey('notes.id'), nullable=False),
    Column('tag_id', Integer, ForeignKey('tags.id'), nullable=False),
)


def create_tables(engine):
    """Create every table registered on ``meta`` using the given engine
    (existing tables are left untouched)."""
    meta.create_all(engine)
Exemplo n.º 10
0
        return self.name


class Benefit(Model):
    id = Column(Integer, primary_key=True)
    name = Column(String(50), unique=True, nullable=False)

    def __repr__(self):
        return self.name


# Association table for the many-to-many benefit <-> employee relationship.
assoc_benefits_employee = Table(
    'benefits_employee',
    Model.metadata,
    Column('id', Integer, primary_key=True),
    Column('benefit_id', Integer, ForeignKey('benefit.id')),
    Column('employee_id', Integer, ForeignKey('employee.id')),
)


def today():
    """Return the current local date as a ``YYYY-MM-DD`` string."""
    return datetime.date.today().isoformat()


class EmployeeHistory(Model):
    """History record linking an employee to a department."""
    id = Column(Integer, primary_key=True)
    department_id = Column(Integer,
                           ForeignKey('department.id'),
                           nullable=False)
    department = relationship("Department")
    employee_id = Column(Integer, ForeignKey('employee.id'), nullable=False)
    employee = relationship("Employee")
Exemplo n.º 11
0
class AvroSchema(Base, BaseModel):
    """Stored Avro schema tied to a topic, with helpers to attach docs and
    element ids to the schema tree and to validate schema JSON."""

    __tablename__ = 'avro_schema'

    id = Column(Integer, primary_key=True)

    # The JSON string representation of the avro schema.
    avro_schema = Column('avro_schema', Text, nullable=False)

    # Id of the topic that the schema is associated to.
    # It is a foreign key to Topic table.
    topic_id = Column(
        Integer,
        ForeignKey('topic.id'),
        nullable=False
    )

    # The schema_id where this schema is derived from.
    base_schema_id = Column(Integer, ForeignKey('avro_schema.id'))

    # Schema status: RW (read/write), R (read-only), Disabled
    status = Column(
        Enum(
            AvroSchemaStatus.READ_AND_WRITE,
            AvroSchemaStatus.READ_ONLY,
            AvroSchemaStatus.DISABLED,
            name='status'
        ),
        default=AvroSchemaStatus.READ_AND_WRITE,
        nullable=False
    )

    # Timestamp when the entry is created
    created_at = build_time_column(
        default_now=True,
        nullable=False
    )

    # Timestamp when the entry is last updated
    updated_at = build_time_column(
        default_now=True,
        onupdate_now=True,
        nullable=False
    )

    producers = relationship(Producer, backref="avro_schema")

    consumers = relationship(Consumer, backref="avro_schema")

    avro_schema_elements = relationship(
        AvroSchemaElement,
        backref="avro_schema"
    )

    @property
    def note(self):
        """The Note row attached to this schema, or ``None`` if absent."""
        note = session.query(
            Note
        ).filter(
            Note.reference_type == ReferenceTypeEnum.SCHEMA,
            Note.reference_id == self.id
        ).first()
        return note

    @property
    def avro_schema_json(self):
        """Parsed (dict/list) form of the stored schema JSON string."""
        return simplejson.loads(self.avro_schema)

    @avro_schema_json.setter
    def avro_schema_json(self, schema_json):
        # sort_keys keeps the stored string canonical, so equal schemas
        # serialize to equal strings.
        self.avro_schema = simplejson.dumps(schema_json, sort_keys=True)

    @property
    def avro_schema_with_doc(self):
        """Get the JSON representation of the Avro schema with the
        documentation and element Id of each doc-eligible element.
        """
        key_to_element_map = dict(
            (o.key, o) for o in self.avro_schema_elements
        )
        avro_schema_obj = schema.make_avsc_object(self.avro_schema_json)

        # Breadth-first walk of the schema tree, mirroring
        # _create_schema_elements_from_json below.
        schema_elements = deque([(avro_schema_obj, None)])
        # CONSISTENCY FIX: use deque truthiness like
        # _create_schema_elements_from_json, instead of ``len(...) > 0``.
        while schema_elements:
            schema_obj, parent_key = schema_elements.popleft()
            element_cls = _schema_to_element_map.get(schema_obj.__class__)
            if not element_cls:
                continue
            _schema_element = element_cls(schema_obj, parent_key)
            self._add_doc_to_schema(_schema_element, key_to_element_map)

            parent_key = _schema_element.key
            for nested_schema in _schema_element.nested_schema_objects:
                schema_elements.append((nested_schema, parent_key))

        return avro_schema_obj.to_json()

    # Property names injected into schema objects by _add_doc_to_schema.
    ELEMENT_ID_ATTR = 'element_id'
    DOC_ATTR = 'doc'

    def _add_doc_to_schema(self, schema_element, key_to_element_map):
        """Copy doc text and element id from the stored element (if any)
        onto the in-memory schema object."""
        element = key_to_element_map.get(schema_element.key)
        if not element:
            return

        schema_element.schema_obj.set_prop(self.DOC_ATTR, element.doc)
        schema_element.schema_obj.set_prop(self.ELEMENT_ID_ATTR, element.id)

    @classmethod
    def create_schema_elements_from_json(cls, avro_schema_json):
        """Get all the schema elements that exist in the given schema JSON.

        :param avro_schema_json: JSON representation of an Avro schema
        :return: List of AvroSchemaElement objects
        """

        avro_schema_elements = []
        elements = cls._create_schema_elements_from_json(avro_schema_json)
        for _schema_element, schema_obj in elements:
            avro_schema_element = AvroSchemaElement(
                key=_schema_element.key,
                element_type=_schema_element.element_type,
                doc=schema_obj.get_prop('doc')
            )
            avro_schema_elements.append(avro_schema_element)

        return avro_schema_elements

    @classmethod
    def _create_schema_elements_from_json(cls, avro_schema_json):
        """Walk the schema tree breadth-first and return a list of
        (schema_element, schema_obj) pairs for doc-eligible elements."""
        avro_schema_obj = schema.make_avsc_object(avro_schema_json)
        schema_elements = []
        schema_elements_queue = deque([(avro_schema_obj, None)])
        while schema_elements_queue:
            schema_obj, parent_key = schema_elements_queue.popleft()
            element_cls = _schema_to_element_map.get(schema_obj.__class__)
            if not element_cls:
                continue

            _schema_element = element_cls(schema_obj, parent_key)
            schema_elements.append((_schema_element, schema_obj))
            parent_key = _schema_element.key
            for nested_schema in _schema_element.nested_schema_objects:
                schema_elements_queue.append((nested_schema, parent_key))
        return schema_elements

    @classmethod
    def verify_avro_schema(cls, avro_schema_json):
        """Verify whether the given JSON representation is a valid Avro schema.

        :param avro_schema_json: JSON representation of the Avro schema
        :return: A tuple (is_valid, error) in which the first element
        indicates whether the given JSON is a valid Avro schema, and the
        second element is the error if it is not valid.
        """
        try:
            schema.make_avsc_object(avro_schema_json)
            return True, None
        except Exception as e:
            return False, repr(e)

    @classmethod
    def verify_avro_schema_has_docs(cls, avro_schema_json):
        """ Verify if the given Avro schema has docs.
        According to avro spec `doc` is supported by `record` type,
        all fields within the `record` and `enum`.

        :param avro_schema_json: JSON representation of the Avro schema

        :raises ValueError: avro_schema_json with missing docs
        :raises Exception: invalid avro_schema_json
        """
        elements = cls._create_schema_elements_from_json(
            avro_schema_json
        )
        schema_elements_missing_doc = [
            schema_element.key
            for schema_element, schema_obj in elements
            if not schema_element.has_docs_if_supported
        ]

        if schema_elements_missing_doc:
            # TODO DATAPIPE-970  implement better exception response during
            # registering avro schema with missing docs
            raise ValueError("Missing `doc` for Schema Elements(s) {}".format(
                ', '.join(schema_elements_missing_doc)
            ))
Exemplo n.º 12
0
class Address(Base):
    """
    The model for the address of a consumer

    ADDRESS MODEL
    -----------------------------------
    guid: UUID
        Unique ID assigned to the address (PK)
    user_id: UUID
        The user for whom the address is stored (FK to user.guid)
    block_id: UUID
        The block in which the address lies (FK to blocks.guid)
    door_number: String
        The door number of the address
    default: Boolean
        Whether this is the user's default address (True by default)
    name: String
        A short label for the address
    created_at: DateTime
        When the row was created (deferred, group 'defaults')
    updated_at: DateTime
        When the row was last updated (deferred, group 'defaults')
    """

    __tablename__ = "addresses"

    guid = Column(UUIDType(binary=False), primary_key=True)
    user_id = Column(
        UUIDType(binary=False), ForeignKey('user.guid'),
        nullable=False
    )
    block_id = Column(
        UUIDType(binary=False), ForeignKey('blocks.guid'),
        nullable=False
    )
    door_number = Column(String(120), nullable=False)  # door number
    default = Column(Boolean, nullable=False, default=True)
    name = Column(String(30), nullable=False)
    created_at = deferred(
        Column(DateTime, nullable=False, default=CURRENT_TIME()),
        group='defaults'
    )
    updated_at = deferred(
        Column(
            DateTime, nullable=False,
            default=CURRENT_TIME(), onupdate=func.now(),
        ),
        group='defaults'
    )

    @property
    def serialize(self):
        """
        Returns a JSON variant of the model
        """
        return {
            "guid": self.guid,
            "user_id": self.user_id,
            "name": self.name,
            "door_number": self.door_number,
            "block_id": self.block_id,
            "default": self.default
        }
Exemplo n.º 13
0
    # 只在一个表中添加relationship字段是可以的,为单向反射(无法反查)
    # backref是反向引用,一个表中的relationship中定义了backref,那么在另一个关系表中就直接找到对应的行
    # 不用backref也是可以的,详见sqlalchemy.schema.RelationshipProperty的属性
    # relationship 不在那边那边就是多的一方


# 使用外键操作有利于保证数据的完整性和一致性,但效率会低一些相比手工控制

#######################################################  多对多关系  #############################################
# https://docs.sqlalchemy.org/en/latest/orm/tutorial.html#orm-tutorial-many-to-many
# 多对多关系是用两个一对多关系来表示的,也就是说需要一张中间表
from sqlalchemy import Table

# Association (junction) table linking ``a`` rows to ``b`` rows.
a_b = Table('a_b', Base.metadata,
            Column('a_id', Integer, ForeignKey('a.id'), primary_key=True),
            Column('b_id', Integer, ForeignKey('b.id'), primary_key=True))


class A(Base):
    """Side "A" of the many-to-many A <-> B relationship via the ``a_b`` table."""
    __tablename__ = 'a'
    id = Column(Integer, primary_key=True)
    name = Column(String(32), nullable=False)
    b = relationship(
        'B',  # must be the class name
        secondary=a_b,  # the association (junction) table
        cascade="delete, delete-orphan",  # cascade deletes to related rows
        single_parent=True,  # required for delete-orphan across many-to-many
        passive_deletes=True  # let the database handle related deletes
    )
    # b = relationship('b', secondary=a_b,backref=backref('a')) would also work as a
    # reverse reference, so B would not need its own relationship(); the downside is
    # losing attribute auto-completion on dotted access.
Exemplo n.º 14
0
# @Last modified by:   zetasyanthis
# @Last modified time: 2017/07/21
# @License MIT
"""
This module contains definitions for association tables used in many-to-many
mappings.
"""

from sqlalchemy import Column, ForeignKey, Integer, Table

from myarchive.db.tag_db.tables.base import Base

# files <-> tags association; the composite PK prevents duplicate links.
at_file_tag = Table(
    'at_file_tag',
    Base.metadata,
    Column("file_id", Integer, ForeignKey("files.id"), primary_key=True),
    Column("tag_id", Integer, ForeignKey("tags.tag_id"), primary_key=True),
    info="Association table for mapping files to tags and vice versa.")

# tweets <-> tags association; same composite-PK pattern as above.
at_tweet_tag = Table(
    'at_tweet_tag',
    Base.metadata,
    Column("tweet_id", Integer, ForeignKey("tweets.id"), primary_key=True),
    Column("tag_id", Integer, ForeignKey("tags.tag_id"), primary_key=True),
    info="Association table for mapping tweets to tags and vice versa.")

at_ljcomment_tag = Table(
    'at_ljcomment_tag',
    Base.metadata,
    Column("lj_comment_id",
           Integer,
Exemplo n.º 15
0
class AdminLog(Base):
    """Log row recording an admin login."""
    __tablename__ = 'adminlog'

    # NOTE(review): mixes bare ``Column`` and ``db.Column`` — presumably both
    # resolve to sqlalchemy.Column; consider unifying for consistency.
    id = db.Column(Integer, primary_key=True)
    admin_id = Column(Integer, ForeignKey('admin.id'))
    ip = db.Column(db.String(100))  # login IP address
Exemplo n.º 16
0
    ForeignKey,
    Integer,
    JSON,
    String,
    Table,
)
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import relationship

Base = declarative_base()

# Association table linking notices to the CVEs they address.
notice_cves = Table(
    "notice_cves",
    Base.metadata,
    Column("notice_id", String, ForeignKey("notice.id")),
    Column("cve_id", String, ForeignKey("cve.id")),
)

# Association table linking notices to their external references.
notice_references = Table(
    "notice_references",
    Base.metadata,
    Column("notice_id", String, ForeignKey("notice.id")),
    Column("reference_id", Integer, ForeignKey("reference.id")),
)

notice_releases = Table(
    "notice_releases",
    Base.metadata,
    Column("notice_id", String, ForeignKey("notice.id")),
    Column("release_id", Integer, ForeignKey("release.id")),
Exemplo n.º 17
0
class Query(Model):
    """ORM model for SQL query"""

    __tablename__ = 'query'
    id = Column(Integer, primary_key=True)
    client_id = Column(String(11), unique=True, nullable=False)

    database_id = Column(Integer, ForeignKey('dbs.id'), nullable=False)

    # Store the tmp table into the DB only if the user asks for it.
    tmp_table_name = Column(String(256))
    user_id = Column(Integer, ForeignKey('ab_user.id'), nullable=True)
    status = Column(String(16), default=QueryStatus.PENDING)
    tab_name = Column(String(256))
    sql_editor_id = Column(String(256))
    schema = Column(String(256))
    sql = Column(Text)
    # Query to retrieve the results,
    # used only in case of select_as_cta_used is true.
    select_sql = Column(Text)
    executed_sql = Column(Text)
    # Could be configured in the superset config.
    limit = Column(Integer)
    limit_used = Column(Boolean, default=False)
    select_as_cta = Column(Boolean)
    select_as_cta_used = Column(Boolean, default=False)

    progress = Column(Integer, default=0)  # 1..100
    # # of rows in the result set or rows modified.
    rows = Column(Integer)
    error_message = Column(Text)
    # key used to store the results in the results backend
    results_key = Column(String(64), index=True)

    # Using Numeric in place of DateTime for sub-second precision
    # stored as seconds since epoch, allowing for milliseconds
    start_time = Column(Numeric(precision=20, scale=6))
    start_running_time = Column(Numeric(precision=20, scale=6))
    end_time = Column(Numeric(precision=20, scale=6))
    end_result_backend_time = Column(Numeric(precision=20, scale=6))
    tracking_url = Column(Text)

    changed_on = Column(DateTime,
                        default=datetime.utcnow,
                        onupdate=datetime.utcnow,
                        nullable=True)

    database = relationship('Database',
                            foreign_keys=[database_id],
                            backref=backref('queries',
                                            cascade='all, delete-orphan'))
    user = relationship(security_manager.user_model, foreign_keys=[user_id])

    __table_args__ = (sqla.Index('ti_user_id_changed_on', user_id,
                                 changed_on), )

    @property
    def limit_reached(self):
        """True when the row count hit the configured limit (and a limit was used)."""
        return self.rows == self.limit if self.limit_used else False

    def to_dict(self):
        """Serialize this query row for the SQL Lab frontend."""
        # NOTE(review): assumes changed_on and status are set even though the
        # columns are nullable — confirm all writers populate them.
        return {
            'changedOn': self.changed_on,
            'changed_on': self.changed_on.isoformat(),
            'dbId': self.database_id,
            'db': self.database.database_name,
            'endDttm': self.end_time,
            'errorMessage': self.error_message,
            'executedSql': self.executed_sql,
            'id': self.client_id,
            'limit': self.limit,
            'progress': self.progress,
            'rows': self.rows,
            'schema': self.schema,
            'ctas': self.select_as_cta,
            'serverId': self.id,
            'sql': self.sql,
            'sqlEditorId': self.sql_editor_id,
            'startDttm': self.start_time,
            'state': self.status.lower(),
            'tab': self.tab_name,
            'tempTable': self.tmp_table_name,
            'userId': self.user_id,
            'user': user_label(self.user),
            'limit_reached': self.limit_reached,
            'resultsKey': self.results_key,
            'trackingUrl': self.tracking_url,
        }

    @property
    def name(self):
        """Name property"""
        ts = datetime.now().isoformat()
        ts = ts.replace('-', '').replace(':', '').split('.')[0]
        tab = (self.tab_name.replace(' ', '_').lower()
               if self.tab_name else 'notab')
        tab = re.sub(r'\W+', '', tab)
        # Explicit keyword arguments instead of the fragile
        # ``format(**locals())``, which silently depends on local names.
        return 'sqllab_{tab}_{ts}'.format(tab=tab, ts=ts)
Exemplo n.º 18
0
def define_harvester_tables():
    """Declare the harvest_* tables on the shared metadata and wire the ORM
    mappers for the Harvest* domain classes.

    Populates the module-level ``harvest_*_table`` globals; must run before
    any of the mapped classes are used.
    """

    global harvest_source_table
    global harvest_job_table
    global harvest_object_table
    global harvest_object_extra_table
    global harvest_gather_error_table
    global harvest_object_error_table
    global harvest_log_table

    # A source is one remote endpoint to harvest datasets from.
    harvest_source_table = Table('harvest_source', metadata,
        Column('id', types.UnicodeText, primary_key=True, default=make_uuid),
        Column('url', types.UnicodeText, nullable=False),
        Column('title', types.UnicodeText, default=u''),
        Column('description', types.UnicodeText, default=u''),
        Column('config', types.UnicodeText, default=u''),
        Column('created', types.DateTime, default=datetime.datetime.utcnow),
        Column('type',types.UnicodeText,nullable=False),
        Column('active',types.Boolean,default=True),
        Column('user_id', types.UnicodeText, default=u''),
        Column('publisher_id', types.UnicodeText, default=u''),
        Column('frequency', types.UnicodeText, default=u'MANUAL'),
        Column('next_run', types.DateTime),
    )
    # Was harvesting_job
    harvest_job_table = Table('harvest_job', metadata,
        Column('id', types.UnicodeText, primary_key=True, default=make_uuid),
        Column('created', types.DateTime, default=datetime.datetime.utcnow),
        Column('gather_started', types.DateTime),
        Column('gather_finished', types.DateTime),
        Column('finished', types.DateTime),
        Column('source_id', types.UnicodeText, ForeignKey('harvest_source.id')),
        # status: New, Running, Finished
        Column('status', types.UnicodeText, default=u'New', nullable=False),
    )
    # A harvest_object contains a representation of one dataset during a
    # particular harvest
    harvest_object_table = Table('harvest_object', metadata,
        Column('id', types.UnicodeText, primary_key=True, default=make_uuid),
        # The guid is the 'identity' of the dataset, according to the source.
        # So if you reharvest it, then the harvester knows which dataset to
        # update because of this identity. The identity needs to be unique
        # within this CKAN.
        Column('guid', types.UnicodeText, default=u''),
        # When you harvest a dataset multiple times, only the latest
        # successfully imported harvest_object should be flagged 'current'.
        # The import_stage usually reads and writes it.
        Column('current',types.Boolean,default=False),
        Column('gathered', types.DateTime, default=datetime.datetime.utcnow),
        Column('fetch_started', types.DateTime),
        Column('content', types.UnicodeText, nullable=True),
        Column('fetch_finished', types.DateTime),
        Column('import_started', types.DateTime),
        Column('import_finished', types.DateTime),
        # state: WAITING, FETCH, IMPORT, COMPLETE, ERROR
        Column('state', types.UnicodeText, default=u'WAITING'),
        Column('metadata_modified_date', types.DateTime),
        Column('retry_times',types.Integer, default=0),
        Column('harvest_job_id', types.UnicodeText, ForeignKey('harvest_job.id')),
        Column('harvest_source_id', types.UnicodeText, ForeignKey('harvest_source.id')),
        Column('package_id', types.UnicodeText, ForeignKey('package.id', deferrable=True), nullable=True),
        # report_status: 'added', 'updated', 'not modified', 'deleted', 'errored'
        Column('report_status', types.UnicodeText, nullable=True),
        Index('harvest_job_id_idx', 'harvest_job_id'),
    )

    # New table
    harvest_object_extra_table = Table('harvest_object_extra', metadata,
        Column('id', types.UnicodeText, primary_key=True, default=make_uuid),
        Column('harvest_object_id', types.UnicodeText, ForeignKey('harvest_object.id')),
        Column('key',types.UnicodeText),
        Column('value', types.UnicodeText),
    )

    # New table
    harvest_gather_error_table = Table('harvest_gather_error',metadata,
        Column('id', types.UnicodeText, primary_key=True, default=make_uuid),
        Column('harvest_job_id', types.UnicodeText, ForeignKey('harvest_job.id')),
        Column('message', types.UnicodeText),
        Column('created', types.DateTime, default=datetime.datetime.utcnow),
    )
    # New table
    harvest_object_error_table = Table('harvest_object_error',metadata,
        Column('id', types.UnicodeText, primary_key=True, default=make_uuid),
        Column('harvest_object_id', types.UnicodeText, ForeignKey('harvest_object.id')),
        Column('message', types.UnicodeText),
        Column('stage', types.UnicodeText),
        Column('line', types.Integer),
        Column('created', types.DateTime, default=datetime.datetime.utcnow),
    )
    # Harvest Log table
    harvest_log_table = Table('harvest_log', metadata,
        Column('id', types.UnicodeText, primary_key=True, default=make_uuid),
        Column('content', types.UnicodeText, nullable=False),
        Column('level', types.Enum('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL', name='log_level')),
        Column('created', types.DateTime, default=datetime.datetime.utcnow),
    )

    # Map the domain classes onto the tables declared above.
    mapper(
        HarvestSource,
        harvest_source_table,
        properties={
            'jobs': relation(
                HarvestJob,
                lazy=True,
                backref=u'source',
                order_by=harvest_job_table.c.created,
            ),
        },
    )

    mapper(
        HarvestJob,
        harvest_job_table,
    )

    mapper(
        HarvestObject,
        harvest_object_table,
        properties={
            'package':relation(
                Package,
                lazy=True,
                backref='harvest_objects',
            ),
            'job': relation(
                HarvestJob,
                lazy=True,
                backref=u'objects',
            ),
            'source': relation(
                HarvestSource,
                lazy=True,
                backref=u'objects',
            ),

        },
    )

    mapper(
        HarvestGatherError,
        harvest_gather_error_table,
        properties={
            'job':relation(
                HarvestJob,
                backref='gather_errors'
            ),
        },
    )

    mapper(
        HarvestObjectError,
        harvest_object_error_table,
        properties={
            'object':relation(
                HarvestObject,
                backref=backref('errors', cascade='all,delete-orphan')
            ),
        },
    )

    mapper(
        HarvestObjectExtra,
        harvest_object_extra_table,
        properties={
            'object':relation(
                HarvestObject,
                backref=backref('extras', cascade='all,delete-orphan')
            ),
        },
    )

    mapper(
        HarvestLog,
        harvest_log_table,
    )

    # Stamp/validate objects right before they are written for the first time.
    event.listen(HarvestObject, 'before_insert', harvest_object_before_insert_listener)
Exemplo n.º 19
0
class Algorithm(Base):
    """ORM entity describing one introspected algorithm (adapter class)."""

    __tablename__ = 'ALGORITHMS'

    id = Column(Integer, primary_key=True)
    module = Column(String)      # dotted python module path holding the adapter
    classname = Column(String)   # adapter class name inside `module`

    fk_category = Column(
        Integer, ForeignKey('ALGORITHM_CATEGORIES.id', ondelete="CASCADE"))
    group_name = Column(String)
    group_description = Column(String)

    displayname = Column(String)
    description = Column(String)
    subsection_name = Column(String)

    required_datatype = Column(String)
    datatype_filter = Column(String)
    parameter_name = Column(String)
    outputlist = Column(String)

    last_introspection_check = Column(DateTime)
    removed = Column(Boolean, default=False)  # soft-delete flag

    algorithm_category = relationship(AlgorithmCategory,
                                      backref=backref('ALGORITHMS',
                                                      order_by=id,
                                                      cascade="delete, all"))

    def __init__(self,
                 module,
                 classname,
                 category_key,
                 group_name=None,
                 group_description=None,
                 display_name='',
                 description="",
                 subsection_name=None,
                 last_introspection_check=None):
        """Store adapter identity/metadata; `category_key` becomes fk_category."""
        self.module = module
        self.classname = classname
        self.fk_category = category_key
        self.group_name = group_name
        self.group_description = group_description
        self.displayname = display_name
        self.description = description
        self.last_introspection_check = last_introspection_check
        self.removed = False

        # Default the UI subsection from the module name,
        # e.g. "...simulator_adapter" -> "simulator".
        if subsection_name is not None:
            self.subsection_name = subsection_name
        else:
            self.subsection_name = self.module.split('.')[-1].replace(
                '_adapter', '')

    def __repr__(self):
        # Bugfix: use '%s' (was '%d') for fk_category -- the attribute can be
        # None (e.g. a partially-populated instance), and '%d' % None raises
        # TypeError, breaking any debug print that reprs the entity.
        # For real ints '%s' renders identically.
        return "<Algorithm('%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s')>" % (
            self.id, self.module, self.classname, self.fk_category,
            self.displayname, self.subsection_name, self.group_name,
            self.group_description, self.removed)
Exemplo n.º 20
0
                        Table, Integer, String, select, update, ForeignKey)

# Bugfix: the SQLAlchemy engine factory is ``create_engine`` -- the previous
# ``create_ENGINE`` does not exist, so this module raised NameError on import.
ENGINE = create_engine('sqlite:///TESTE.db', echo=False)

METADATA = MetaData(bind=ENGINE)

# Registered users; `numero_de_serie` (machine serial number) is referenced
# by the status and task tables below.
USERS = Table('usuarios', METADATA,
              Column('id', Integer, primary_key=True),
              Column('nome', String(40), index=True),
              Column('senha', String),
              Column('numero_de_serie', String, nullable=False))
# Current water/coffee availability, keyed by machine serial number.
CAFETEIRA_STATUS = Table('Cafeteira_status', METADATA,
                         Column('agua_disponivel', Integer),
                         Column('cafe_disponivel', Integer),
                         Column('n_de_serie', String,
                                ForeignKey(USERS.c.numero_de_serie)))
# Scheduled brew tasks (coffee/water amounts + time) per machine.
CAFETEIRA_TASKS = Table('Cafeteira_tasks', METADATA,
                        Column('id', Integer, primary_key=True),
                        Column('cafe', Integer, nullable=False),
                        Column('agua', Integer, nullable=False),
                        Column('horario', String, nullable=False),
                        Column('n_de_serie', String,
                               ForeignKey(USERS.c.numero_de_serie)))
TESTE = Table('horario', METADATA,
              Column('id', Integer, primary_key=True),
              Column('hora', String))

# Kept as a manual toggle for (re)creating the schema:
# METADATA.create_all()

def Update(status):
    '''Update operation.'''
    # NOTE(review): only the docstring is visible in this chunk -- the actual
    # update logic appears truncated; confirm against the full file.
Exemplo n.º 21
0
class OperationGroup(Base, Exportable):
    """
    Groups the operations a user launched at the same time, so the DB can
    track them as one logical batch.
    """
    __tablename__ = "OPERATION_GROUPS"

    id = Column(Integer, primary_key=True)
    name = Column(String)
    range1 = Column(String)
    range2 = Column(String)
    range3 = Column(String)
    gid = Column(String)
    fk_launched_in = Column(Integer,
                            ForeignKey('PROJECTS.id', ondelete="CASCADE"))
    project = relationship(Project,
                           backref=backref('OPERATION_GROUPS',
                                           order_by=id,
                                           cascade="all,delete"))

    def __init__(self, project_id, name='incomplete', ranges=None):
        self.name = name
        # Copy up to three range descriptors into their dedicated columns.
        for slot, value in zip(('range1', 'range2', 'range3'), ranges or ()):
            setattr(self, slot, value)
        self.gid = generate_guid()
        self.fk_launched_in = project_id

    def __repr__(self):
        return "<OperationGroup({0},{1})>".format(self.name, self.gid)

    @property
    def range_references(self):
        """Memorized range starter"""
        refs = [self.range1]
        for extra in (self.range2, self.range3):
            if extra and extra != 'null':
                refs.append(extra)
        return refs

    def fill_operationgroup_name(self, entities_in_group):
        """
        Display name for UI.
        """
        pieces = []
        for idx, raw_range in enumerate((self.range1, self.range2,
                                         self.range3)):
            if raw_range is not None:
                label = RangeParameter.from_json(raw_range).name
                # Ranges after the first are joined with " x ".
                pieces.append(label if idx == 0 else " x " + label)
        timestamp = date2string(datetime.now(),
                                date_format=LESS_COMPLEX_TIME_FORMAT)
        self.name = ("of " + entities_in_group + " varying "
                     + "".join(pieces) + " - " + timestamp)
Exemplo n.º 22
0
    # FK to the owning user's row.
    user_id = Column(Integer, ForeignKey('users.id'))

    # Bidirectional link -- User must declare `addresses` with back_populates.
    user = relationship('User', back_populates='addresses')

    def __repr__(self):
        """Debug representation showing only the email address."""
        return '<Address email_address="{0}">'.format(self.email_address)


#=====================================================================================
# Many to many
#=====================================================================================

# association table
# Pure link table (no payload columns): the composite primary key of
# (post_id, keyword_id) keeps each pairing unique; column types are
# inferred from the FK targets.
post_keywords = Table(
    'post_keywords', Base.metadata,
    Column('post_id', ForeignKey('posts.id'), primary_key=True),
    Column('keyword_id', ForeignKey('keywords.id'), primary_key=True))


class BlogPost(Base):
    """A blog post written by a user and tagged with keywords."""

    __tablename__ = 'posts'

    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey('users.id'))  # author FK
    headline = Column(String(255), nullable=False)
    body = Column(Text)

    # many to many BlogPost <-> Keyword
    # Routed through the `post_keywords` association table; Keyword must
    # declare a matching `posts` relationship with back_populates.
    keywords = relationship('Keyword',
                            secondary=post_keywords,
                            back_populates='posts')
Exemplo n.º 23
0
class Operation(Base, Exportable):
    """
    The class used to log any action executed in Projects.
    """
    __tablename__ = 'OPERATIONS'

    id = Column(Integer, primary_key=True)
    fk_launched_by = Column(Integer, ForeignKey('USERS.id'))
    fk_launched_in = Column(Integer,
                            ForeignKey('PROJECTS.id', ondelete="CASCADE"))
    fk_from_algo = Column(Integer, ForeignKey('ALGORITHMS.id'))
    fk_operation_group = Column(Integer,
                                ForeignKey('OPERATION_GROUPS.id',
                                           ondelete="CASCADE"),
                                default=None)
    gid = Column(String)
    view_model_gid = Column(String)
    create_date = Column(
        DateTime)  # Date at which the user generated this entity
    start_date = Column(
        DateTime
    )  # Actual time when the operation executions is started (without queue time)
    completion_date = Column(
        DateTime
    )  # Time when the operation got status FINISHED/ ERROR or CANCEL set.
    status = Column(String, index=True)
    visible = Column(Boolean, default=True)
    additional_info = Column(String)
    user_group = Column(String, default=None)
    range_values = Column(String, default=None)
    estimated_disk_size = Column(Integer)

    algorithm = relationship(Algorithm)
    project = relationship(Project,
                           backref=backref('OPERATIONS',
                                           order_by=id,
                                           cascade="all,delete"))
    operation_group = relationship(OperationGroup)
    user = relationship(User)

    def __init__(self,
                 view_model_gid,
                 fk_launched_by,
                 fk_launched_in,
                 fk_from_algo,
                 status=STATUS_PENDING,
                 start_date=None,
                 completion_date=None,
                 op_group_id=None,
                 additional_info='',
                 user_group=None,
                 range_values=None,
                 estimated_disk_size=0):
        """Record launch identity, timestamps and initial status for an operation."""
        self.fk_launched_by = fk_launched_by
        self.fk_launched_in = fk_launched_in
        self.fk_from_algo = fk_from_algo
        self.view_model_gid = view_model_gid
        self.create_date = datetime.now()
        self.start_date = start_date
        self.completion_date = completion_date
        self.status = status
        self.visible = True
        self.fk_operation_group = op_group_id
        self.range_values = range_values
        self.user_group = user_group
        self.additional_info = additional_info
        self.gid = generate_guid()
        self.estimated_disk_size = estimated_disk_size

    def __repr__(self):
        """Debug representation listing the main tracked fields."""
        return "<Operation('%s', %s, %s,'%s','%s','%s','%s', '%s','%s',%s, '%s', '%s', '%s', %s)>" \
               % (self.view_model_gid, self.gid, self.fk_launched_by, self.fk_launched_in, self.fk_from_algo,
                  self.create_date, self.start_date, self.completion_date, self.status, self.visible,
                  self.fk_operation_group, self.user_group, self.additional_info, self.estimated_disk_size)

    def start_now(self):
        """ Update Operation fields at startup: Status and Date"""
        self.start_date = datetime.now()
        self.status = STATUS_STARTED

    def mark_complete(self, status, additional_info=None):
        """ Update Operation fields on completion: Status and Date"""
        self.completion_date = datetime.now()
        if additional_info is not None:
            self.additional_info = additional_info
        self.status = status

    @property
    def has_finished(self):
        # Delegates to the module-level helper; True for terminal statuses.
        return has_finished(self.status)

    def to_dict(self):
        """
        Overwrite superclass method to add required changes.
        """
        _, base_dict = super(Operation, self).to_dict(excludes=[
            'id', 'fk_launched_by', 'user', 'fk_launched_in', 'project',
            'fk_from_algo', 'algorithm', 'fk_operation_group',
            'operation_group'
        ])
        base_dict['fk_launched_in'] = self.project.gid
        base_dict['fk_from_algo'] = json.dumps(
            dict(module=self.algorithm.module,
                 classname=self.algorithm.classname))
        # We keep the information for the operation_group in this place (on each operation)
        # because we don't have an XML file for the operation_group entity.
        # We don't want to keep the information about the operation groups into the project XML file
        # because it may be opened from different places and may produce conflicts.
        if self.operation_group:
            base_dict['fk_operation_group'] = json.dumps(
                self.operation_group.to_dict()[1])
        return self.__class__.__name__, base_dict

    # TODO: Fix this hackish dao pass
    def from_dict(self, dictionary, dao, user_id=None, project_gid=None):
        """
        Add specific attributes from a input dictionary.
        """

        # If user id was specified try to load it, otherwise use System account
        user = dao.get_system_user(
        ) if user_id is None else dao.get_user_by_id(user_id)
        self.fk_launched_by = user.id

        # Find parent Project
        prj_to_load = project_gid if project_gid is not None else dictionary[
            'fk_launched_in']
        parent_project = dao.get_project_by_gid(prj_to_load)
        self.fk_launched_in = parent_project.id
        self.project = parent_project

        # Find parent Algorithm
        source_algorithm = json.loads(dictionary['fk_from_algo'])
        algorithm = dao.get_algorithm_by_module(source_algorithm['module'],
                                                source_algorithm['classname'])

        if algorithm:
            self.algorithm = algorithm
            self.fk_from_algo = algorithm.id
        else:
            # The algorithm that produced this operation no longer exists most likely due to
            # exported operation from different version. Fallback to tvb importer.
            LOG.warning(
                "Algorithm group %s was not found in DB. Most likely cause is that archive was exported "
                "from a different TVB version. Using fallback TVB_Importer as source of "
                "this operation." % (source_algorithm['module'], ))
            algorithm = dao.get_algorithm_by_module(TVB_IMPORTER_MODULE,
                                                    TVB_IMPORTER_CLASS)
            # NOTE(review): unlike the branch above, self.algorithm is not
            # assigned here -- confirm whether that is intentional.
            self.fk_from_algo = algorithm.id
            dictionary['additional_info'] = (
                "The original parameters for this operation were: \nAdapter: %s "
                "\nParameters %s" %
                (source_algorithm['module'] + '.' +
                 source_algorithm['classname'], dictionary['parameters']))

        # Find OperationGroup, if any
        if 'fk_operation_group' in dictionary:
            group_dict = json.loads(dictionary['fk_operation_group'])
            op_group = None
            if group_dict:
                op_group = dao.get_operationgroup_by_gid(group_dict['gid'])
                if not op_group:
                    name = group_dict['name']
                    ranges = [
                        group_dict['range1'], group_dict['range2'],
                        group_dict['range3']
                    ]
                    gid = group_dict['gid']
                    op_group = OperationGroup(self.fk_launched_in, name,
                                              ranges)
                    op_group.gid = gid
                    op_group = dao.store_entity(op_group)
            self.operation_group = op_group
            # Bugfix: group_dict may be empty/null, leaving op_group as None;
            # the previous unconditional `op_group.id` raised AttributeError.
            self.fk_operation_group = op_group.id if op_group else None
        else:
            self.operation_group = None
            self.fk_operation_group = None

        self.meta_data = dictionary['meta_data']
        self.create_date = string2date(dictionary['create_date'])
        if dictionary['start_date'] != "None":
            self.start_date = string2date(dictionary['start_date'])
        if dictionary['completion_date'] != "None":
            self.completion_date = string2date(dictionary['completion_date'])
        self.status = self._parse_status(dictionary['status'])
        self.visible = string2bool(dictionary['visible'])
        self.range_values = dictionary['range_values']
        self.user_group = dictionary['user_group']
        self.additional_info = dictionary['additional_info']
        self.gid = dictionary['gid']

        return self, dictionary['parameters']

    def _parse_status(self, status):
        """
        To keep backwards compatibility, when we import an operation that did not have new 
        operation status.
        """
        if 'FINISHED' in status:
            return STATUS_FINISHED
        elif 'ERROR' in status:
            return STATUS_ERROR
        elif 'CANCELED' in status:
            return STATUS_CANCELED
        elif 'STARTED' in status:
            return STATUS_STARTED

        return STATUS_PENDING
class Candidate(Base):
    """A job candidate, including contact details, profile and hiring status."""

    __tablename__ = "candidate"

    id = Column(Integer, primary_key=True, nullable=False)
    name = Column(String(100), nullable=False)
    email = Column(String(100), nullable=False)
    address = Column(String(100), nullable=False)
    mobileno = Column(String(10), nullable=False)
    skills = Column(Text, nullable=True)
    experience = Column(Integer, nullable=False)
    source = Column(String(30), nullable=True)
    # NOTE: column name spelling ("reffered_by") is kept -- it is part of the
    # persisted schema and the serialized API payload.
    reffered_by = Column(Integer, ForeignKey('employee.id'), nullable=False)
    resume = Column(String(100), nullable=True)
    status = Column(String(100), nullable=False)
    current_ctc = Column(Integer)
    expected_ctc = Column(Integer)
    current_organization = Column(String(100))

    notice_period = Column(Integer)

    # Relationship
    job_has_candidate = relationship("JobHasCandidate",
                                     back_populates="candidate")

    # Every serializable attribute, in the order the API exposes them.
    _FIELDS = ('id', 'name', 'email', 'address', 'mobileno', 'skills',
               'experience', 'source', 'reffered_by', 'resume', 'status',
               'current_ctc', 'expected_ctc', 'current_organization',
               'notice_period')

    def __init__(self, name=None, skills=None, experience=None, email=None,
                 address=None, mobileno=None, source=None, reffered_by=None,
                 resume=None, status=None, current_ctc=None, expected_ctc=None,
                 current_organization=None, notice_period=None):
        """Populate a new candidate record from keyword values."""
        values = locals()
        # 'id' (first entry) is assigned by the database, not the caller.
        for field in self._FIELDS[1:]:
            setattr(self, field, values[field])

    def serialize(self):
        """Return this candidate as a plain, JSON-friendly dict."""
        return {field: getattr(self, field) for field in self._FIELDS}

    def deserialize(self, candidate_json):
        """Copy the known fields out of `candidate_json` onto this instance.

        Missing keys become None, matching dict.get semantics.
        """
        for field in self._FIELDS:
            setattr(self, field, candidate_json.get(field))
Exemplo n.º 25
0
from sqlalchemy import create_engine
from sqlalchemy.orm import relationship
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
import hashlib, binascii, os
import pickle

# In-memory SQLite keeps this store ephemeral: all data vanishes when the
# process exits. Re-enable echo to log generated SQL while debugging.
engine = create_engine("sqlite:///:memory:")#, echo = True)
Session = sessionmaker(bind=engine)

Base = declarative_base()

from sqlalchemy import Column, Integer, String, Table, ForeignKey, Index, and_, exists

# Many-to-many link between users and rooms (no payload columns).
user_room_mapping = Table('usersrooms', Base.metadata,
    Column('user_id', Integer, ForeignKey('users.id')),
    Column('room_id', Integer, ForeignKey('rooms.id'))
)

def hash_password(password):
    """Hash a password for storing.

    Returns a 192-char ascii string: 64 hex chars of random salt followed by
    128 hex chars of the PBKDF2-HMAC-SHA512 digest.
    """
    random_bytes = os.urandom(60)
    salt = hashlib.sha256(random_bytes).hexdigest().encode('ascii')
    derived = hashlib.pbkdf2_hmac('sha512', password.encode('utf-8'),
                                  salt, 100000)
    return (salt + binascii.hexlify(derived)).decode('ascii')
 
def verify_password(stored_password, provided_password):
    """Verify a stored password against one provided by user"""
    # Stored layout (see hash_password): first 64 hex chars are the salt,
    # the remaining 128 are the PBKDF2-HMAC-SHA512 digest.
    salt = stored_password[:64]
    stored_password = stored_password[64:]
    # NOTE(review): the function appears truncated in this chunk -- the
    # PBKDF2 recomputation and the comparison/return are not visible here.
Exemplo n.º 26
0
from .db_config import db
from sqlalchemy import Column, Integer, String, Table, ForeignKey


# Association table linking freeways to business entities (many-to-many).
# NOTE(review): defined via db.Table (presumably flask_sqlalchemy, which
# supplies the shared metadata implicitly) -- verify against db_config.
freeway_business = db.Table(
    "freeway_business",
    Column("freeway_id", Integer, ForeignKey("freeway.id")),
    Column("business_entity_id", Integer, ForeignKey("business_entity.id")),
)
Exemplo n.º 27
0
class Reference(base):
    """The model for a Reference.

    A Reference records one place a poll is displayed: either a message in a
    private chat (admin interface / private vote) or a message shared via an
    inline query (identified by bot_inline_message_id).
    """

    __tablename__ = "reference"
    __mapper_args__ = {"confirm_deleted_rows": False}

    id = Column(Integer, primary_key=True)
    type = Column(String)                   # a ReferenceType member name
    bot_inline_message_id = Column(String)  # set only for inline references
    message_id = Column(BigInteger)         # set only for chat-message references

    # Keep those for now, in case we migrate to mtproto
    message_dc_id = Column(BigInteger)
    message_access_hash = Column(BigInteger)

    user_id = Column(
        BigInteger,
        ForeignKey("user.id", ondelete="cascade", name="user_fk"),
        nullable=True,
        index=True,
    )
    user = relationship("User", foreign_keys="Reference.user_id")

    created_at = Column(DateTime, server_default=func.now(), nullable=False)
    updated_at = Column(DateTime,
                        server_default=func.now(),
                        onupdate=func.now(),
                        nullable=False)

    # ManyToOne
    poll_id = Column(
        Integer,
        ForeignKey("poll.id", ondelete="cascade", name="reference_poll"),
        nullable=False,
        index=True,
    )
    poll = relationship("Poll")

    def __init__(
        self,
        poll,
        reference_type,
        user=None,
        message_id=None,
        inline_message_id=None,
    ):
        """Create a new reference to a poll."""
        self.poll = poll
        self.type = reference_type
        # There are three types of references
        # 1. Messages in private chat:
        # - Admin interface
        # - Private vote
        if (user is not None and message_id is not None and reference_type in [
                ReferenceType.admin.name, ReferenceType.private_vote.name
        ]):
            self.user = user
            self.message_id = message_id

        # 2. Messages shared via inline query
        elif (inline_message_id is not None
              and reference_type == ReferenceType.inline.name):
            self.bot_inline_message_id = inline_message_id

        else:
            raise Exception(
                "Tried to create Reference with wrong type or missing parameters"
            )

    def __repr__(self):
        """Print as string."""
        if self.type == ReferenceType.inline.name:
            # Bugfix: inline references have no chat message_id (always None);
            # their identifier is bot_inline_message_id.
            message = f"Reference {self.id}: inline_message_id {self.bot_inline_message_id}"
        elif self.type == ReferenceType.admin.name:
            message = f"Reference {self.id}: message_id {self.message_id}, admin: {self.user.id}"
        else:
            message = f"Reference {self.id}: message_id {self.message_id}, user: {self.user.id}"

        return message
Exemplo n.º 28
0
    def define_tables(cls, metadata):
        """Declare the fixture tables: "a" is the root, "b" hangs off "a",
        tables "c".."f" each hang off "b", and "g" hangs off "a"."""
        def make_some_columns():
            # Fresh Column objects on every call -- a Column instance cannot
            # be shared between two Tables.
            return [Column("c%d" % i, Integer) for i in range(2)]

        Table(
            "a",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            *make_some_columns()
        )
        Table(
            "b",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("a_id", ForeignKey("a.id")),
            *make_some_columns()
        )
        # c, d, e and f are structurally identical (PK + FK to b + filler
        # columns), so build them in one loop instead of four copies.
        for name in ("c", "d", "e", "f"):
            Table(
                name,
                metadata,
                Column(
                    "id", Integer, primary_key=True,
                    test_needs_autoincrement=True
                ),
                Column("b_id", ForeignKey("b.id")),
                *make_some_columns()
            )
        Table(
            "g",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("a_id", ForeignKey("a.id")),
            *make_some_columns()
        )
Exemplo n.º 29
0
import enum
from sqlalchemy import Table, Column, Integer, String, ForeignKey, Enum
from sqlalchemy.orm import relationship, backref
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

# Core association table backing the many-to-many Student <-> Subject
# relationship; one FK column per side, named "<table>_id".
student_subject = Table(
    'student_subject',
    Base.metadata,
    *[
        Column('%s_id' % parent, Integer, ForeignKey('%s.id' % parent))
        for parent in ('student', 'subject')
    ]
)


# Days of the week as an integer enum, Monday == 0 through Sunday == 6
# (same convention as datetime.date.weekday()).  Built with the enum
# functional API; behaviorally identical to the class-based form.
DayOfWeek = enum.IntEnum(
    'DayOfWeek',
    [
        ('MONDAY', 0),
        ('TUESDAY', 1),
        ('WEDNESDAY', 2),
        ('THURSDAY', 3),
        ('FRIDAY', 4),
        ('SATURDAY', 5),
        ('SUNDAY', 6),
    ],
)


class Place(Base):
    """A named place identified by a surrogate integer key."""

    __tablename__ = 'place'

    # Surrogate primary key.
    id = Column('id', Integer, primary_key=True)
    # Display name, up to 64 characters.
    name = Column('name', String(64))
Exemplo n.º 30
0
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy import Table, Column, Integer, String, Boolean,\
    Float, DateTime, ForeignKey, Text, Index
from sqlalchemy import UniqueConstraint
from sqlalchemy.orm import relationship
import datetime

Base = declarative_base()

# Core table implementing the many-to-many relationship between the
# "targets" and "sessions" tables (one FK column to each side).
target_association_table = Table(
    'target_session_association',
    Base.metadata,
    Column('target_id', Integer, ForeignKey('targets.id')),
    Column('session_id', Integer, ForeignKey('sessions.id')),
)

# Index lookups by target; the btree hint only takes effect on PostgreSQL.
Index(
    'target_id_idx',
    target_association_table.c.target_id,
    postgresql_using='btree',
)


class Session(Base):
    """A uniquely named session that groups targets (many-to-many)."""

    __tablename__ = "sessions"

    # Surrogate primary key.
    id = Column("id", Integer, primary_key=True, autoincrement=True)
    # Human-readable session name; must be unique.
    name = Column("name", String, unique=True)
    # Whether this session is currently the active one.
    active = Column("active", Boolean, default=False)
    # Many-to-many link to Target via the association table; Target
    # instances gain a "sessions" backref.
    targets = relationship(
        "Target",
        secondary=target_association_table,
        backref="sessions",
    )
Exemplo n.º 31
0
class CompAssignment(Base):
    __tablename__ = "completed_assignments"
    id = Column(Integer, primary_key=True, autoincrement=True)
    student = Column(String, ForeignKey("users.email"))
    link = Column(String)
    completed = Column(DateTime)