Example #1
def get_scores(session, submission_id):
    """Get the scores for each fold of a submission.

    Parameters
    ----------
    session : :class:`sqlalchemy.orm.Session`
        The session to directly perform the operation on the database.
    submission_id : int
        The id of the submission.

    Returns
    -------
    scores : pd.DataFrame
        A pandas dataframe containing the scores of each fold.
    """
    results = defaultdict(list)
    index = []
    all_cv_folds = (session.query(SubmissionOnCVFold).filter_by(
        submission_id=submission_id).options(defer("full_train_y_pred"),
                                             defer("test_y_pred")).all())
    all_cv_folds = sorted(all_cv_folds, key=lambda x: x.id)
    for fold_id, cv_fold in enumerate(all_cv_folds):
        for step in ('train', 'valid', 'test'):
            index.append((fold_id, step))
            for score in cv_fold.scores:
                results[score.name].append(getattr(score, step + '_score'))
    multi_index = pd.MultiIndex.from_tuples(index, names=['fold', 'step'])
    scores = pd.DataFrame(results, index=multi_index)
    return scores
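For reference, the frame returned above is indexed by (fold, step) pairs. A minimal sketch of that layout, assuming two folds and a single hypothetical score named 'acc' with purely illustrative values:

# Sketch only: reproduces the MultiIndex shape built by get_scores above.
import pandas as pd

index = pd.MultiIndex.from_tuples(
    [(fold, step) for fold in (0, 1) for step in ('train', 'valid', 'test')],
    names=['fold', 'step'])
scores = pd.DataFrame({'acc': [0.90, 0.80, 0.70, 0.91, 0.82, 0.71]}, index=index)
print(scores)
# The usual MultiIndex accessors then slice by fold or step:
print(scores.xs('valid', level='step'))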
Example #2
def get_time(session, submission_id):
    """Get the computation time for each fold of a submission.

    Parameters
    ----------
    session : :class:`sqlalchemy.orm.Session`
        The session to directly perform the operation on the database.
    submission_id : int
        The id of the submission.

    Returns
    -------
    computation_time : pd.DataFrame
        A pandas dataframe containing the computation time of each fold.
    """
    results = defaultdict(list)
    all_cv_folds = (session.query(SubmissionOnCVFold).filter_by(
        submission_id=submission_id).options(defer("full_train_y_pred"),
                                             defer("test_y_pred")).all())
    all_cv_folds = sorted(all_cv_folds, key=lambda x: x.id)
    for fold_id, cv_fold in enumerate(all_cv_folds):
        results['fold'].append(fold_id)
        for step in ('train', 'valid', 'test'):
            results[step].append(getattr(cv_fold, '{}_time'.format(step)))
    return pd.DataFrame(results).set_index('fold')
Example #3
def set_predictions(session, submission_id, path_predictions):
    """Set the predictions in the database.

    Parameters
    ----------
    session : :class:`sqlalchemy.orm.Session`
        The session to directly perform the operation on the database.
    submission_id : int
        The id of the submission.
    path_predictions : str
        The path where the results files are located.
    """
    all_cv_folds = (session.query(SubmissionOnCVFold).filter_by(
        submission_id=submission_id).options(defer("full_train_y_pred"),
                                             defer("test_y_pred")).all())
    all_cv_folds = sorted(all_cv_folds, key=lambda x: x.id)
    for fold_id, cv_fold in enumerate(all_cv_folds):
        path_results = os.path.join(path_predictions,
                                    'fold_{}'.format(fold_id))
        cv_fold.full_train_y_pred = np.load(
            os.path.join(path_results, 'y_pred_train.npz'))['y_pred']
        cv_fold.test_y_pred = np.load(
            os.path.join(path_results, 'y_pred_test.npz'))['y_pred']
    session.commit()
Example #4
def set_scores(session, submission_id, path_predictions):
    """Set the scores in the database.

    Parameters
    ----------
    session : :class:`sqlalchemy.orm.Session`
        The session to directly perform the operation on the database.
    submission_id : int
        The id of the submission.
    path_predictions : str
        The path where the results files are located.
    """
    all_cv_folds = (session.query(SubmissionOnCVFold).filter_by(
        submission_id=submission_id).options(defer("full_train_y_pred"),
                                             defer("test_y_pred")).all())
    all_cv_folds = sorted(all_cv_folds, key=lambda x: x.id)
    for fold_id, cv_fold in enumerate(all_cv_folds):
        path_results = os.path.join(path_predictions,
                                    'fold_{}'.format(fold_id))
        scores_update = pd.read_csv(os.path.join(path_results, 'scores.csv'),
                                    index_col=0)
        for score in cv_fold.scores:
            for step in scores_update.index:
                value = scores_update.loc[step, score.name]
                setattr(score, step + '_score', value)
    session.commit()
Example #5
    def get(self, project_id, document_id):
        document, _ = self.get_document(project_id, document_id, True)

        highlights = (
            self.db.query(database.Highlight)
            .options(defer('snippet'))
            .filter(database.Highlight.document_id == document.id)
            .order_by(database.Highlight.start_offset)
            .options(joinedload(database.Highlight.tags))
            .options(defer('tags.highlights_count'))
        ).all()

        return self.send_json({
            'contents': [
                {'offset': 0, 'contents': document.contents},
            ],
            'text_direction': document.text_direction.name,
            'highlights': [
                {'id': hl.id,
                 'start_offset': hl.start_offset,
                 'end_offset': hl.end_offset,
                 'tags': [t.id for t in hl.tags]}
                for hl in highlights
            ],
        })
Example #6
    def test_unbound_options(self):
        User, Address, Keyword, Order, Item = self.classes(
            "User", "Address", "Keyword", "Order", "Item"
        )

        self._run_cache_key_fixture(
            lambda: (
                joinedload(User.addresses),
                joinedload(User.addresses.of_type(aliased(Address))),
                joinedload("addresses"),
                joinedload(User.orders).selectinload("items"),
                joinedload(User.orders).selectinload(Order.items),
                defer(User.id),
                defer("id"),
                defer("*"),
                defer(Address.id),
                subqueryload(User.orders),
                selectinload(User.orders),
                joinedload(User.addresses).defer(Address.id),
                joinedload(aliased(User).addresses).defer(Address.id),
                joinedload(User.addresses).defer("id"),
                joinedload(User.orders).joinedload(Order.items),
                joinedload(User.orders).subqueryload(Order.items),
                subqueryload(User.orders).subqueryload(Order.items),
                subqueryload(User.orders)
                .subqueryload(Order.items)
                .defer(Item.description),
                defaultload(User.orders).defaultload(Order.items),
                defaultload(User.orders),
            ),
            compare_values=True,
        )
Example #7
File: api.py Project: joram/recipes
def recipes_search():
    session = Session()

    recipe_qs = session.query(Recipe).options(
        defer('url'),
        defer('title'),
        defer('ingredients'),
    )

    title = request.args.get('title')
    if title is not None and title != "undefined":
        recipe_qs = recipe_qs.filter(Recipe.title.like(f'%{title}%'))

    tag = request.args.get('tag')
    if tag is not None and tag != "undefined":
        qs = session.query(RecipeTag).filter(RecipeTag.tag_name == tag)
        recipe_pub_ids = [rt.recipe_pub_id for rt in qs.all()]
        recipe_qs = recipe_qs.filter(Recipe.pub_id.in_(recipe_pub_ids))

    ingredient = request.args.get('ingredient')
    if ingredient is not None and ingredient != "undefined":
        qs = session.query(RecipeIngredient).filter(
            RecipeIngredient.ingredient_name == ingredient)
        recipe_pub_ids = [ri.recipe_pub_id for ri in qs.all()]
        recipe_qs = recipe_qs.filter(Recipe.pub_id.in_(recipe_pub_ids))

    results = [recipe.json() for recipe in recipe_qs.all()]
    print(f"{len(results)} recipes found")
    return flask.jsonify(results)
Example #8
def test_sign_up_team(session_scope_function):
    event_name, username = '******', 'test_user'

    sign_up_team(session_scope_function, event_name, username)
    event_team = session_scope_function.query(EventTeam).all()
    assert len(event_team) == 1
    event_team = event_team[0]

    # when signing up a team, the team is approved and the sandbox is set up:
    # the starting kit is submitted without training it.
    assert event_team.last_submission_name == 'starting_kit'
    assert event_team.approved is True
    # check the status of the sandbox submission
    submission = session_scope_function.query(Submission).all()
    assert len(submission) == 1
    submission = submission[0]
    assert submission.name == 'starting_kit'
    assert submission.event_team == event_team
    submission_file = submission.files[0]
    assert submission_file.name == 'classifier'
    assert submission_file.extension == 'py'
    assert (os.path.join('submission_000000001', 'classifier.py')
            in submission_file.path)
    # check the submission on cv fold
    cv_folds = (session_scope_function.query(SubmissionOnCVFold).options(
        defer("full_train_y_pred"), defer("test_y_pred")).all())
    for fold in cv_folds:
        assert fold.state == 'new'
        assert fold.best is False
        assert fold.contributivity == pytest.approx(0)
Example #9
 def _defer_unused_opds_entry(self, query, work_model=Work):
     """Defer the appropriate opds entry
     """
     if Configuration.DEFAULT_OPDS_FORMAT == "simple_opds_entry":
         return query.options(defer(work_model.verbose_opds_entry))
     else:
         return query.options(defer(work_model.simple_opds_entry))
Example #10
 def get_all(cls, limit=None, offset=0):
     """docstring for get_all"""
     # TODO: Don't fetch ALL data (no need for html and mdfield)
     if limit:
         return cls.query.options(defer('_html')).filter_by(_type=cls._my_type).offset(offset).limit(limit)
     else:
         return cls.query.options(defer('_html')).filter_by(_type=cls._my_type)
Example #11
    def node_state_from_database(self, flow_config, node_id):
        # let's keep it simple for now
        # return self.node_from_database(flow_config, node_id)

        node = self.session.query(Node).filter_by(id=node_id).options(
            defer("work_data"), defer("user_data")).one()
        logging.info("node_from_database got node %s", node)

        node_mem = flow_config.new_node(node.node_num)
        node_mem.state = node.state
        node_mem.id = node.id
        node_mem.flow_id = node.flow_id

        input_nodes_num_str = node.input_nodes

        node_nums = self._get_node_num(input_nodes_num_str)

        if node_nums:
            nodes = self.session.query(Node).filter(
                Node.flow_id == node.flow_id).filter(
                    Node.node_num.in_(node_nums)).options(
                        defer("work_data"), defer("user_data"))

            for n in nodes:
                print("+++++++", n)
                input_node_mem = flow_config.new_node(n.node_num)
                input_node_mem.state = n.state
                input_node_mem.id = n.id
                input_node_mem.flow_id = n.flow_id

                node_mem.input_nodes.append(input_node_mem)

        return node_mem
Example #12
def get_sources_data_fast():
    all_journals = Journal.query.options(defer('api_raw_crossref'),
                                         defer('api_raw_issn')).all()
    all_repos = Repository.query.all()
    all_sources = all_journals + all_repos

    return all_sources
Example #13
def get_closest_struc_django(distinct=True, with_attr=False):
    nodes = ParameterData.query().with_entities('id')
    struc_type = StructureData._query_type_string

    depth = (sa.session.query(DbPath.depth).filter(
        DbPath.child_id.in_(nodes)).join(DbNode, DbPath.parent).filter(
            DbNode.type.like("{}%".format(struc_type))).order_by(DbPath.depth))

    if distinct:
        depth = depth.distinct()

    depth = depth[0][0]

    q = (DbPath.query.filter(DbPath.child_id.in_(nodes)).join(
        DbNode,
        DbPath.parent).filter(DbNode.type.like(
            "{}%".format(struc_type))).filter(DbPath.depth == depth))

    q = q.distinct()

    q = q.with_entities(DbPath.id)

    res = (StructureData.query().join(DbPath, DbNode.child_paths).filter(
        DbPath.child_id.in_(nodes)).filter(DbPath.id.in_(q)))

    res = res.distinct()

    res = res.order_by(DbNode.ctime)

    if not with_attr:
        res = res.options(defer(DbNode.attributes), defer(DbNode.extras))

    return res.all()
Example #14
def farthest_cif_django(distinct=True):
    nodes = ParameterData.query().with_entities('id')
    cif_type = CifData._query_type_string

    depth = (sa.session.query(DbPath.depth).filter(
        DbPath.child_id.in_(nodes)).join(DbNode, DbPath.parent).filter(
            DbNode.type.like("%{}%".format(cif_type))).order_by(
                DbPath.depth.desc()))

    if distinct:
        depth = depth.distinct()

    depth = depth[0][0]

    q = (DbPath.query.filter(DbPath.child_id.in_(nodes)).join(
        DbNode, DbPath.parent).filter(DbNode.type.like(
            "%{}%".format(cif_type))).filter(
                DbPath.depth == depth).distinct().with_entities(DbPath.id))

    res = (DbNode.query.filter(DbNode.type.like("%{}%".format(cif_type))).join(
        DbPath, DbPath.parent_id == DbNode.id).filter(
            DbPath.child_id.in_(nodes)).filter(DbPath.id.in_(q)).options(
                defer(DbNode.attributes), defer(DbNode.extras)).distinct())

    return res.all()
Example #15
def set_time(session, submission_id, path_predictions):
    """Set the timing information in the database.

    Parameters
    ----------
    session : :class:`sqlalchemy.orm.Session`
        The session to directly perform the operation on the database.
    submission_id : int
        The id of the submission.
    path_predictions : str
        The path where the results files are located.
    """
    all_cv_folds = (session.query(SubmissionOnCVFold).filter_by(
        submission_id=submission_id).options(defer("full_train_y_pred"),
                                             defer("test_y_pred")).all())
    all_cv_folds = sorted(all_cv_folds, key=lambda x: x.id)
    for fold_id, cv_fold in enumerate(all_cv_folds):
        path_results = os.path.join(path_predictions,
                                    'fold_{}'.format(fold_id))
        results = {}
        for step in ('train', 'valid', 'test'):
            results[step + '_time'] = np.loadtxt(
                os.path.join(path_results, step + '_time')).item()
        for key, value in results.items():
            setattr(cv_fold, key, value)
    session.commit()
Example #16
def test_subir_archivo():
    import base64
    from sqlalchemy.orm import defer
    from digesto.model import obtener_session
    from digesto.model.Utils import md5sum
    from digesto.model.DigestoModelLocal import DigestoModelLocal
    from digesto.model.entities.Digesto import Archivo

    with obtener_session() as session:
        a = session.query(Archivo).filter(
            Archivo.nombre == 'prueba.pdf').options(
                defer('contenido')).one_or_none()
        if a:
            session.delete(a)
            session.commit()

    longitud_binaria_original = 0
    longitud_b64_original = 0
    b64_contenido_original = ''
    md5_original = ''
    aid = ''
    with open('test/prueba.pdf', 'rb') as f:
        contenido = f.read()
        ''' save control variables '''
        longitud_binaria_original = len(contenido)
        b64_contenido_original = base64.b64encode(contenido).decode('utf8')
        longitud_b64_original = len(b64_contenido_original)
        md5_original = md5sum(contenido)

        with obtener_session() as session:
            aid = DigestoModelLocal.subir_archivo(session, 'prueba.pdf',
                                                  contenido, 'application/pdf')
            session.commit()

            assert session.query(Archivo).filter(Archivo.id == aid).options(
                defer('contenido')).one_or_none() is not None

    with obtener_session() as session:
        archivo = DigestoModelLocal.obtener_archivo(session, aid)
        assert archivo is not None

        assert md5_original == archivo.hash_
        """ decodifico el contenido y lo guardo en un archivo """
        nombre = f"/tmp/{archivo.nombre}"
        with open(nombre, 'wb') as f:
            b64cont = archivo.contenido
            assert b64_contenido_original == b64cont
            assert len(b64cont) == longitud_b64_original

            contenido = base64.b64decode(archivo.contenido.encode())
            assert len(contenido) == longitud_binaria_original

            f.write(contenido)
        """ chequeo que el hash del archivo guardado sea igual que el de la base """
        with open(nombre, 'rb') as f:
            md5s = md5sum(f.read())
            assert md5s == archivo.hash_
        """ chequeo que el hash original sea igual al calculado del archivo escrito """
        assert md5_original == md5s
    """ limpio la base nuevamente """
Example #17
 def GET(self):
     session = database.Session()
     with AutoClose(session) as ac:
         allnews = (session.query(News)
                    .options(defer(News.content), defer(News.image))
                    .order_by(News.id.desc()).limit(6).all())
         project = (session.query(Project)
                    .options(defer(Project.content))
                    .order_by(func.random()).first())
         partners = session.query(Partner).all()
         tpl = WebSiteBasePage.jinja2_env.get_template('Index.html')
         return tpl.render(news=allnews, project=project, partners=partners)
Example #18
def score_submission(session, submission_id):
    """Score a submission and change its state to 'scored'

    Parameters
    ----------
    session : :class:`sqlalchemy.orm.Session`
        The session to directly perform the operation on the database.
    submission_id : int
        submission id

    Raises
    ------
    ValueError :
        when the state of the submission is not 'tested'
        (only a submission with state 'tested' can be scored)
    """
    submission = select_submission_by_id(session, submission_id)
    if submission.state != 'tested':
        raise ValueError('Submission state must be "tested"'
                         ' to score, not "{}"'.format(submission.state))

    # We are conservative:
    # only score if all stages (train, test, validation)
    # were completed. The submission_on_cv_fold compute_*_scores methods can be
    # called manually if needed for submissions in various error states.
    all_cv_folds = (session.query(SubmissionOnCVFold).filter_by(
        submission_id=submission_id).options(defer("full_train_y_pred"),
                                             defer("test_y_pred")).all())
    all_cv_folds = sorted(all_cv_folds, key=lambda x: x.id)
    for submission_on_cv_fold in all_cv_folds:
        submission_on_cv_fold.session = session
        submission_on_cv_fold.compute_train_scores()
        submission_on_cv_fold.compute_valid_scores()
        submission_on_cv_fold.compute_test_scores()
        submission_on_cv_fold.state = 'scored'
    session.commit()
    # TODO: We are not managing the bagged score.
    # submission.compute_test_score_cv_bag(session)
    # submission.compute_valid_score_cv_bag(session)
    # Means and stds were constructed on demand by fetching fold times.
    # It was slow because submission_on_folds also contain possibly large
    # predictions. If postgres solves this issue (which can be tested on
    # the mean and std scores on the private leaderboard), the
    # corresponding columns (which are now redundant) can be deleted in
    # Submission and this computation can also be deleted.
    submission.train_time_cv_mean = np.mean(
        [ts.train_time for ts in all_cv_folds])
    submission.valid_time_cv_mean = np.mean(
        [ts.valid_time for ts in all_cv_folds])
    submission.test_time_cv_mean = np.mean(
        [ts.test_time for ts in all_cv_folds])
    submission.train_time_cv_std = np.std(
        [ts.train_time for ts in all_cv_folds])
    submission.valid_time_cv_std = np.std(
        [ts.valid_time for ts in all_cv_folds])
    submission.test_time_cv_std = np.std([ts.test_time for ts in all_cv_folds])
    submission.state = 'scored'
    session.commit()
Example #19
def get_journal_data(query_string=None):
    journal_meta_query = Journal.query.options(defer('api_raw_crossref'),
                                               defer('api_raw_issn'))
    if query_string:
        journal_meta_query = journal_meta_query.filter(
            or_(Journal.title.ilike(u"%{}%".format(query_string)),
                Journal.publisher.ilike(u"%{}%".format(query_string))))
    journal_meta = journal_meta_query.all()
    return journal_meta
Example #20
    def get(self):
        # get the topics
        topics = self.session.query(Topic)
        topics = topics.options(defer(Topic.meta))
        topics = topics.options(defer(Topic.content))
        topics = topics.options(defer(Topic.order))
        topics = topics.order_by(Topic.title)

        # get the news
        news = self.session.query(News)
        news = news.options(defer(News.meta))
        news = news.options(defer(News.content))
        news = news.options(defer(News.order))
        news = news.order_by(News.title)

        # get the forms
        forms = self.session.query(FormDefinition)
        forms = forms.options(defer(FormDefinition.definition))
        forms = forms.options(defer(FormDefinition.checksum))
        forms = forms.order_by(FormDefinition.title)

        # get the resources
        resources = self.session.query(Resource)
        resources = resources.options(defer(Resource.timezone))
        resources = resources.order_by(Resource.title)

        return {
            'topics': topics.all(),
            'news': news.all(),
            'forms': forms.all(),
            'resources': resources.all(),
        }
Example #21
File: app.py Project: schaiana/BCD
def gerarProvaOpcoes():
	msgErro=''
	msgSucesso=''
	form = FormGeraProva()
	idDisciplina = request.args['idDisciplina']
	select_assuntos = Assuntos.query.filter_by(idDisciplina=idDisciplina).all()
	assuntos = [(str(x.idAssunto), x.nmAssunto) for x in select_assuntos]
	form.idsAssuntos.choices = assuntos
	if form.validate_on_submit():
		# initial query with join and the discipline filter
		qyQuestoes = Questoes.query.join(Assuntos).join(Disciplinas).filter_by(idDisciplina=idDisciplina)

		# filter by question type
		qyQuestoes = qyQuestoes.filter(Questoes.nmTipo==form.tipoQuestao.data)

		# add the subject filter
		if form.idsAssuntos.data:
			qyQuestoes = qyQuestoes.filter(Questoes.idAssunto.in_(form.idsAssuntos.data))

		# add filter to keep only new (previously unused) questions
		if form.somenteNovas.data == "Sim":
			qyHistorico = Historico.query.options(defer('*')).add_column('idQuestao')
			qyQuestoes = qyQuestoes.filter(Questoes.idQuestao.notin_(qyHistorico))

		# add filter to exclude questions used on or after dtLimite
		if form.dtLimite.data:
			qyQuestoesExcluir = Historico.query.join(Provas).options(defer('*')).add_column('idQuestao').filter(Provas.dtRealizacao>=form.dtLimite.data)
			qyQuestoes = qyQuestoes.filter(Questoes.idQuestao.notin_(qyQuestoesExcluir))

		qyQuestoes = qyQuestoes.order_by('RANDOM()').limit(form.nQuestoes.data)

		numeroQuestoesEncontradas = qyQuestoes.count()
		if numeroQuestoesEncontradas == 0:
			msgErro = 'Nenhuma questão foi encontrada!'

		else:
			prova = Provas(dtRealizacao=datetime.datetime.now())
			db.session.add(prova)

			# flush so the exam row is inserted and the prova object gets its idProva
			db.session.flush()

			selectQuestoes = qyQuestoes.all()

			for questao in selectQuestoes:
				inseriHistorico = Historico(idProva=prova.idProva, idQuestao=questao.idQuestao, idAssunto=questao.idAssunto,
				                            idDisciplina=questao.idDisciplina, idUsuario=session.get('idUsuario'))
				db.session.add(inseriHistorico)

			msgSucesso = 'A prova foi gerada com sucesso'
			if numeroQuestoesEncontradas<form.nQuestoes.data:
				msgSucesso = '{}<br>Porém somente {} questões foram encontradas'.format(msgSucesso,
				                                                                        (form.nQuestoes.data-numeroQuestoesEncontradas))
			db.session.commit()
	return render_template('gerarProvaOpcoes.html', title='Listar Assuntos', form=form, assuntos=select_assuntos, msgErro=msgErro, msgSucesso=msgSucesso)
Example #22
        def user(request):
            environ = request.environ
            cached = environ.get('auth.user_obj')

            # Check that the cached value is in the current DBSession (and
            # therefore can load fields from DB).
            if cached is not None and cached in DBSession:
                return cached

            # Username, password and token are validated here.
            user_id = request.authenticated_userid

            user = DBSession.query(User).filter(
                (User.id == user_id) if user_id is not None
                else (User.keyname == 'guest'),
            ).options(
                defer(User.description),
                defer(User.password_hash),
                defer(User.oauth_subject),
                defer(User.oauth_tstamp),
            ).one()

            if user.disabled:
                raise UserDisabledException()

            # Update last_activity if more than activity_delta time passed, but
            # only once per request.
            if cached is None:
                # Make locals in order to avoid SA session expiration issues
                user_id, user_la = user.id, user.last_activity

                delta = self.options['activity_delta']
                if (
                    (user_la is None or (datetime.utcnow() - user_la) > delta)
                    and not request.session.get('invite', False)
                ):

                    def update_last_activity(request):
                        with transaction.manager:
                            DBSession.query(User).filter_by(
                                principal_id=user_id,
                                last_activity=user_la,
                            ).update(dict(last_activity=datetime.utcnow()))

                    request.add_finished_callback(update_last_activity)

            # Store essential user details in request's environ
            environ['auth.user'] = dict(
                id=user.id, keyname=user.keyname,
                display_name=user.display_name,
                language=user.language)

            environ['auth.user_obj'] = user
            return user
Example #23
 def get_user_by_username(username: str) -> User:
     """
     Get a single user from the database based on their username.
     :param username: Username which uniquely identifies the user.
     :return: The result of the database query.
     """
     return User.query\
         .filter_by(username=username)\
         .filter(User.deleted.is_(False))\
         .options(defer('profilepic'), defer('profilepic_name'))\
         .first()
Example #24
    def works(self, facets=None, pagination=None):
        """Find Works that will go together in this Lane.

        Works will:

        * Be in one of the languages listed in `languages`,
          and not one of the languages listed in `exclude_languages`.

        * Be filed under one of the genres listed in `self.genre_ids` (or, if
          `self.include_subgenres` is True, any of those genres'
          subgenres).

        * Have the same appeal as `self.appeal`, if `self.appeal` is present.

        * Are intended for the audience in `self.audience`.

        * Are fiction (if `self.fiction` is True), or nonfiction (if fiction
          is false), or of the default fiction status for the genre
          (if fiction==FICTION_DEFAULT_FOR_GENRE and all genres have
          the same default fiction status). If fiction==None, no fiction
          restriction is applied.

        * Have a delivery mechanism that can be rendered by the
          default client.

        * Have an unsuppressed license pool.
        """

        q = self._db.query(Work).join(Work.presentation_edition)
        q = q.join(Work.license_pools).join(LicensePool.data_source).join(
            LicensePool.identifier)
        q = q.options(
            contains_eager(Work.license_pools),
            contains_eager(Work.presentation_edition),
            contains_eager(Work.license_pools, LicensePool.data_source),
            contains_eager(Work.license_pools,
                           LicensePool.presentation_edition),
            contains_eager(Work.license_pools, LicensePool.identifier),
            defer(Work.presentation_edition, Edition.extra),
            defer(Work.license_pools, LicensePool.presentation_edition,
                  Edition.extra),
        )
        q = self._defer_unused_opds_entry(q)

        if self.genre_ids:
            q = q.join(Work.work_genres)
            q = q.options(contains_eager(Work.work_genres))
            q = q.filter(WorkGenre.genre_id.in_(self.genre_ids))

        q = self.apply_filters(q, facets, pagination, Work, Edition)
        if not q:
            # apply_filters may return None in subclasses of Lane
            return None
        return q
Example #25
 def get_user_by_email(email: str) -> User:
     """
     Get a single user from the database based on their email.
     :param email: Email which uniquely identifies the user.
     :return: The result of the database query.
     """
     return User.query\
         .filter_by(email=email)\
         .filter(User.deleted.is_(False))\
         .options(defer('profilepic'), defer('profilepic_name'))\
         .first()
Example #26
def fetch_medicine_list(p_id):
    try:
        session = request_session()
        query = session.query(MedicinePerPatient)
        query = query.filter(MedicinePerPatient.patient_user_id == p_id)
        query = query.options(defer(MedicinePerPatient.intake_state), defer(MedicinePerPatient.intake_time))
        # query = query.filter(Patient.patient_user_id == MedicinePerPatient.patient_user_id).filter(Medicine.medicine_id == MedicinePerPatient.medicine_id)
        result = query.all()
        session.close()
        return result
    except SQLAlchemyError:
        return False
Example #27
 def create_filter_query(self, _disable_eager_loads):
     strategy = lazyload if _disable_eager_loads else subqueryload
     return db.session.query(db.Tag) \
         .join(db.TagCategory) \
         .options(
             defer(db.Tag.first_name),
             defer(db.Tag.suggestion_count),
             defer(db.Tag.implication_count),
             defer(db.Tag.post_count),
             strategy(db.Tag.names),
             strategy(db.Tag.suggestions).joinedload(db.Tag.names),
             strategy(db.Tag.implications).joinedload(db.Tag.names))
Example #28
def visit_create_json_view(element, ddlcompiler, **kwargs):
    name = element.name
    base_query = element.query
    json_column = element.json_column
    json_schema = element.json_schema
    extract_date_parts = element.extract_date_parts

    columns = []
    properties = get_properties(json_schema)
    element.columns = []
    for p in properties:
        if isinstance(p, Array):
            continue

        column = p.json_func(json_column, p.path)
        column_label = "%s.%s" % (json_column.name, p.path)

        if extract_date_parts and \
           isinstance(p, (DateTime, DateTimeNoTZ, Date)):
            for part in extract_date_parts:
                part_column = func.date_part_immutable(part, column)
                columns.append(part_column.label(
                    "%s_%s" % (column_label, part)))
            p.date_parts = extract_date_parts

        columns.append(column.label(column_label))
        p.column_name = column_label
        element.columns.append(p)

    # Don't include columns in the view that are part of an == condition in
    # the WHERE clause.
    for where in inspection.inspect(base_query).whereclause:
        if where.operator == operator.eq:
            for expr in [where.left, where.right]:
                if isinstance(expr, Column):
                    # Curiously, we can't seem to defer the Column object
                    # expr._Annotated__element itself.
                    column_name = expr._Annotated__element.name
                    base_query = base_query.options(defer(column_name))

    # Don't include the JSON column in the view.
    base_query = base_query.options(defer(json_column)) 
    query = base_query.add_columns(*columns)

    create_view = CreateView(name, query, replace=element.replace)
    create_indexes = CreateIndexes(
            base_query, columns, drop_existing=element.drop_existing_indexes)

    view_sql = visit_create_view(create_view, ddlcompiler)
    indexes_sql = visit_create_indexes(create_indexes, ddlcompiler)

    return view_sql + "; " + indexes_sql
Example #29
 def rebuild(self):
     self.repositories = self.app.install_model.context.current.query(
         self.app.install_model.ToolShedRepository).options(
             defer(self.app.install_model.ToolShedRepository.metadata),
             joinedload('tool_dependencies').
             subqueryload('tool_shed_repository').options(
                 defer(self.app.install_model.ToolShedRepository.metadata)),
         ).all()
     repos_by_tuple = defaultdict(list)
     for repository in self.repositories + self.local_repositories:
         repos_by_tuple[(repository.tool_shed, repository.owner,
                         repository.name)].append(repository)
     self.repos_by_tuple = repos_by_tuple
Example #30
    def test_state_change_col_to_deferred(self):
        """Behavioral test to verify the current activity of loader callables."""

        users, User = self.tables.users, self.classes.User


        mapper(User, users)

        sess = create_session()

        # deferred attribute option, gets the LoadDeferredColumns
        # callable
        u1 = sess.query(User).options(defer(User.name)).first()
        assert isinstance(
                    attributes.instance_state(u1).callables['name'],
                    strategies.LoadDeferredColumns
                )

        # expire the attr, it gets the InstanceState callable
        sess.expire(u1, ['name'])
        assert isinstance(
                    attributes.instance_state(u1).callables['name'],
                    state.InstanceState
                )

        # load it, callable is gone
        u1.name
        assert 'name' not in attributes.instance_state(u1).callables

        # same for expire all
        sess.expunge_all()
        u1 = sess.query(User).options(defer(User.name)).first()
        sess.expire(u1)
        assert isinstance(
                    attributes.instance_state(u1).callables['name'],
                    state.InstanceState
                )

        # load over it.  everything normal.
        sess.query(User).first()
        assert 'name' not in attributes.instance_state(u1).callables

        sess.expunge_all()
        u1 = sess.query(User).first()
        # for non present, still expires the same way
        del u1.name
        sess.expire(u1)
        assert 'name' in attributes.instance_state(u1).callables
Example #32
    def get(self):
        parser = reqparse.RequestParser()
        parser.add_argument('family', type=str)
        parser.add_argument('format', type=str)
        parser.add_argument('element', type=str, action="append")
        args = parser.parse_args()

        # never load the pseudo column for converted_from entries
        # (would take much more time and memory)
        q = (Pseudopotential.query
             .options((joinedload("converted_from")
                       .defer('pseudo'))))

        if args['family']:
            q = (q.join(PseudopotentialFamily)
                  .filter(PseudopotentialFamily.name == args['family']))

        if args['format']:
            q = q.filter_by(format=args['format'])

        if args['element']:
            q = q.filter(Pseudopotential.element.in_(args['element']))

        if any(args.values()):
            return [marshal(p, pseudo_resource_fields) for p in q]
        else:
            # defer loading of the pseudo column for the primary
            # pseudo as well (reduces loading time by 80%)
            q = q.options(defer('pseudo'))
            return [marshal(p, pseudo_list_fields) for p in q]
Example #33
    def test_path_entity(self):
        """test the legacy *addl_attrs argument."""

        User = self.classes.User
        Order = self.classes.Order
        Item = self.classes.Item

        users = self.tables.users
        orders = self.tables.orders
        items = self.tables.items
        order_items = self.tables.order_items

        mapper(User, users, properties={
                "orders": relationship(Order, lazy="joined")
            })
        mapper(Order, orders, properties={
                "items": relationship(Item, secondary=order_items, lazy="joined")
            })
        mapper(Item, items)

        sess = create_session()

        exp = ("SELECT users.id AS users_id, users.name AS users_name, "
            "items_1.id AS items_1_id, orders_1.id AS orders_1_id, "
            "orders_1.user_id AS orders_1_user_id, orders_1.address_id "
            "AS orders_1_address_id, orders_1.description AS "
            "orders_1_description, orders_1.isopen AS orders_1_isopen "
            "FROM users LEFT OUTER JOIN orders AS orders_1 "
            "ON users.id = orders_1.user_id LEFT OUTER JOIN "
            "(order_items AS order_items_1 JOIN items AS items_1 "
                "ON items_1.id = order_items_1.item_id) "
            "ON orders_1.id = order_items_1.order_id")

        q = sess.query(User).options(defer(User.orders, Order.items, Item.description))
        self.assert_compile(q, exp)
Example #34
    def test_options(self):
        """Options on a mapper to create deferred and undeferred columns"""

        orders, Order = self.tables.orders, self.classes.Order

        mapper(Order, orders)

        sess = create_session()
        q = sess.query(Order).order_by(Order.id).options(defer('user_id'))

        def go():
            q.all()[0].user_id

        self.sql_eq_(go, [("SELECT orders.id AS orders_id, "
                           "orders.address_id AS orders_address_id, "
                           "orders.description AS orders_description, "
                           "orders.isopen AS orders_isopen "
                           "FROM orders ORDER BY orders.id", {}),
                          ("SELECT orders.user_id AS orders_user_id "
                           "FROM orders WHERE orders.id = :param_1", {
                               'param_1': 1
                           })])
        sess.expunge_all()

        q2 = q.options(undefer('user_id'))
        self.sql_eq_(q2.all, [("SELECT orders.id AS orders_id, "
                               "orders.user_id AS orders_user_id, "
                               "orders.address_id AS orders_address_id, "
                               "orders.description AS orders_description, "
                               "orders.isopen AS orders_isopen "
                               "FROM orders ORDER BY orders.id", {})])
Example #35
    def _build_list_query(self, params, **kwargs):
        # TODO: What about joins?
        cursor = self._set_list_query_opts(self.Model.query, params)
        filter_params = self._prepare_filter_params(params)
        filter_params.update(kwargs)
        fields = self._get_show_fields(params)
        if fields:
            opts = []
            for field in self.Model.__table__.columns.keys():
                if field in fields or field in ('id', ):
                    opts.append(undefer(getattr(self.Model, field)))
                else:
                    opts.append(defer(getattr(self.Model, field)))
            relation_properties = filter(
                lambda p: isinstance(p, properties.RelationshipProperty),
                self.Model.__mapper__.iterate_properties)
            for field in relation_properties:
                if field.key in fields:
                    cursor = cursor.options(
                        joinedload_all(getattr(self.Model, field.key)))
            if opts:
                cursor = cursor.options(*opts)

        for name, val in filter_params.iteritems():
            fltr = self._build_query_item(name, val)
            if fltr is not None:
                cursor = cursor.filter(fltr)
        return cursor
Example #36
def _select(model, *fields):
    pk_columns = _primary_key_names(model)
    all_columns = _get_columns(model).keys()
    relations = _get_relations(model).keys()

    fields = list(set(fields)) if fields else all_columns

    # select all column properties if none is specified
    for attr in fields:
        if attr in all_columns:
            break
    else:
        fields.extend(all_columns)

    options = []

    # ensure PKs are included and defer unrequested attributes (including related)
    # NB: we intentionally allow fields like "related.attribute" to pass through

    for attr in (c.key for c in _get_mapper(model).iterate_properties):
        if attr not in fields:
            if attr in pk_columns:
                fields.append(attr)
            elif attr in all_columns:
                options.append(defer(attr))
            # relationships
            elif attr in relations:
                options.append(lazyload(attr))
    return options
Example #38
    def test_options(self):
        """Options on a mapper to create deferred and undeferred columns"""

        orders, Order = self.tables.orders, self.classes.Order


        mapper(Order, orders)

        sess = create_session()
        q = sess.query(Order).order_by(Order.id).options(defer('user_id'))

        def go():
            q.all()[0].user_id

        self.sql_eq_(go, [
            ("SELECT orders.id AS orders_id, "
             "orders.address_id AS orders_address_id, "
             "orders.description AS orders_description, "
             "orders.isopen AS orders_isopen "
             "FROM orders ORDER BY orders.id", {}),
            ("SELECT orders.user_id AS orders_user_id "
             "FROM orders WHERE orders.id = :param_1",
             {'param_1':1})])
        sess.expunge_all()

        q2 = q.options(undefer('user_id'))
        self.sql_eq_(q2.all, [
            ("SELECT orders.id AS orders_id, "
             "orders.user_id AS orders_user_id, "
             "orders.address_id AS orders_address_id, "
             "orders.description AS orders_description, "
             "orders.isopen AS orders_isopen "
             "FROM orders ORDER BY orders.id",
             {})])
Example #39
 def resolve_tx(self, info):
     return (
         db.session.query(Event)
         .options(defer(Event.data))
         .filter_by(block_id=self.block_id, log_index=self.log_index)
         .one()
     )
Example #40
    def get(self):
        session = self.db_session
        # query demo
        user_obj = session.query(User) \
            .options(defer(User.email), lazyload("address_obj_s").load_only("name")).first()
        user_name = self.login_user.email if self.login_user else user_obj.name

        if user_obj:
            if self.login_user:
                self.render_template(DASHBOARD_DIR_NAME, 'page_user_dashboard.html', name=user_name,
                                     address_obj_s=user_obj.address_obj_s)
            else:
                self.render_template(DASHBOARD_DIR_NAME, 'page_dashboard.html', name=user_name,
                                     address_obj_s=user_obj.address_obj_s)
        else:
            obj = User(name="jiang", email="*****@*****.**")
            session.add(obj)
            session.flush()
            address_1 = Address(name="address name 1", address="Rd 1", user_id=obj.id)
            address_2 = Address(name="address name 2", address="Rd 2", user_id=obj.id)
            session.add(address_1)
            session.add(address_2)
            session.commit()
            self.render_template(DASHBOARD_DIR_NAME, 'page_dashboard.html', name=user_name,
                                 address_obj_s=obj.address_obj_s)
Example #41
 def test_defer_many_cols(self):
     # with [ticket:2778], this goes from 50805 to 32817,
     # as it should be fewer function calls than the baseline
     A = self.classes.A
     s = Session()
     s.query(A).options(
         *[defer(letter) for letter in ['x', 'y', 'z', 'p', 'q', 'r']]).\
         all()
Example #42
 def test_defer_many_cols(self):
     # with [ticket:2778], this goes from 50805 to 32817,
     # as it should be fewer function calls than the baseline
     A = self.classes.A
     s = Session()
     s.query(A).options(
         *[defer(letter) for letter in ["x", "y", "z", "p", "q", "r"]]
     ).all()
Example #43
    def test_deref(self):
        trans = DBSession.query(Transaction).options(
            subqueryload(Transaction.incomeTagGroup).subqueryload(TagGroup.tags),
            subqueryload(Transaction.expenseTagGroup).subqueryload(TagGroup.tags)
        ).options(defer('_user_id'))

        tran = trans.first()
        print(tran)
        print(json.dumps({'fsd': tran.incomeTagGroup.tags[0]}))
Example #44
 def test_subclass_option_pathing(self):
     from sqlalchemy.orm import defer
     sess = create_session()
     dilbert = sess.query(Person).\
             options(defer(Engineer.machines, Machine.name)).\
             filter(Person.name == 'dilbert').first()
     m = dilbert.machines[0]
     assert 'name' not in m.__dict__
     eq_(m.name, 'IBM ThinkPad')
Example #45
    def dynamic_field_data(self, dynamic_field_id, field_definition_id):
        # .one() so we get a single Fields row, not a list
        dynamic_field = self.session.query(models.Fields).filter_by(id=dynamic_field_id).one()

        dynamic_table = util.get_table(dynamic_field.foreign_key_table_object_id, 'id')

        # defer each administrative column individually
        return (self.session.query(dynamic_table).
                options(defer('id'), defer('name'), defer('order'), defer('description'),
                        defer('date_created'), defer('last_modified'), defer('active'),
                        defer('organization_id'), defer('display_name')).
                filter_by(id=field_definition_id).first())
Example #46
def _user_has_boards(self):
    # Cached version of User.boards()
    key = 'user/board/count/' + str(self.id)
    count = cache.get(key)
    if not count:
        count = Board.query.filter(
            Board.userid.in_(self.user_organizations_owned_ids())).options(
            defer(Board.description)).count()
        cache.set(key, count, timeout=300)
    return bool(count)
Example #47
    def test_defer_on_wildcard_subclass(self):
        # pretty much the same as load_only except doesn't
        # exclude the primary key

        s = Session()
        q = s.query(Manager).options(
            defer(".*"), undefer("status"))
        self.assert_compile(
            q,
            "SELECT managers.status AS managers_status "
            "FROM people JOIN managers ON "
            "people.person_id = managers.person_id ORDER BY people.person_id"
        )
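The comment above relates the wildcard form to load_only. A rough side-by-side sketch of the two spellings, assuming the same Manager mapping and an open session (load_only always keeps the primary key loaded, while the wildcard defer covers it too unless explicitly undeferred):

# Sketch only: Manager and session stand in for the test fixtures used above.
from sqlalchemy.orm import defer, undefer, load_only

# wildcard defer: every column is deferred, including the primary key,
# except those explicitly undeferred afterwards
q1 = session.query(Manager).options(defer('*'), undefer(Manager.status))

# load_only: same intent, but primary key columns stay loaded
q2 = session.query(Manager).options(load_only(Manager.status))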
Example #48
 def test_defer_super_name_on_subclass(self):
     s = Session()
     q = s.query(Manager).options(defer("name"))
     self.assert_compile(
         q,
         "SELECT managers.person_id AS managers_person_id, "
         "people.person_id AS people_person_id, "
         "people.company_id AS people_company_id, "
         "people.type AS people_type, managers.status AS managers_status, "
         "managers.manager_name AS managers_manager_name "
         "FROM people JOIN managers "
         "ON people.person_id = managers.person_id "
         "ORDER BY people.person_id"
     )
Example #49
def query_model(session, sa_class, only_pk=False):
    """
    Returns a query for *sa_class* that doesn't load any relationship
    attribute.
    """
    opts = (noload('*'),)
    if only_pk:
        pk = get_pk(sa_class)
        opts += tuple(
            defer(prop.key)
            for prop in class_mapper(sa_class).iterate_properties
            if isinstance(prop, ColumnProperty)
            if prop.key != pk)
    return session.query(sa_class).options(*opts)
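A minimal usage sketch for the helper above, assuming a hypothetical User model (with a posts relationship) and an existing session:

# Relationships are never loaded (noload('*')); with only_pk=True every
# non-primary-key column is deferred as well, so iteration stays cheap.
for user in query_model(session, User, only_pk=True):
    print(user.id)      # primary key: loaded up front
    # user.name         # any other column would trigger a deferred load
    # user.posts        # relationship: not loaded at all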
Example #50
def login():
    form = LoginForm(request.form)
    if form.validate_on_submit():
        try:
            user = User.query.filter(User.email == form.email.data).options(defer("_password")).one()
        except NoResultFound:
            flash("No such email exists.")
        else:
            if user.match_password(form.password.data):
                login_user(user)
                flash("You are successfully logged in.")
                return redirect(url_for("home.home"))
            else:
                flash("Wrong email/password combination. Try again!")
    if current_user.is_authenticated():
        flash("You are already logged in.")
        redirect("/")
    return render_template("users/login.html", form=form)
Example #51
def committee_meeting_list(id):
    cte = Committee.query.get(id)
    if not cte:
        abort(404)

    query = CommitteeMeeting.query.filter(CommitteeMeeting.committee == cte).order_by(desc(CommitteeMeeting.date))

    # defer some expensive fields if they're not needed
    fields = get_api_fields()
    if fields:
        for f in ['body', 'summary']:
            if f not in fields:
                query = query.options(defer(f))

        if not any(f == 'committee' or f.startswith('committee.') for f in fields):
            query = query.options(noload('committee'))

    return api_list_items(query, CommitteeMeetingSchema)
Example #52
def create_dictionaries(discussion_id=None):
    db = Discussion.default_db
    by_main_lang = defaultdict(list)
    default_locales = get_config().get(
        'available_languages', 'fr_CA en_CA').split()
    only_for_lang = None
    for d_id, locales in db.query(
            Discussion.id, Discussion.preferred_locales).all():
        locales = locales.split() if locales else default_locales
        main_lang = locales[0].split('_')[0]
        by_main_lang[main_lang].append(d_id)
        if discussion_id == d_id:
            only_for_lang = main_lang
    for lang, discussion_ids in by_main_lang.iteritems():
        if only_for_lang and only_for_lang != lang:
            continue
        dirname = join(nlp_data, lang)
        if not exists(dirname):
            makedirs(dirname)
        corpus_fname = join(dirname, CORPUS_FNAME)
        if exists(corpus_fname):
            corpus = IdMmCorpus(corpus_fname)
            doc_count = db.query(Content).with_polymorphic(
                Content).options(defer(Content.like_count)).join(
                Discussion).filter(Discussion.id.in_(discussion_ids)
                                   ).count()
            if corpus.num_docs == doc_count:
                if only_for_lang:
                    return corpus
                continue
        tokenizer = Tokenizer(lang)
        bowizer = BOWizer(lang, tokenizer, False)
        posts = db.query(Content).join(Discussion).filter(
            Discussion.id.in_(discussion_ids))
        bowizer.phrases.add_vocab((
            tokenizer.tokenize_post(post) for post in posts))
        bowizer.dictionary.add_documents((
            bowizer.phrases[tokenizer.tokenize_post(post)]
            for post in posts))
        IdMmCorpus.serialize(corpus_fname, (
            (post.id, bowizer.post_to_bow(post))
            for post in posts))
        bowizer.save()
        return IdMmCorpus(corpus_fname)
Example #53
def _select_options(model, *fields):
    """Projects given columns to be included in query output
    """
    pk_columns = set(_get_primary_keys(model))
    all_columns = set(_get_columns(model))
    relations = set(_get_relations(model))

    fields = set(fields) | pk_columns if fields else all_columns
    options = []

    # include PKs and defer unrequested attributes (including related)
    # NB: intentionally allows fields like "related.attribute" to pass through

    for key in (all_columns - fields):
        options.append(defer(getattr(model, key)))
    for key in (relations & fields):
        options.append(eagerload(getattr(model, key)))

    return options
Example #54
def defer_except(query, columns):
    """
    Defer loading of all columns in the given query, except the ones given.

    This function is very useful when working with models that have a myriad
    of columns and you want to defer loading of most of them.

        >>> from sqlalchemy_utils import defer_except
        >>> query = session.query(Article)
        >>> query = defer_except(query, ['id', 'name'])

    :param columns: names of the columns that should not be deferred
    """
    model = query._entities[0].entity_zero.class_
    for property_ in inspect(model).attrs:
        if isinstance(property_, ColumnProperty):
            column = property_.columns[0]
            if column.name not in columns:
                query = query.options(defer(property_.key))
    return query
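A roughly equivalent effect is available with the built-in loader options. A hedged sketch, assuming the same Article model and an existing session:

from sqlalchemy.orm import defer, undefer, load_only

# defer everything, then undefer the columns to keep
query = session.query(Article).options(
    defer('*'), undefer(Article.id), undefer(Article.name))

# or, more directly (primary key columns are always kept by load_only)
query = session.query(Article).options(load_only(Article.name))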
Example #55
 def create_filter_query(self, disable_eager_loads):
     strategy = lazyload if disable_eager_loads else subqueryload
     return db.session.query(db.Post) \
         .options(
             lazyload('*'),
             # use config optimized for official client
             # defer(db.Post.score),
             # defer(db.Post.favorite_count),
             # defer(db.Post.comment_count),
             defer(db.Post.last_favorite_time),
             defer(db.Post.feature_count),
             defer(db.Post.last_feature_time),
             defer(db.Post.last_comment_creation_time),
             defer(db.Post.last_comment_edit_time),
             defer(db.Post.note_count),
             defer(db.Post.tag_count),
             strategy(db.Post.tags).subqueryload(db.Tag.names),
             strategy(db.Post.tags).defer(db.Tag.post_count),
             strategy(db.Post.tags).lazyload(db.Tag.implications),
             strategy(db.Post.tags).lazyload(db.Tag.suggestions))
Example #56
0
def get_plate(json):
    ids = extract_ids(json)
    plates = []
    wells = []
    channels = []
    for plate_id in ids:
        p = Plate.query.get(plate_id)
        if p:
            plates.append(p)
            wells += p.wells
            channels += (Channel.query.options(defer('data'))
                         .join(Well).join(Plate)
                         .filter(Plate.id == plate_id).all())
        else:
            socketio.emit('get_failed', {PLATE_NAMESPACE: [plate_id]},
                          namespace=make_namespace(PLATE_NAMESPACE))

    well_dump = well_list_schema.dumps(wells, many=True, separators=(',', ':'))
    emit_list(WELL_NAMESPACE, well_dump)

    channel_dump = channel_list_schema.dumps(channels, many=True, separators=(',', ':'))
    emit_list(CHANNEL_NAMESPACE, channel_dump)

    plate_dump = plate_schema.dumps(plates, many=True, separators=(',', ':'))
    emit_get(PLATE_NAMESPACE, plate_dump)
Example #57
0
def _user_boards(self):
    return Board.query.filter(
        Board.userid.in_(self.user_organizations_owned_ids())).options(
        defer(Board.description)).all()
Example #58
0
 def defer_all_but(cls, *names):
     names = names + ('id',)
     columns = [x.name for x in cls.__table__.columns]
     return [defer(x) for x in columns if x not in names]
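A brief usage sketch, not taken from the example: it assumes defer_all_but is decorated as a @classmethod on a declarative model (the hypothetical Article from the earlier sketch would do, if it defined this method) and a SQLAlchemy version that still accepts string column names in defer(), i.e. before 2.0.

# Load only the primary key and 'title' up front; defer every other column.
rows = session.query(Article).options(*Article.defer_all_but('title')).all()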
Example #59
0
    def render(self, session, **arguments):
        bunker_bucket = {None: None}
        rack_bucket = defaultdict(dict)

        # Cache information for faster access later
        for bunker in session.query(Bunker).options(subqueryload('parents')):
            if "." not in bunker.name:
                continue
            bucket, building = bunker.name.split(".", 1)  # pylint: disable=W0612
            bunker_bucket[bunker] = bucket.upper()

        q = session.query(Building).options(subqueryload('parents'))
        buildings = q.all()  # pylint: disable=W0612

        def_env = NetworkEnvironment.get_unique_or_default(session)

        HwRack = aliased(Rack)
        NetLoc = aliased(Location)

        # Query pairs of (rack, network location used inside the rack)
        q = session.query(HwRack, NetLoc)
        q = q.filter(HardwareEntity.location_id == HwRack.id)
        q = q.filter(HardwareEntity.model_id == Model.id)
        q = q.filter(Model.model_type != VirtualMachineType.VirtualMachine)
        q = q.filter(Model.model_type != VirtualMachineType.VirtualAppliance)
        q = q.filter(Interface.hardware_entity_id == HardwareEntity.id)
        q = q.filter(AddressAssignment.interface_id == Interface.id)
        q = q.filter(AddressAssignment.network_id == Network.id)
        q = q.filter(Network.network_environment == def_env)
        q = q.filter(Network.location_id == NetLoc.id)
        q = q.options(defer(HwRack.comments),
                      defer(HwRack.fullname),
                      defer(HwRack.default_dns_domain_id),
                      defer(HwRack.rack_row),
                      defer(HwRack.rack_column),
                      joinedload(HwRack.parents),
                      defer(NetLoc.comments),
                      defer(NetLoc.fullname),
                      defer(NetLoc.default_dns_domain_id))
        q = q.distinct()

        rack_bucket = defaultdict(set)
        for rack, net_loc in q:
            bucket = bunker_bucket[net_loc.bunker]
            rack_bucket[rack].add(bucket)

        violation_ids = []
        updates = []
        for rack in sorted(rack_bucket.keys(), key=attrgetter("name")):
            buckets = rack_bucket[rack]
            if len(buckets) > 1:
                violation_ids.append(rack.id)
                continue

            bucket = buckets.pop()
            if bucket:
                bunker = bucket.lower() + "." + rack.building.name
                if not rack.bunker or rack.bunker.name != bunker:
                    updates.append("aq update rack --rack %s --bunker %s" %
                                   (rack, bunker))
            elif rack.bunker:
                if rack.room:
                    new_parent = "--room %s" % rack.room.name
                else:
                    new_parent = "--building %s" % rack.building.name
                updates.append("aq update rack --rack %s %s" %
                               (rack, new_parent))

        # Take a closer look at racks using networks from multiple buckets.
        # Load all the address assignments so we can give a detailed report.
        q = session.query(AddressAssignment)
        q = q.join(Network)
        q = q.filter_by(network_environment=def_env)
        q = q.reset_joinpoint()
        q = q.join(Interface, HardwareEntity, Model)
        q = q.filter(Model.model_type != VirtualMachineType.VirtualMachine)
        q = q.options(defer('service_address_id'),
                      contains_eager('network'),
                      defer('network.cidr'),
                      defer('network.name'),
                      defer('network.ip'),
                      defer('network.side'),
                      contains_eager('interface'),
                      defer('interface.mac'),
                      defer('interface.port_group'),
                      defer('interface.model_id'),
                      defer('interface.bootable'),
                      defer('interface.default_route'),
                      defer('interface.master_id'),
                      defer('interface.comments'),
                      contains_eager('interface.hardware_entity'),
                      defer('interface.hardware_entity.comments'),
                      defer('interface.hardware_entity.model_id'),
                      defer('interface.hardware_entity.serial_no'))
        q = q.filter(HardwareEntity.location_id.in_(violation_ids))

        addr_by_rack = defaultdict(dict)
        for addr in q:
            hw_rack = addr.interface.hardware_entity.location
            net_bucket = bunker_bucket[addr.network.location.bunker]
            if net_bucket not in addr_by_rack[hw_rack]:
                addr_by_rack[hw_rack][net_bucket] = []
            addr_by_rack[hw_rack][net_bucket].append(addr)

        errors = []
        for rack_id in violation_ids:
            rack = session.query(Rack).get((rack_id,))
            rack_bucket = bunker_bucket[rack.bunker]
            buckets = addr_by_rack[rack]
            if rack_bucket:
                errors.append("Warning: {0} is part of {1:l}, but also "
                              "has networks from:".format(rack, rack.bunker))
            else:
                errors.append("Warning: {0} is not part of a bunker, but "
                              "it uses bunkerized networks:".format(rack))
            for bucket in sorted(buckets.keys()):
                if bucket == rack_bucket:
                    continue
                hws = ["%s/%s" % (addr.interface.hardware_entity.printable_name,
                                  addr.interface.name)
                       for addr in buckets[bucket]]
                hws = list(set(hws))
                hws.sort()
                names = ", ".join(hws)
                if bucket is None:
                    bucket = "(No bucket)"
                errors.append("    {0}: {1}".format(bucket, names))
            errors.append("")

        result = "\n".join(errors)
        if updates:
            result += "\n\nRacks having incorrect bunker membership:\n\n"
            result += "\n".join(updates)
        return result
Example #60
0
 def custom_draft_in(cls, campaign):
     return cls.query.filter(cls.campaign == campaign,
         cls.draft != None,  # NOQA
         cls.__table__.c.template_text != None).options(
             defer('created_at'), defer('updated_at'), defer('email'), defer('md5sum'),
             defer('fullname'), defer('firstname'), defer('lastname'), defer('data'),
             defer('opentoken'), defer('opened'), defer('rsvptoken'), defer('rsvp'),
             defer('linkgroup'), defer('nickname')
         ).all()