Example #1
  def _generate_stmt(self, id) :
    font_name = self.font_name
    font_size = self.font_size
    dots = self.dots
    glyph_box = self.glyph_box
    dot_padding = self.dot_padding
    dot_width = self.dot_width

    dot_padding_default = dot_padding.alias(name='dot_padding_default')

    dots_to_dot_widths = select([
      dots.c.id.label('id'),
      case([(dots.c.val == 0, 0.0)], else_ = ((glyph_box.c.width * font_size.c.val * dots.c.val / 20.0) + 
        case([(dot_padding.c.val != None, dot_padding.c.val)] , else_ = dot_padding_default.c.val) * (dots.c.val - 1)
      )).label('val')
    ]).select_from(dots.outerjoin(dot_padding, onclause = dots.c.id == dot_padding.c.id)).\
       where(safe_eq_comp(dots.c.id, id)).\
        where(and_(dot_padding_default.c.id == -1,
                  dots.c.id == font_name.c.id,
                  dots.c.id == font_size.c.id,
                  font_name.c.val == glyph_box.c.name,
                  glyph_box.c.unicode == "U+E1E7")).\
    cte(name='dots_to_dot_widths')

    self.register_stmt(dots_to_dot_widths)

    self.insert = simple_insert(dot_width, dots_to_dot_widths)
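A note on the pattern above: a two-branch case([(col != None, col)], else_=fallback) is just COALESCE in disguise. A minimal sketch of the equivalence (my own, assuming SQLAlchemy 1.x-style case() and stand-in tables):

    from sqlalchemy import MetaData, Table, Column, Integer, Float, case, func

    metadata = MetaData()
    # hypothetical stand-ins for the dot_padding tables used above
    dot_padding = Table('dot_padding', metadata,
                        Column('id', Integer), Column('val', Float))
    dot_padding_default = dot_padding.alias(name='dot_padding_default')

    # these two expressions are equivalent NULL-fallbacks:
    via_case = case([(dot_padding.c.val != None, dot_padding.c.val)],
                    else_=dot_padding_default.c.val)
    via_coalesce = func.coalesce(dot_padding.c.val, dot_padding_default.c.val)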
Example #2
def hourly_query_split(*stations):
    q, dt = hourly_base_query()

    for s in stations:
        gage_name = s.station_name_web
        navd88correction = s.convert_to_navd88_feet
        dry_value = s.dry_elevation

        flag, val = hourly_columns(gage_name,
                                   dry_value,
                                   navd88correction=navd88correction)

        # split to 3 columns: Observed, Estimated, Dry. Missing is just missing.
        q = q.column(
            expression.case(value=flag, whens={
                'O': val
            }, else_=None).label(gage_name))
        q = q.column(
            expression.case(value=flag, whens={
                'E': val
            }, else_=None).label(gage_name + " est"))
        q = q.column(
            expression.case(value=flag, whens={
                'D': val
            }, else_=None).label(gage_name + " dry"))
    return q, dt
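Example #2 above uses the dictionary shorthand of case() (value=/whens=), which renders a "simple CASE"; the list-of-tuples form used in most other examples renders an equivalent "searched CASE". A minimal sketch of the two spellings (my own, assuming SQLAlchemy 1.x and a hypothetical obs table):

    from sqlalchemy import MetaData, Table, Column, String, Float, case

    metadata = MetaData()
    obs = Table('obs', metadata,
                Column('flag', String), Column('val', Float))

    # simple CASE: CASE obs.flag WHEN 'O' THEN obs.val ELSE NULL END
    short_form = case(value=obs.c.flag, whens={'O': obs.c.val}, else_=None)

    # equivalent searched CASE: CASE WHEN obs.flag = 'O' THEN obs.val ELSE NULL END
    long_form = case([(obs.c.flag == 'O', obs.c.val)], else_=None)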
Example #3
    def birth_time(cls):
        hour = cast(func.extract("hour", cls.birth_datetime), String)
        minute = cast(func.extract("minute", cls.birth_datetime), String)

        hour = case([(func.length(hour) == 1, "0" + hour)], else_=hour)
        minute = case([(func.length(minute) == 1, "0" + minute)], else_=minute)
        return hour + ":" + minute
Example #4
  def _generate_stmt(self, id) :
    dynamic = self.dynamic
    anchor_x = self.anchor_x
    note_box = self.note_box
    dynamic_direction = self.dynamic_direction
    dynamic_padding = self.dynamic_padding
    staff_position = self.staff_position
    from_anchor_x = self.from_anchor_x
    
    dynamic_padding_default = dynamic_padding.alias(name='dynamic_padding_default')

    dynamic_to_staff_position = select([
      dynamic.c.id.label('id'),
      # ughh...2.0 magic number for staff
      ((dynamic_direction.c.val * case([(dynamic_padding.c.val != None, dynamic_padding.c.val)], else_=dynamic_padding_default.c.val)) +
        case([(dynamic_direction.c.val == 1,
               sql_min_max([note_box.c.y + note_box.c.height, 2.0], True)),
            (dynamic_direction.c.val == -1,
               sql_min_max([note_box.c.y, -2.0], False))])).label('val')
    ]).select_from(dynamic.outerjoin(dynamic_padding, onclause = dynamic.c.id == dynamic_padding.c.id)).\
       where(safe_eq_comp(note_box.c.id if from_anchor_x else dynamic.c.id, id)).\
       where(anchor_x.c.id == dynamic.c.id).\
       where(note_box.c.id == anchor_x.c.val).\
       where(dynamic_padding_default.c.id == -1).\
       where(dynamic.c.id == dynamic_direction.c.id).\
    cte(name='dynamic_to_staff_position')

    self.register_stmt(dynamic_to_staff_position)

    self.insert = simple_insert(staff_position, dynamic_to_staff_position)
Example #5
  def _generate_stmt(self, id) :
    font_name = self.font_name
    font_size = self.font_size
    unicode = self.unicode
    glyph_box = self.glyph_box
    glyph_stencil = self.glyph_stencil
    alignment_directive = self.alignment_directive
    writer = self.writer
    extra_eq = self.extra_eq
    
    generics_to_stencils = select([
      font_name.c.id.label('id'),
      literal(writer).label('writer'),
      literal(0).label('sub_id'),
      font_name.c.val.label('font_name'),
      font_size.c.val.label('font_size'),
      unicode.c.val.label('unicode'),
      case([(alignment_directive.c.x != None, glyph_box.c.x + (alignment_directive.c.x * glyph_box.c.width))], else_=0).label('x'),
      case([(alignment_directive.c.y != None, glyph_box.c.y + glyph_box.c.height - (alignment_directive.c.y * glyph_box.c.height))], else_=0).label('y'),
    ]).select_from(font_name.outerjoin(alignment_directive, onclause = alignment_directive.c.id == font_name.c.id)).\
        where(safe_eq_comp(font_name.c.id, id)).\
        where(and_(glyph_box.c.name == font_name.c.val,
                  glyph_box.c.unicode == unicode.c.val,
                  font_name.c.id == font_size.c.id,
                  font_name.c.id == unicode.c.id,
                  *extra_eq
                  )).\
    cte(name='generics_to_stencils')

    self.register_stmt(generics_to_stencils)

    self.insert = simple_insert(glyph_stencil, generics_to_stencils)
Example #6
  def pushDB(self, db, sensor_id, parent_domain_id):
    q = db.query(Domain.domain_id)
    q = q.filter(and_(Domain.parent_domain_id == parent_domain_id, Domain.domain_name == self.name))
    res = q.all()

    if len(res) == 0:
      o = Domain()
      o.domain_name = self.name
      o.parent_domain_id = parent_domain_id
      db.add(o)
      db.flush()

      self.oid = o.domain_id

      sd = Sensor_Domain()
      sd.domain_id = self.oid
      sd.sensor_id = sensor_id
      sd.first_seen = self.fs
      sd.last_seen = self.ls

      db.add(sd)
      db.flush()
    else:
      self.oid = res[0][0]

      q = db.query(Sensor_Domain)
      q = q.filter(and_(Sensor_Domain.domain_id == self.oid, Sensor_Domain.sensor_id == sensor_id))
      q.update({Sensor_Domain.first_seen: case([(Sensor_Domain.first_seen > self.fs, self.fs)], else_=Sensor_Domain.first_seen),
                Sensor_Domain.last_seen: case([(Sensor_Domain.last_seen < self.ls, self.ls)], else_=Sensor_Domain.last_seen)}, synchronize_session=False)

    return self.oid
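The conditional update above keeps first_seen/last_seen monotonic: each CASE is effectively LEAST/GREATEST of the stored and incoming timestamps. On backends that provide those functions (e.g. MySQL, PostgreSQL), the same intent could be written as below (a sketch in the context of pushDB, not the source's code):

    from sqlalchemy import func

    # equivalent to the CASE-based update above on backends with LEAST/GREATEST
    q.update({Sensor_Domain.first_seen: func.least(Sensor_Domain.first_seen, self.fs),
              Sensor_Domain.last_seen: func.greatest(Sensor_Domain.last_seen, self.ls)},
             synchronize_session=False)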
Example #7
def _get_orderby_clauses(order_by_list, session):
    """Sorts a set of runs based on their natural ordering and an overriding set of order_bys.
    Runs are naturally ordered first by start time descending, then by run id for tie-breaking.
    """

    clauses = []
    ordering_joins = []
    clause_id = 0
    # contrary to filters, it is not easily feasible to separately handle sorting
    # on attributes and on joined tables as we must keep all clauses in the same order
    if order_by_list:
        for order_by_clause in order_by_list:
            clause_id += 1
            (key_type, key, ascending) = SearchUtils.parse_order_by_for_search_runs(order_by_clause)
            if SearchUtils.is_attribute(key_type, "="):
                order_value = getattr(SqlRun, SqlRun.get_attribute_name(key))
            else:
                if SearchUtils.is_metric(key_type, "="):  # any valid comparator
                    entity = SqlLatestMetric
                elif SearchUtils.is_tag(key_type, "="):
                    entity = SqlTag
                elif SearchUtils.is_param(key_type, "="):
                    entity = SqlParam
                else:
                    raise MlflowException(
                        "Invalid identifier type '%s'" % key_type,
                        error_code=INVALID_PARAMETER_VALUE,
                    )

                # build a subquery first because we will join it in the main request so that the
                # metric we want to sort on is available when we apply the sorting clause
                subquery = session.query(entity).filter(entity.key == key).subquery()

                ordering_joins.append(subquery)
                order_value = subquery.c.value

            # sqlite does not support NULLS LAST expression, so we sort first by
            # presence of the field (and is_nan for metrics), then by actual value
            # As the subqueries are created independently and used later in the
            # same main query, the CASE WHEN columns need to have unique names to
            # avoid ambiguity
            if SearchUtils.is_metric(key_type, "="):
                clauses.append(
                    sql.case(
                        [(subquery.c.is_nan.is_(True), 1), (order_value.is_(None), 1)], else_=0
                    ).label("clause_%s" % clause_id)
                )
            else:  # other entities do not have an 'is_nan' field
                clauses.append(
                    sql.case([(order_value.is_(None), 1)], else_=0).label("clause_%s" % clause_id)
                )

            if ascending:
                clauses.append(order_value)
            else:
                clauses.append(order_value.desc())

    clauses.append(SqlRun.start_time.desc())
    clauses.append(SqlRun.run_uuid)
    return clauses, ordering_joins
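The CASE trick above generalizes: since SQLite has no NULLS LAST, order first by an is-NULL indicator column and then by the value itself. A standalone sketch of the pattern (my own, with a hypothetical metrics table, SQLAlchemy 1.x):

    from sqlalchemy import MetaData, Table, Column, String, Float, select, sql

    metadata = MetaData()
    metrics = Table('metrics', metadata,
                    Column('key', String), Column('value', Float))

    # rows with a NULL value get 1 and therefore sort after all real values
    nulls_last = sql.case([(metrics.c.value.is_(None), 1)], else_=0)
    stmt = select([metrics]).order_by(nulls_last, metrics.c.value.asc())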
Example #8
File: sortclause.py Project: encukou/qdex
 def sort(self, builder):
     column = self.column
     translationClass = column.translationClass
     attr = column.attr
     whens = []
     for language in column.languages:
         key = ('translation', translationClass, language)
         # bind language per iteration so a deferred call sees the right value
         onFactory = lambda aliasedTable, language=language: and_(
                 aliasedTable.foreign_id == builder.mappedClass.id,
                 aliasedTable.local_language == language,
             )
         aliasedTable = builder.joinOn(key, onFactory, translationClass)
         aliasedColumn = getattr(aliasedTable, attr)
         whens.append((aliasedColumn != None, aliasedColumn))
     if attr == 'name':
         default = builder.mappedClass.identifier
     else:
         default = None
     query = builder.query
     if self.descending:
         order = case(whens, else_=default).desc().nullslast()
     else:
         order = case(whens, else_=default).asc().nullsfirst()
     query = query.order_by(order)
     builder.query = query
Example #9
File: query.py Project: fatrix/zato
def _http_soap(session, cluster_id):
    return session.query(
        HTTPSOAP.id, HTTPSOAP.name, HTTPSOAP.is_active,
        HTTPSOAP.is_internal, HTTPSOAP.transport, HTTPSOAP.host,
        HTTPSOAP.url_path, HTTPSOAP.method, HTTPSOAP.soap_action,
        HTTPSOAP.soap_version, HTTPSOAP.data_format, HTTPSOAP.security_id,
        HTTPSOAP.connection,
        case([(HTTPSOAP.ping_method != None, HTTPSOAP.ping_method)], else_=DEFAULT_HTTP_PING_METHOD).label('ping_method'), # noqa
        case([(HTTPSOAP.pool_size != None, HTTPSOAP.pool_size)], else_=DEFAULT_HTTP_POOL_SIZE).label('pool_size'),
        case([(HTTPSOAP.merge_url_params_req != None, HTTPSOAP.merge_url_params_req)], else_=True).label('merge_url_params_req'),
        case([(HTTPSOAP.url_params_pri != None, HTTPSOAP.url_params_pri)], else_=URL_PARAMS_PRIORITY.DEFAULT).label('url_params_pri'),
        case([(HTTPSOAP.params_pri != None, HTTPSOAP.params_pri)], else_=PARAMS_PRIORITY.DEFAULT).label('params_pri'),
        SecurityBase.sec_type,
        Service.name.label('service_name'),
        Service.id.label('service_id'),
        Service.impl_name.label('service_impl_name'),
        SecurityBase.name.label('security_name'),
        SecurityBase.username.label('username'),
        SecurityBase.password.label('password'),
        SecurityBase.password_type.label('password_type'),).\
        outerjoin(Service, Service.id==HTTPSOAP.service_id).\
        outerjoin(SecurityBase, HTTPSOAP.security_id==SecurityBase.id).\
        filter(Cluster.id==HTTPSOAP.cluster_id).\
        filter(Cluster.id==cluster_id).\
        order_by(HTTPSOAP.name)
Example #10
def demographic_etl(config):
    # set up
    connection = get_connection(config)
    pedsnet_session = init_pedsnet(connection)
    init_pcornet(connection)

    # multiple aliases for pedsnet_pcornet_valueset_map
    # to allow the three named joins
    gender_value_map = aliased(ValueSetMap)
    ethnicity_value_map = aliased(ValueSetMap)
    race_value_map = aliased(ValueSetMap)

    # extract the data from the person table
    person = pedsnet_session.query(Person.person_id,
                                   Person.birth_date,
                                   Person.birth_time,
                                   coalesce(gender_value_map.target_concept, 'OT'),
                                   coalesce(ethnicity_value_map.target_concept, 'OT'),
                                   coalesce(race_value_map.target_concept, 'OT'),
                                   bindparam("biobank_flag", "N"),
                                   Person.gender_source_value,
                                   Person.ethnicity_source_value,
                                   Person.race_source_value,
                                   Person.site,
                                   bindparam("gender_identity", None),
                                   bindparam("raw_gender_identity", None),
                                   bindparam("sexual_orientation", None),
                                   bindparam("raw_sexual_orientation", None)
                                   ). \
        outerjoin(gender_value_map,
                  and_(gender_value_map.source_concept_class == 'Gender',
                       case([(and_(Person.gender_concept_id == None,
                                   gender_value_map.source_concept_id == None), True)],
                            else_=cast(Person.gender_concept_id, String(200)) ==
                                  gender_value_map.source_concept_id))). \
        outerjoin(ethnicity_value_map,
                  and_(ethnicity_value_map.source_concept_class == 'Hispanic',
                       case([(and_(Person.ethnicity_concept_id == None,
                                   ethnicity_value_map.source_concept_id == None), True)],
                            else_=cast(Person.ethnicity_concept_id, String(200)) ==
                                  ethnicity_value_map.source_concept_id))). \
        outerjoin(race_value_map,
                  and_(race_value_map.source_concept_class == 'Race',
                       case([(and_(Person.race_concept_id == None,
                                   race_value_map.source_concept_id == None), True)],
                            else_=cast(Person.race_concept_id, String(200)) ==
                                  race_value_map.source_concept_id))).all()

    # transform data to pcornet names and types
    # load to demographic table
    odo(person, Demographic.__table__,
        dshape='var * {patid: string, birth_date: date, birth_time: string, sex: string,'
               'hispanic: string, race: string, biobank_flag: string, raw_sex: string,'
               'raw_hispanic: string, raw_race:string, site: string, gender_identity: string,'
               'raw_gender_identity: string, sexual_orientation: string, raw_sexual_orientation: string}'
        )
    # close session

    pedsnet_session.close()
Example #11
  def _generate_stmt(self, id) :
    #print "@@ON ID", id
    ## ugggh for y_position
    ledger_line = self.ledger_line
    n_lines = self.n_lines
    staff_space = self.staff_space
    staff_symbol = self.staff_symbol
    rhythmic_head_width = self.rhythmic_head_width
    y_position = self.y_position
    line_stencil = self.line_stencil

    ledger_line_to_line_stencil = select([
      ledger_line.c.id.label('id'),
      literal('ledger_line_to_line_stencil').label('writer'),
      literal(0).label('sub_id'),
      literal(-0.6).label('x0'),
      (case([(ledger_line.c.val < 0, staff_space.c.val * n_lines.c.val)], else_ = - staff_space.c.val) - y_position.c.val).label('y0'),
      (rhythmic_head_width.c.val + 1.0).label('x1'),
      (case([(ledger_line.c.val < 0, staff_space.c.val * n_lines.c.val)], else_ = - staff_space.c.val) - y_position.c.val).label('y1'),
      literal(0.13).label('thickness')
    ]).\
    where(safe_eq_comp(ledger_line.c.id, id)).\
    where(func.abs(ledger_line.c.val) > 0).\
    where(n_lines.c.id == staff_symbol.c.val).\
    where(staff_space.c.id == staff_symbol.c.val).\
    where(y_position.c.id == ledger_line.c.id).\
    where(staff_symbol.c.id == ledger_line.c.id).\
    where(rhythmic_head_width.c.id == staff_symbol.c.id).\
    cte(name="ledger_line_to_line_stencil", recursive = True)

    #where(safe_eq_comp(ledger_line.c.id, id))

    self.register_stmt(ledger_line_to_line_stencil)

    ledger_line_to_line_stencil_prev = ledger_line_to_line_stencil.alias(name="ledger_line_to_line_stencil_prev")

    ledger_line_to_line_stencil = ledger_line_to_line_stencil.union_all(
     select([
       ledger_line_to_line_stencil_prev.c.id,
       literal('ledger_line_to_line_stencil'),
       ledger_line_to_line_stencil_prev.c.sub_id + 1,
       ledger_line_to_line_stencil_prev.c.x0,
       ledger_line_to_line_stencil_prev.c.y0 + (staff_space.c.val * - 1.0 * ledger_line.c.val / func.abs(ledger_line.c.val)),
       ledger_line_to_line_stencil_prev.c.x1,
       ledger_line_to_line_stencil_prev.c.y1 + (staff_space.c.val * -1.0 * ledger_line.c.val / func.abs(ledger_line.c.val)),
       ledger_line_to_line_stencil_prev.c.thickness
     ]).\
     where(staff_space.c.id == staff_symbol.c.val).\
     where(staff_symbol.c.id == ledger_line_to_line_stencil_prev.c.id).\
     where(ledger_line_to_line_stencil_prev.c.id == ledger_line.c.id).\
     where(ledger_line_to_line_stencil_prev.c.sub_id < func.abs(ledger_line.c.val) - 1)
    )

    self.register_stmt(ledger_line_to_line_stencil)

    self.insert = simple_insert(line_stencil, ledger_line_to_line_stencil)
Example #12
  def pushDB(self, db, domain_id, rr, value):
    rr = filter_rrtype[rr]

    for ttl, v in self.ttls.items():
      oid = None

      try:
        q = db.query(Entry)
        q = q.filter(and_(Entry.domain_id == domain_id, Entry.type == rr, Entry.ttl == ttl, Entry.value == value))
        q.update({Entry.first_seen: case([(Entry.first_seen > v[1], v[1])], else_=Entry.first_seen),
                  Entry.last_seen: case([(Entry.last_seen < v[2], v[2])], else_=Entry.last_seen),
                  Entry.count: Entry.count + v[0]}, synchronize_session=False)

        q = db.query(Entry.entry_id)
        o = q.filter(and_(Entry.domain_id == domain_id, Entry.type == rr, Entry.ttl == ttl, Entry.value == value)).one()

        oid = o[0]
      except NoResultFound:
        entry = Entry()
        entry.domain_id = domain_id
        entry.type = rr
        entry.ttl = ttl
        entry.value = value
        entry.first_seen = v[1]
        entry.last_seen = v[2]
        entry.count = v[0]
        db.add(entry)
        db.flush()

        oid = entry.entry_id

      for s, sv in v[3].items():
        try:
          q = db.query(exists().where((and_(DNS_Server.entry_id == oid, DNS_Server.ip == s)))).scalar()

          if not q:
            raise NoResultFound('')

          q = db.query(DNS_Server)
          q = q.filter(and_(DNS_Server.entry_id == oid, DNS_Server.ip == s))
          q.update({DNS_Server.first_seen: case([(DNS_Server.first_seen > sv[1], sv[1])], else_=DNS_Server.first_seen),
                    DNS_Server.last_seen: case([(DNS_Server.last_seen < sv[2], sv[2])], else_=DNS_Server.last_seen),
                    DNS_Server.count: DNS_Server.count + sv[0]}, synchronize_session=False)

        except NoResultFound:
          dns_server = DNS_Server()
          dns_server.entry_id = oid
          dns_server.ip = s
          dns_server.first_seen = sv[1]
          dns_server.last_seen = sv[2]
          dns_server.count = sv[0]
          db.add(dns_server)
          db.flush()

    db.flush()
Example #13
  def _generate_stmt(self, id) :
    natural_stem_direction = self.natural_stem_direction
    beam = self.beam
    stem_direction = self.stem_direction
    beam_specialize = self.beam_specialize

    my_beam = select([
      beam.c.val.label('beam')
    ]).where(safe_eq_comp(beam.c.id, id)).cte(name = 'my_beam')
    
    self.register_stmt(my_beam)

    others_beamed_with_me = select([
      beam.c.id.label('id'),
      beam.c.val.label('val')
    ]).where(beam.c.val == my_beam.c.beam).\
        cte(name = 'others_beamed_with_me')

    self.register_stmt(others_beamed_with_me)

    prevailing_direction = select([
      func.sum(natural_stem_direction.c.val).label('val'),
    ]).where(natural_stem_direction.c.id == others_beamed_with_me.c.id).\
    cte(name="prevailing_direction")

    self.register_stmt(prevailing_direction)

    stem_direction_for_beams = select([
      others_beamed_with_me.c.id.label('id'),
      prevailing_direction.c.val.label('val'),
    ]).\
    cte(name="stem_direction_for_beams")

    self.register_stmt(stem_direction_for_beams)

    natural_stem_direction_to_stem_direction = select([
      natural_stem_direction.c.id.label('id'),
      case([(stem_direction_for_beams.c.val != None,
          case([(stem_direction_for_beams.c.val > 0, 1)], else_ = -1))],
        else_ = natural_stem_direction.c.val).label('val')
    ]).select_from(natural_stem_direction.\
        outerjoin(stem_direction_for_beams,
                   onclause=natural_stem_direction.c.id == stem_direction_for_beams.c.id)).\
      where(safe_eq_comp(natural_stem_direction.c.id, id))
    
    natural_stem_direction_to_stem_direction = natural_stem_direction_to_stem_direction.union(
    select([
      stem_direction_for_beams.c.id.label('id'),
      case([(stem_direction_for_beams.c.val > 0, 1)], else_ = -1).label('val')
    ])).\
    cte(name='natural_stem_direction_to_stem_direction')

    self.register_stmt(natural_stem_direction_to_stem_direction)

    self.insert = simple_insert(stem_direction, natural_stem_direction_to_stem_direction)
Example #14
    def birth_date(cls):
        year = cast(cls.year_of_birth, String)
        month = cast(cls.month_of_birth, String)
        day = cast(cls.day_of_birth, String)

        month = case([(month == "", "01")],
                     else_=case([(func.length(month) == 1, "0" + month)], else_=month))
        day = case([(day == "", "01")],
                   else_=case([(func.length(day) == 1, "0" + day)], else_=day))

        return year + "-" + month + "-" + day
Example #15
def view_index(session, request, lang=None):
    """ Does the main index page for DDTSS, with list of languages and stats """
    user = get_user(request, session)
    if lang is None:
        lang = user.lastlanguage_ref
    if lang is None:
        lang = 'xx'
    if lang != 'xx':
        return redirect('ddtss_index_lang', lang)

    pending_translations = session.query(Languages, \
                                         func.sum(expression.case([(PendingTranslation.state == PendingTranslation.STATE_PENDING_TRANSLATION, 1)], else_=0)), \
                                         func.sum(expression.case([(PendingTranslation.state == PendingTranslation.STATE_PENDING_REVIEW, 1)], else_=0))) \
                                  .outerjoin(PendingTranslation) \
                                  .group_by(Languages) \
                                  .all()

    translated = session.query(Languages.language, func.count(Translation.description_id)) \
                        .join(Translation, Translation.language == Languages.language) \
                        .group_by(Languages.language) \
                        .all()

    # Convert (lang,count) pairs to dict
    translated = dict(translated)

    # Combine into one resultset
    languages = []
    total_pending_translation = 0
    total_pending_review = 0
    total_translated = 0
    for row in pending_translations:
        languages.append(dict(language=row[0].language,
                            fullname=row[0].fullname,
                            enabled=row[0].enabled_ddtss,
                            pending_translation=row[1],
                            pending_review=row[2],
                            translated=translated.get(row[0].language, 0)))
        total_pending_translation += row[1]
        total_pending_review += row[2]
        total_translated += translated.get(row[0].language, 0)

    # Sort by translated descending
    #languages.sort(key=lambda x:x['translated'], reverse=True)

    return render_to_response("ddtss/index.html",
                              {'languages': languages,
                               'user': user,
                               'total_pending_translation': total_pending_translation,
                               'total_pending_review': total_pending_review,
                               'total_translated': total_translated},
                              context_instance=RequestContext(request))
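func.sum over a CASE, as used above, is the portable "filtered count": one pass over the joined rows yields one counter per state. A minimal sketch of the pattern (my own, hypothetical tasks table, SQLAlchemy 1.x):

    from sqlalchemy import MetaData, Table, Column, String, func, select
    from sqlalchemy.sql import expression

    metadata = MetaData()
    tasks = Table('tasks', metadata,
                  Column('owner', String), Column('state', String))

    # one conditional counter per state, computed in a single GROUP BY scan
    stmt = select([
        tasks.c.owner,
        func.sum(expression.case([(tasks.c.state == 'open', 1)], else_=0)).label('open'),
        func.sum(expression.case([(tasks.c.state == 'done', 1)], else_=0)).label('done'),
    ]).group_by(tasks.c.owner)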
Example #16
File: data.py Project: UMWRG/HydraPlatform
def get_datasets(dataset_ids,**kwargs):
    """
        Get a single dataset, by ID
    """

    user_id = int(kwargs.get('user_id'))
    datasets = []
    if len(dataset_ids) == 0:
        return []
    try:
        dataset_rs = DBSession.query(Dataset.dataset_id,
                Dataset.data_type,
                Dataset.data_units,
                Dataset.data_dimen,
                Dataset.data_name,
                Dataset.hidden,
                Dataset.cr_date,
                Dataset.created_by,
                DatasetOwner.user_id,
                null().label('metadata'),
                # hide time/frequency/value from non-owners: the outer join
                # leaves DatasetOwner.user_id NULL when the user has no access
                case([(and_(Dataset.hidden=='Y', DatasetOwner.user_id == None), None)],
                        else_=Dataset.start_time).label('start_time'),
                case([(and_(Dataset.hidden=='Y', DatasetOwner.user_id == None), None)],
                        else_=Dataset.frequency).label('frequency'),
                case([(and_(Dataset.hidden=='Y', DatasetOwner.user_id == None), None)],
                        else_=Dataset.value).label('value')).filter(
                Dataset.dataset_id.in_(dataset_ids)).outerjoin(DatasetOwner,
                                    and_(DatasetOwner.dataset_id==Dataset.dataset_id,
                                    DatasetOwner.user_id==user_id)).all()

        #convert the value row into a string as it is returned as a binary
        for dataset_row in dataset_rs:
            dataset_dict = dataset_row._asdict()

            if dataset_row.value is not None:
                dataset_dict['value'] = str(dataset_row.value)

            if dataset_row.hidden == 'N' or (dataset_row.hidden == 'Y' and dataset_row.user_id is not None):
                metadata = DBSession.query(Metadata).filter(Metadata.dataset_id == dataset_row.dataset_id).all()
                dataset_dict['metadata'] = metadata
            else:
                dataset_dict['metadata'] = []

            datasets.append(namedtuple('Dataset', dataset_dict.keys())(**dataset_dict))

            
    except NoResultFound:
        raise ResourceNotFoundError("Datasets not found.")

    return datasets
Example #17
def daily_data_expr(flag_expr, value_expr, dry_elev):
    if dry_elev is None:
        dry_elev = -1000
    flag_expr = expression.case([
        (flag_expr == _M, _M),
        (flag_expr == None, _O),
    ],
                                else_=_E)
    flag_expr = expression.case([
        (func.avg(value_expr) == None, _M),
        (func.avg(value_expr) < dry_elev, _D),
    ],
                                else_=func.max(flag_expr))
    return (flag_expr, func.avg(value_expr))
Example #18
File: data.py Project: UMWRG/HydraPlatform
def get_dataset(dataset_id,**kwargs):
    """
        Get a single dataset, by ID
    """

    user_id = int(kwargs.get('user_id'))

    if dataset_id is None:
        return None
    try:
        dataset_rs = DBSession.query(Dataset.dataset_id,
                Dataset.data_type,
                Dataset.data_units,
                Dataset.data_dimen,
                Dataset.data_name,
                Dataset.hidden,
                Dataset.cr_date,
                Dataset.created_by,
                DatasetOwner.user_id,
                null().label('metadata'),
                # hide time/frequency/value from non-owners: the outer join
                # leaves DatasetOwner.user_id NULL when the user has no access
                case([(and_(Dataset.hidden=='Y', DatasetOwner.user_id == None), None)],
                        else_=Dataset.start_time).label('start_time'),
                case([(and_(Dataset.hidden=='Y', DatasetOwner.user_id == None), None)],
                        else_=Dataset.frequency).label('frequency'),
                case([(and_(Dataset.hidden=='Y', DatasetOwner.user_id == None), None)],
                        else_=Dataset.value).label('value')).filter(
                Dataset.dataset_id==dataset_id).outerjoin(DatasetOwner,
                                    and_(DatasetOwner.dataset_id==Dataset.dataset_id,
                                    DatasetOwner.user_id==user_id)).one()

        rs_dict = dataset_rs._asdict()

        #convert the value row into a string as it is returned as a binary
        if dataset_rs.value is not None:
            rs_dict['value'] = str(dataset_rs.value)

        if dataset_rs.hidden == 'N' or (dataset_rs.hidden == 'Y' and dataset_rs.user_id is not None):
            metadata = DBSession.query(Metadata).filter(Metadata.dataset_id==dataset_id).all()
            rs_dict['metadata'] = metadata
        else:
            rs_dict['metadata'] = []

    except NoResultFound:
        raise HydraError("Dataset %s does not exist."%(dataset_id))


    dataset = namedtuple('Dataset', rs_dict.keys())(**rs_dict)
    
    return dataset
Example #19
def leaderboard_query(session, start_date, until_date):
    """
    This is, admittedly, a really ugly sql query. Query optimization has not
    been performed, but it shouldn't be anything more complicated than a few
    indices. Good luck.
    """
    #start_date = datetime.strptime(start_date, '%Y-%m-%d')
    #until_date = datetime.strptime(until_date_str, '%Y-%m-%d')
    subq = session\
        .query(
            Instance,
            InstanceType,
            User,
            case([(Instance.end_date != None, Instance.end_date)], else_=now()).label('stop_date'))\
        .join(Instance.user)\
        .join(Instance.type)\
        .subquery()

    uptime_column = case(
        [
            (subq.c.created_date > until_date, 0),
            (subq.c.stop_date < start_date, 0)
        ],
        else_=extract('epoch',
            func.LEAST(subq.c.stop_date, cast(until_date, DateTime)) -
            func.GREATEST(subq.c.created_date, cast(start_date, DateTime))
        )
    )

    subq2 = session.query(
        subq.c.user_id,
        sum(case([(uptime_column == 0, 0)], else_=1)).label('instance_count'),
        #func.count(subq.c.instance_id).label('instance_count'),
        sum(uptime_column).label('uptime'),
        sum(uptime_column * subq.c.cpu).label('cpu_seconds')
    ).group_by(subq.c.user_id).order_by(desc('cpu_seconds')).subquery()

    q = session.query(
        subq2.c.user_id,
        subq2.c.uptime,
        subq2.c.cpu_seconds,
        subq2.c.instance_count,
        User.username,
        User.is_staff,
        User.name
    ).join(User)

    return q
Example #21
File: models.py Project: intgr/lemur
 def expired(cls):
     return case(
         [
             (cls.not_after <= arrow.utcnow(), True)
         ],
         else_=False
     )
Example #22
 def deprecated(cls):
     return case(
         [
             (cls.name.in_(BAD_CIPHERS), True)
         ],
         else_=False
     )
Example #23
File: models.py Project: ejcx/lemur
 def expired(cls):
     return case(
         [
             (cls.not_after <= datetime.datetime.now(), True)
         ],
         else_=False
     )
Example #24
def pivot(query, column_expressions, column_labels):
    query = query.group_by(query.statement)
    columns = []
    for i in range(0, len(column_expressions)):
        column_expressions[i] = func.count(case([(column_expressions[i], 1)]))
        columns.append(column_expressions[i].label(column_labels[i]))
    return query.add_columns(*columns)
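A hypothetical call to pivot() above, producing one count column per condition (the Order model and the session are illustrative assumptions, not from the source):

    # counts orders per customer, split into 'open_count' / 'shipped_count' columns
    query = session.query(Order.customer_id)
    query = pivot(query,
                  [Order.status == 'open', Order.status == 'shipped'],
                  ['open_count', 'shipped_count'])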
Example #25
File: models.py Project: ejcx/lemur
 def revoked(cls):
     return case(
         [
             (cls.status == 'revoked', True)
         ],
         else_=False
     )
Example #26
def _story_build_summary_query():
    # first create a subquery for task statuses
    select_items = []
    select_items.append(Story)
    select_items.append(
        expr.case(
            [(func.sum(Task.status.in_(
                ['todo', 'inprogress', 'review'])) > 0,
              'active'),
             ((func.sum(Task.status == 'merged')) > 0, 'merged')],
            else_='invalid'
        ).label('status')
    )
    for task_status in Task.TASK_STATUSES:
        select_items.append(expr.cast(
            func.sum(Task.status == task_status), Integer
        ).label(task_status))
    select_items.append(expr.null().label('task_statuses'))

    result = select(select_items, None,
                    expr.Join(Story, Task, onclause=Story.id == Task.story_id,
                              isouter=True)) \
        .group_by(Story.id) \
        .alias('story_summary')

    return result
Example #27
  def _generate_stmt(self, id) :
    font_name = self.font_name
    font_size = self.font_size
    accidental = self.accidental
    glyph_box = self.glyph_box
    accidental_width = self.accidental_width

    accidental_to_accidental_widths = select([
      accidental.c.id.label('id'),
      (glyph_box.c.width * font_size.c.val / 20.0).label('val')
    ]).select_from(accidental.join(font_name, onclause = accidental.c.id == font_name.c.id).\
                   join(font_size, onclause = accidental.c.id == font_size.c.id).\
                   join(glyph_box, onclause = font_name.c.val == glyph_box.c.name)).where(
             and_(glyph_box.c.unicode == case([(accidental.c.val == -1, "U+E260"),
                                           (accidental.c.val == 0, "U+E261"),
                                           (accidental.c.val == 1, "U+E262")],
                                           ))).\
    where(safe_eq_comp(accidental.c.id, id)).\
    cte(name='accidental_to_accidental_widths')

    self.register_stmt(accidental_to_accidental_widths)

    #uggghhhh....
    #real_accidental_to_accidental_widths = realize(accidental_to_accidental_widths, accidental_width, 'val')
    
    #self.register_stmt(real_accidental_to_accidental_widths)
    #self.insert = simple_insert(accidental_width, real_accidental_to_accidental_widths)
    self.insert = simple_insert(accidental_width, accidental_to_accidental_widths)
Example #28
  def _generate_stmt(self, id) :  
    name = self.name
    font_name = self.font_name
    font_size = self.font_size
    time_signature = self.time_signature
    glyph_box = self.glyph_box
    time_signature_inter_number_padding = self.time_signature_inter_number_padding
    height = self.height

    glyph_box_a_1 = glyph_box.alias(name='glyph_box_a_1')
    glyph_box_a_2 = glyph_box.alias(name='glyph_box_a_2')
    time_signature_inter_number_padding_default = time_signature_inter_number_padding.alias(name="time_signature_inter_number_padding_default")

    time_signatures_to_heights = select([
      name.c.id.label('id'),
      # parenthesized as a whole so .label('val') names the full sum
      ((glyph_box_a_1.c.height * font_size.c.val / 20.0) +
                 case([(time_signature_inter_number_padding.c.val != None, time_signature_inter_number_padding.c.val)], else_ = time_signature_inter_number_padding_default.c.val) +
                 (glyph_box_a_2.c.height * font_size.c.val / 20.0)).label('val')
    ]).select_from(name.outerjoin(time_signature_inter_number_padding, onclause = name.c.id == time_signature_inter_number_padding.c.id)).\
        where(and_(name.c.val == 'time_signature',
                  name.c.id == font_name.c.id,
                  name.c.id == font_size.c.id,
                  name.c.id == time_signature.c.id,
                  time_signature_inter_number_padding_default.c.id == -1,
                  font_name.c.val == glyph_box_a_1.c.name,
                  font_name.c.val == glyph_box_a_2.c.name,
                  conversion_tools.int_to_unicode(time_signature.c.num) == glyph_box_a_1.c.unicode,
                  conversion_tools.int_to_unicode(time_signature.c.den) == glyph_box_a_2.c.unicode)).\
        where(safe_eq_comp(name.c.id, id)).\
    cte(name='time_signatures_to_heights')

    self.register_stmt(time_signatures_to_heights)

    self.insert = simple_insert(height, time_signatures_to_heights)
Example #29
  def _generate_stmt(self, id) :
    font_name = self.font_name
    font_size = self.font_size
    duration_log = self.duration_log
    glyph_box = self.glyph_box
    name = self.name
    rhythmic_event_dimension = self.rhythmic_event_dimension
    dimension = self.dimension

    duration_log_to_rhythmic_event_dimensions = select([
      duration_log.c.id.label('id'),
      (glyph_box.c[dimension] * font_size.c.val / 20.0).label('val')
    ]).select_from(duration_log.join(font_name, onclause = duration_log.c.id == font_name.c.id).\
                   join(name, onclause = duration_log.c.id == name.c.id).\
                   join(font_size, onclause = duration_log.c.id == font_size.c.id).\
                   join(glyph_box, onclause = font_name.c.val == glyph_box.c.name)).\
          where(safe_eq_comp(duration_log.c.id, id)).\
          where(
             and_(glyph_box.c.unicode == case([(and_(duration_log.c.val == -1, name.c.val == 'note'), "U+E0A3"),
                                            (and_(duration_log.c.val == 0, name.c.val == 'note'), "U+E0A2"),
                                            (and_(duration_log.c.val == 0, name.c.val == 'rest'), "U+E4E3"),
                                            (and_(duration_log.c.val == -1, name.c.val == 'rest'), "U+E4E4"),
                                            (and_(duration_log.c.val == -2, name.c.val == 'rest'), "U+E4E5"),
                                            (and_(duration_log.c.val == -3, name.c.val == 'rest'), "U+E4E6"),
                                            (and_(duration_log.c.val == -4, name.c.val == 'rest'), "U+E4E7"),
                                            (and_(duration_log.c.val == -5, name.c.val == 'rest'), "U+E4E8"),
                                            (and_(duration_log.c.val == -6, name.c.val == 'rest'), "U+E4E9"),
                                            (and_(duration_log.c.val == -7, name.c.val == 'rest'), "U+E4EA"),
                                            (name.c.val == 'note', "U+E0A4")],
                                           else_ = 0))).\
    cte(name='duration_log_to_rhythmic_event_dimensions')

    self.register_stmt(duration_log_to_rhythmic_event_dimensions)

    self.insert = simple_insert(rhythmic_event_dimension, duration_log_to_rhythmic_event_dimensions)
Example #30
  def _generate_stmt(self, id) :
    dynamic = self.dynamic
    unicode = self.unicode
    
    dynamic_to_unicode = select([
      dynamic.c.id.label('id'),
      case([(dynamic.c.val == 'pppppp', "U+E527"),
        (dynamic.c.val == 'ppppp', "U+E528"),
        (dynamic.c.val == 'pppp', "U+E529"),
        (dynamic.c.val == 'ppp', "U+E52A"),
        (dynamic.c.val == 'pp', "U+E52B"),
        (dynamic.c.val == 'p', "U+E520"),
        (dynamic.c.val == 'mp', "U+E52C"),
        (dynamic.c.val == 'mf', "U+E52D"),
        (dynamic.c.val == 'p', "U+E522"),
        (dynamic.c.val == 'pf', "U+E52E"),
        (dynamic.c.val == 'f', "U+E522"),
        (dynamic.c.val == 'ff', "U+E52F"),
        (dynamic.c.val == 'fff', "U+E530"),
        (dynamic.c.val == 'ffff', "U+E531"),
        (dynamic.c.val == 'fffff', "U+E532"),
        (dynamic.c.val == 'ffffff', "U+E533"),
        (dynamic.c.val == 'fp', "U+E534"),
        (dynamic.c.val == 'fz', "U+E535"),
        (dynamic.c.val == 'sf', "U+E536"),
        (dynamic.c.val == 'sfp', "U+E537"),
        (dynamic.c.val == 'sfpp', "U+E538"),
        (dynamic.c.val == 'sfz', "U+E539"),
      ])
    ]).where(safe_eq_comp(dynamic.c.id, id)).\
    cte(name='dynamic_to_unicode')

    self.register_stmt(dynamic_to_unicode)

    self.insert = simple_insert(unicode, dynamic_to_unicode)
Example #31
async def create_conversion_batch(entity_name, entity_id, format, user_id):
    entity_name = entity_name.upper()
    if entity_name == 'AUTHOR':
        author = model.Author.__table__
        q = select([case([(author.c.first_name == None, author.c.last_name)],
                   else_ = author.c.first_name + ' ' + author.c.last_name)])\
            .where(author.c.id == entity_id)
    elif entity_name == 'SERIES':
        series = model.Series.__table__
        q = select([series.c.title]).where(series.c.id == entity_id)
    elif entity_name == 'BOOKSHELF':
        shelf = model.Bookshelf.__table__
        q = select([shelf.c.name]).where(shelf.c.id == entity_id)
    else:
        raise ValueError('Invalid entity name')
    
    format_id = await get_format_id(format)
    
    async with engine.acquire() as conn:
        batch = model.ConversionBatch.__table__
        res = await conn.execute(q)
        name = await res.scalar()
        name = "Books for %s %s" % (entity_name.lower(), name)
        res = await conn.execute(batch.insert()\
                                 .values(name=name, for_entity=entity_name,
                                    entity_id=entity_id, format_id=format_id,
                                    created_by_id = user_id,
                                    modified_by_id = user_id, version_id =1 )\
                                 .returning(batch.c.id))
        
        return await res.scalar()
Example #32
def _add_ordering(sql_query, table, column_type, column_name, order):
    # Special case for this column, which sorts contigs correctly:
    if column_name == 'contig':
        get_contig_num = cast(
            text(r"SUBSTRING({} FROM '\d+')".format(table.c.contig)),
            type_=Integer)
        starts_with_chr = (text(r"SUBSTRING({} FROM '^chr(\d+)')"
                                .format(table.c.contig)) != literal(''))
        starts_with_number = (text(r"SUBSTRING({} FROM '^\d+')"
                                   .format(table.c.contig)) != literal(''))
        # 10000 used here to mean "should be at the end of all the numbers",
        # assuming we never hit a chromosome number >= 10000.
        contig_num_col = case(
            [(starts_with_chr, get_contig_num),
             (starts_with_number, get_contig_num)],
            else_=literal(10000)
        )
        contig_len_col = func.length(table.c.contig)
        contig_col = table.c.contig
        if order == 'desc':
            contig_len_col = desc(contig_len_col)
            contig_col = desc(contig_col)
        return sql_query.order_by(contig_num_col, contig_len_col, contig_col)
    sqla_type = vcf_type_to_sqla_type(column_type)
    column = cast(table.c[column_name], type_=sqla_type)
    column = {'asc': asc(column), 'desc': desc(column)}.get(order)
    return sql_query.order_by(column)
Example #34
    def get_proxy_address(
        self,
        user_id,
        ip_address=None,
        best=4,
        conn_factor=0.2
    ):
        """Get a usable proxy address for audio resource of user by user_id.
        If there is no available server, None will be returned

        We sort the connection by

            user_rate - (have_conn*conn_factor) then
            res_rate - (have_conn*conn_factor)

        This means the proxy serving fewer users is selected first; in
        addition, a proxy that already holds a connection for this user
        gets higher priority (introduced by the conn_factor).

        """
        from sqlalchemy.sql.expression import or_, and_, cast, case
        from sqlalchemy.types import Float

        Port = tables.Port
        Proxy = tables.Proxy
        ProxyConnection = tables.ProxyConnection

        # calculate the connection factor
        factor_case = case([
            (ProxyConnection.server_id, conn_factor)
        ], else_=0)

        # Cast the type to make sure the result will be float
        res_rate = (Proxy.resource_count / cast(Proxy.resource_limit, Float))
        res_rate -= factor_case

        user_rate = (Proxy.user_count / cast(Proxy.user_limit, Float))
        user_rate -= factor_case

        query = self.session \
            .query(Port) \
            .join((Proxy, Proxy.id == Port.server_id)) \
            .outerjoin((ProxyConnection,
                        and_(ProxyConnection.server_id == Proxy.id,
                             ProxyConnection.user_id == user_id))) \
            .order_by(user_rate) \
            .order_by(res_rate) \
            .filter(or_(Proxy.user_count < Proxy.user_limit,
                        Proxy.user_limit == 0)) \
            .filter(Proxy.alive) \
            .filter(Proxy.active) \
            .filter(Port.name == 'web')

        # find a random proxy
        ports = query.limit(best).all()
        if not ports:
            return None
        port = random.choice(ports)
        return port.address
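To make the docstring's formula concrete with made-up numbers: an existing connection discounts a proxy's rates by conn_factor, so a slightly busier proxy that already serves the user can still win. A plain-Python sketch:

    conn_factor = 0.2

    # proxy A: 5 of 10 user slots used, no existing connection for this user
    rate_a = 5 / 10.0 - (0 * conn_factor)   # 0.5

    # proxy B: 6 of 10 user slots used, already serving this user
    rate_b = 6 / 10.0 - (1 * conn_factor)   # 0.4 -> sorts ahead of proxy A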
Example #35
 def find_user_authorization_by_org_id_and_corp_type(cls, org_id: int, corp_type: str):
     """Return authorization view object."""
     return db.session.query(Authorization).filter(
         and_(Authorization.org_id == org_id,
              or_(Authorization.corp_type_code == corp_type, Authorization.corp_type_code.is_(None)))).order_by(
                  expression.case(((Authorization.org_membership == ADMIN, 1),
                                   (Authorization.org_membership == COORDINATOR, 2),
                                   (Authorization.org_membership == USER, 3)))).first()
Example #36
def hourly_data_expr(flag_expr, value_expr, dry_elev):
    if dry_elev is None:
        dry_elev = -1000
    flag_expr = expression.case([(flag_expr == _M, _M),
                                 (value_expr == None, _M),
                                 (value_expr < dry_elev, _D),
                                 (flag_expr == None, _O)],
                                else_=_E)
    return (flag_expr, value_expr)
Example #37
        def column_expression(self, col):
            if geometry_support == 'sde-char':
                else_expression = ToChar(STAsText(col))
            else:
                else_expression = STAsText(col)

            case_expression = expression.case([(STIsEmpty(col) == 1, None),
                                               (STIsEmpty(col) == 0,
                                                else_expression)])

            return case_expression
Example #38
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('accounts', sa.Column('reserve_type', sa.SmallInteger(), nullable=True, default=0))
    op.create_index('ix_accounts_acquisition', 'accounts', ['reserve_type', 'instance', 'hibernated', 'created'], unique=False)

    query = Account.__table__.update().\
            values({'reserve_type':expression.case([
                (Account.level < 30,RESERVE_TYPE_SLAVE),
                ],
                else_=RESERVE_TYPE_CAPTAIN)})
    op.execute(query)
Example #39
    def find_user_authorization_by_business_number_and_corp_type(cls, business_identifier: str, corp_type: str):
        """Return authorization view object using corp type and business identifier.

        Mainly used for service accounts. Sorted by membership, since service accounts get all access.

        """
        return cls.query.filter_by(corp_type_code=corp_type, business_identifier=business_identifier) \
            .order_by(expression.case(((Authorization.org_membership == OWNER, 1),
                                       (Authorization.org_membership == ADMIN, 2),
                                       (Authorization.org_membership == MEMBER, 3))))\
            .first()
Example #40
    def find_user_authorization_by_business_number_and_product(cls, business_identifier: str, product_code: str):
        """Return authorization view object using corp type and business identifier.

        Mainly used for service accounts. Sorted by membership, since service accounts get all access.

        """
        return cls.query.filter_by(product_code=product_code, business_identifier=business_identifier) \
            .order_by(expression.case(((Authorization.org_membership == ADMIN, 1),
                                       (Authorization.org_membership == COORDINATOR, 2),
                                       (Authorization.org_membership == USER, 3)))) \
            .first()
Example #41
    def normalize_amount_damage(cls) -> sase.Update:
        """
        Apply feature normalization on the amount_damage in the table.

        :return: A sql statement to update the amount_damage_norm column with normalized amount_damage per country/car
        """
        # First compute the minimum and maximum amount damage per country
        min_max = sase.select([
            cls.country,
            sase.case([(sase.func.max(sase.cast(cls.amount_damage, Numeric(14, 2))) == 0, 1)],
                      else_=sase.func.max(sase.cast(cls.amount_damage, Numeric(14, 2)))).label("max_dmg"),
            sase.func.min(sase.cast(cls.amount_damage, Numeric(14, 2))).label("min_dmg")
        ]).group_by(cls.country).alias("min_max")

        # Second, use the min and max damage to normalize the damage per car per country and store in separate column
        norm = sase.update(cls). \
            where(min_max.c.country == cls.country). \
            values(amount_damage_norm=(
                (sase.cast(cls.amount_damage, Numeric(14, 2)) - min_max.c.min_dmg) /
                (sase.case([(min_max.c.max_dmg == 0, 1)], else_=min_max.c.max_dmg) - min_max.c.min_dmg)
            ))

        return norm
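Restating the update above as a formula (my own summary): for each row,

    amount_damage_norm = (amount_damage - min_dmg) / (max_dmg' - min_dmg)

where min_dmg and max_dmg are computed per country, and max_dmg' substitutes 1 when max_dmg is 0, so the divisor can never be zero for an all-zero country.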
Example #42
def upgrade():
    # re-size existing data if necessary
    identifier_map = table('cisco_csr_identifier_map',
                           column('ipsec_site_conn_id', sa.String(36)))
    ipsec_site_conn_id = identifier_map.columns['ipsec_site_conn_id']

    op.execute(identifier_map.update(values={
        ipsec_site_conn_id: expr.case([(func.length(ipsec_site_conn_id) > 36,
                                      func.substr(ipsec_site_conn_id, 1, 36))],
                                      else_=ipsec_site_conn_id)}))

    # Need to drop foreign key constraint before mysql will allow changes

    with migration.remove_fks_from_table('cisco_csr_identifier_map'):
        op.alter_column(table_name='cisco_csr_identifier_map',
                        column_name='ipsec_site_conn_id',
                        type_=sa.String(36),
                        existing_nullable=False)
Example #43
def upgrade():
    # re-size existing data if necessary
    identifier_map = table('cisco_csr_identifier_map',
                           column('ipsec_site_conn_id', sa.String(36)))
    ipsec_site_conn_id = identifier_map.columns['ipsec_site_conn_id']
    op.execute(
        identifier_map.update(
            values={
                ipsec_site_conn_id:
                expr.case([(func.length(ipsec_site_conn_id) > 36,
                            func.substr(ipsec_site_conn_id, 1, 36))],
                          else_=ipsec_site_conn_id)
            }))

    op.alter_column(table_name='cisco_csr_identifier_map',
                    column_name='ipsec_site_conn_id',
                    type_=sa.String(36),
                    existing_nullable=True)
Example #44
def getAnswerSummary(lectureId, student):
    """Fetch answerSummary row for student"""
    try:
        dbAnsSummary = (Session.query(
            db.AnswerSummary).with_lockmode('update').filter(
                db.AnswerSummary.lectureId == lectureId).filter(
                    db.AnswerSummary.studentId == student.studentId).one())
    except NoResultFound:
        dbAnsSummary = db.AnswerSummary(
            lectureId=lectureId,
            studentId=student.studentId,
            grade=0,
        )
        Session.add(dbAnsSummary)

    # Update based on answer table
    (
        dbAnsSummary.lecAnswered,
        dbAnsSummary.lecCorrect,
        dbAnsSummary.practiceAnswered,
        dbAnsSummary.practiceCorrect,
        maxTimeEnd,
    ) = Session.query(
        func.count(),
        func.ifnull(func.sum(db.Answer.correct), 0),
        func.ifnull(func.sum(db.Answer.practice), 0),
        func.ifnull(
            func.sum(
                expression.case([(db.Answer.practice & db.Answer.correct, 1)],
                                else_=0)), 0),
        func.max(db.Answer.timeEnd),
    ).filter(db.Answer.lectureId == lectureId).filter(
        db.Answer.studentId == student.studentId).one()

    dbAnsSummary.lecAnswered = int(dbAnsSummary.lecAnswered)
    dbAnsSummary.lecCorrect = int(dbAnsSummary.lecCorrect)
    dbAnsSummary.practiceAnswered = int(dbAnsSummary.practiceAnswered)
    dbAnsSummary.practiceCorrect = int(dbAnsSummary.practiceCorrect)
    if not maxTimeEnd:
        maxTimeEnd = datetime.datetime.utcfromtimestamp(0)

    return (dbAnsSummary, maxTimeEnd)
Example #45
def number_of_solved_instances_ranking(db,
                                       experiment,
                                       instances,
                                       solver_configs,
                                       cost='resultTime',
                                       fixed_limit=None):
    """ Ranking by the number of instances correctly solved.
        This is determined by a resultCode that starts with '1' and a 'finished' status
        of a job.
    """
    instance_ids = [i.idInstance for i in instances]
    solver_config_ids = [i.idSolverConfig for i in solver_configs]

    if not solver_config_ids: return []

    table = db.metadata.tables['ExperimentResults']
    table_has_prop = db.metadata.tables['ExperimentResult_has_Property']
    table_has_prop_value = db.metadata.tables[
        'ExperimentResult_has_PropertyValue']
    c_solver_config_id = table.c['SolverConfig_idSolverConfig']
    c_result_time = table.c['resultTime']
    c_experiment_id = table.c['Experiment_idExperiment']
    c_result_code = table.c['resultCode']
    c_status = table.c['status']
    c_instance_id = table.c['Instances_idInstance']
    if cost == 'resultTime':
        cost_column = table.c['resultTime']
        cost_limit_column = table.c['CPUTimeLimit']

        if fixed_limit:
            cost_column = expression.case(
                [(table.c['resultTime'] > fixed_limit, fixed_limit)],
                else_=table.c['resultTime'])
            cost_limit_column = literal(fixed_limit)
            c_result_code = expression.case(
                [(table.c['resultTime'] > fixed_limit, literal(-21))],
                else_=table.c['resultCode'])
            c_status = expression.case(
                [(table.c['resultTime'] > fixed_limit, literal(21))],
                else_=table.c['status'])
    elif cost == 'wallTime':
        cost_column = table.c['wallTime']
        cost_limit_column = table.c['wallClockTimeLimit']

        if fixed_limit:
            cost_column = expression.case(
                [(table.c['wallTime'] > fixed_limit, fixed_limit)],
                else_=table.c['wallTime'])
            cost_limit_column = literal(fixed_limit)
            c_result_code = expression.case(
                [(table.c['wallTime'] > fixed_limit, literal(-22))],
                else_=table.c['resultCode'])
            c_status = expression.case(
                [(table.c['wallTime'] > fixed_limit, literal(22))],
                else_=table.c['status'])
    elif cost == 'cost':
        cost_column = table.c['cost']
        inf = float('inf')
        cost_limit_column = table.c['CPUTimeLimit']
    else:
        cost_column = table_has_prop_value.c['value']
        inf = float('inf')
        cost_limit_column = table.c['CPUTimeLimit']

    results = {}
    if cost in ('resultTime', 'wallTime', 'cost'):
        s = select([c_solver_config_id, functions.sum(cost_column), functions.count()],
                   and_(c_experiment_id == experiment.idExperiment, c_result_code.like(u'1%'), c_status == 1,
                        c_instance_id.in_(instance_ids), c_solver_config_id.in_(solver_config_ids))) \
            .select_from(table) \
            .group_by(c_solver_config_id)

        query_results = db.session.connection().execute(s)
        for row in query_results:
            results[row[0]] = (row[1], row[2])
    else:
        table = table.join(
            table_has_prop,
            and_(table_has_prop.c['idProperty'] == int(cost),
                 table_has_prop.c['idExperimentResults'] ==
                 table.c['idJob'])).join(table_has_prop_value)

        s = select([c_solver_config_id, cost_column],
                   and_(c_experiment_id == experiment.idExperiment, c_result_code.like(u'1%'), c_status == 1,
                        c_instance_id.in_(instance_ids), c_solver_config_id.in_(solver_config_ids))) \
            .select_from(table)

        sum_by_sc_id = dict((i, 0) for i in solver_config_ids)
        count_by_sc_id = dict((i, 0) for i in solver_config_ids)

        query_results = db.session.connection().execute(s)
        for row in query_results:
            sum_by_sc_id[row[0]] += float(row[1])
            count_by_sc_id[row[0]] += 1

        for i in solver_config_ids:
            results[i] = (sum_by_sc_id[i], count_by_sc_id[i])

    def sgn(x):
        if x > 0:
            return 1
        elif x < 0:
            return -1
        else:
            return 0

    def comp(s1, s2):
        num_solved_s1, num_solved_s2 = 0, 0
        if s1.idSolverConfig in results:
            num_solved_s1 = results[s1.idSolverConfig][1]
        if s2.idSolverConfig in results:
            num_solved_s2 = results[s2.idSolverConfig][1]

        if num_solved_s1 > num_solved_s2:
            return 1
        elif num_solved_s1 < num_solved_s2:
            return -1
        else:
            # break ties by cumulative cost over all solved instances
            if s1.idSolverConfig in results and s2.idSolverConfig in results:
                return sgn((results[s2.idSolverConfig][0] or 0.0) -
                           (results[s1.idSolverConfig][0] or 0.0))
            else:
                return 0

    # sorted() lost its cmp argument in Python 3; adapt the comparator instead
    from functools import cmp_to_key
    return sorted(solver_configs, key=cmp_to_key(comp), reverse=True)
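A hypothetical call, for illustration only; db, experiment, instances and solver_configs would come from the surrounding application:

ranking = number_of_solved_instances_ranking(
    db, experiment, instances, solver_configs,
    cost='resultTime', fixed_limit=3600)  # treat runs longer than an hour as timeouts
for rank, sc in enumerate(ranking, start=1):
    print(rank, sc.idSolverConfig)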
Example #46
def get_ranking_data(db,
                     experiment,
                     ranked_solvers,
                     instances,
                     calculate_par10,
                     calculate_avg_stddev,
                     cost,
                     par_factor=1,
                     fixed_limit=None):
    instance_ids = [i.idInstance for i in instances]
    solver_config_ids = [s.idSolverConfig for s in ranked_solvers]
    if not solver_config_ids: return [], None

    max_num_runs = experiment.get_max_num_runs(db)
    max_num_runs_per_solver = max_num_runs * len(instance_ids)

    table = db.metadata.tables['ExperimentResults']
    from_table = table
    table_has_prop = db.metadata.tables['ExperimentResult_has_Property']
    table_has_prop_value = db.metadata.tables[
        'ExperimentResult_has_PropertyValue']
    status_column = table.c['status']
    result_code_column = table.c['resultCode']
    if cost == 'resultTime':
        cost_column = table.c['resultTime']
        cost_property = db.ExperimentResult.resultTime
        cost_limit_column = table.c['CPUTimeLimit']

        if fixed_limit:
            cost_column = expression.case(
                [(table.c['resultTime'] > fixed_limit, fixed_limit)],
                else_=table.c['resultTime'])
            cost_limit_column = literal(fixed_limit)
            status_column = expression.case(
                [(table.c['resultTime'] > fixed_limit, literal(21))],
                else_=table.c['status'])
            result_code_column = expression.case(
                [(table.c['resultTime'] > fixed_limit, literal(-21))],
                else_=table.c['resultCode'])
    elif cost == 'wallTime':
        cost_column = table.c['wallTime']
        cost_property = db.ExperimentResult.wallTime
        cost_limit_column = table.c['wallClockTimeLimit']

        if fixed_limit:
            cost_column = expression.case(
                [(table.c['wallTime'] > fixed_limit, fixed_limit)],
                else_=table.c['wallTime'])
            cost_limit_column = literal(fixed_limit)
            status_column = expression.case(
                [(table.c['wallTime'] > fixed_limit, literal(22))],
                else_=table.c['status'])
            result_code_column = expression.case(
                [(table.c['wallTime'] > fixed_limit, literal(-22))],
                else_=table.c['resultCode'])
    elif cost == 'cost':
        cost_column = table.c['cost']
        cost_property = db.ExperimentResult.cost
        inf = float('inf')
        cost_limit_column = table.c['CPUTimeLimit']  # doesn't matter
    else:
        cost_column = table_has_prop_value.c['value']
        cost_property = db.ResultPropertyValue.value
        inf = float('inf')
        cost_limit_column = table.c['CPUTimeLimit']
        from_table = table.join(
            table_has_prop,
            and_(table_has_prop.c['idProperty'] == int(cost),
                 table_has_prop.c['idExperimentResults'] ==
                 table.c['idJob'])).join(table_has_prop_value)

    vbs_num_solved = 0
    vbs_cumulated_cpu = 0
    from sqlalchemy import func, or_, not_

    property_limit = 0
    if cost in ('resultTime', 'wallTime', 'cost'):
        best_instance_runtimes = db.session.query(func.min(cost_property), db.ExperimentResult.Instances_idInstance) \
            .filter(db.ExperimentResult.Experiment_idExperiment == experiment.idExperiment) \
            .filter(result_code_column.like(u'1%')) \
            .filter(db.ExperimentResult.Instances_idInstance.in_(instance_ids)) \
            .filter(db.ExperimentResult.SolverConfig_idSolverConfig.in_(solver_config_ids)) \
            .group_by(db.ExperimentResult.Instances_idInstance).all()
    else:
        s = select(
            [cost_property, table.c['Instances_idInstance']],
            and_(table.c['Experiment_idExperiment'] == experiment.idExperiment,
                 table.c['resultCode'].like(u'1%'),
                 table.c['Instances_idInstance'].in_(instance_ids),
                 table.c['SolverConfig_idSolverConfig'].in_(
                     solver_config_ids))).select_from(from_table)

        min_by_instance = dict((i, float("inf")) for i in instance_ids)
        for row in db.session.connection().execute(s):
            property_limit = max(property_limit, float(row[0]))
            min_by_instance[row[1]] = min(min_by_instance[row[1]],
                                          float(row[0]))

        best_instance_runtimes = []
        for i in instance_ids:
            best_instance_runtimes.append((min_by_instance[i], i))

    vbs_num_solved = len(best_instance_runtimes) * max_num_runs
    vbs_cumulated_cpu = sum(r[0] for r in best_instance_runtimes
                            if r[0] is not None) * max_num_runs
    vbs_median = numpy.median(
        [r[0] for r in best_instance_runtimes if r[0] is not None])
    vbs_average = numpy.average(
        [r[0] for r in best_instance_runtimes if r[0] is not None])
    best_runtime_by_instance = dict()
    for bir in best_instance_runtimes:
        best_runtime_by_instance[bir[1]] = float(
            bir[0]) if bir[0] is not None else None

    #num_unsolved_instances = len(instances) - len(best_instance_runtimes)

    vbs_parX = 0.0

    # Virtual best solver data
    data = [(
        'Virtual Best Solver (VBS)',  # name of the solver
        vbs_num_solved,  # number of successful runs
        0.0 if max_num_runs_per_solver == 0 else vbs_num_solved /
        float(max_num_runs_per_solver),  # % of all runs
        1.0,  # % of vbs runs
        vbs_cumulated_cpu,  # cumulated CPU time
        (0.0 if vbs_num_solved == 0 else vbs_average),
        (0.0 if vbs_num_solved == 0 else vbs_median),
        0.0,  # avg stddev
        0.0,
        0.0,
        vbs_parX)]

    # single query fetch of all/most required data
    s = select(
        [
            expression.label(
                'cost', cost_column), table.c['SolverConfig_idSolverConfig'],
            table.c['Instances_idInstance']
        ],
        and_(result_code_column.like(u'1%'),
             table.c['Instances_idInstance'].in_(instance_ids),
             table.c['SolverConfig_idSolverConfig'].in_(solver_config_ids),
             table.c['Experiment_idExperiment'] == experiment.idExperiment,
             status_column == 1)).select_from(from_table)
    successful_runs = db.session.connection().execute(s)

    vbs_uses_solver_count = dict((id, 0) for id in solver_config_ids)
    runs_by_solver_and_instance = {}
    for run in successful_runs:
        # dict.has_key() is gone in Python 3; setdefault builds the nested dicts
        runs_by_solver_and_instance.setdefault(
            run.SolverConfig_idSolverConfig, {}).setdefault(
                run.Instances_idInstance, []).append(run)
        if (float(run.cost) if run.cost is not None else
                None) == best_runtime_by_instance[run.Instances_idInstance]:
            vbs_uses_solver_count[run.SolverConfig_idSolverConfig] += 1

    if calculate_avg_stddev:
        finished_runs_by_solver_and_instance = {}
        s = select(
            [
                expression.label('cost', cost_column),
                table.c['SolverConfig_idSolverConfig'],
                table.c['Instances_idInstance']
            ],
            and_(table.c['Instances_idInstance'].in_(instance_ids),
                 table.c['SolverConfig_idSolverConfig'].in_(solver_config_ids),
                 table.c['Experiment_idExperiment'] == experiment.idExperiment,
                 not_(status_column.in_((-1, 0))))).select_from(from_table)
        finished_runs = db.session.connection().execute(s)
        for run in finished_runs:
            finished_runs_by_solver_and_instance.setdefault(
                run.SolverConfig_idSolverConfig, {}).setdefault(
                    run.Instances_idInstance, []).append(run)

    failed_runs_by_solver = dict(
        (sc.idSolverConfig, list()) for sc in ranked_solvers)
    s = select(
        [
            expression.label('cost', cost_column),
            expression.label('cost_limit', cost_limit_column),
            table.c['SolverConfig_idSolverConfig']
        ],
        and_(
            table.c['Experiment_idExperiment'] == experiment.idExperiment,
            table.c['Instances_idInstance'].in_(instance_ids),
            table.c['SolverConfig_idSolverConfig'].in_(solver_config_ids),
            and_(or_(status_column != 1, not_(result_code_column.like(u'1%'))),
                 not_(status_column.in_([-1, 0]))))).select_from(from_table)
    failed_runs = db.session.connection().execute(s)
    for run in failed_runs:
        failed_runs_by_solver[run.SolverConfig_idSolverConfig].append(run)

    for solver in ranked_solvers:
        if solver.idSolverConfig in runs_by_solver_and_instance:
            successful_runs = [run for ilist in runs_by_solver_and_instance[solver.idSolverConfig].values() \
                               for run in ilist]
        else:
            successful_runs = []
        successful_runs_sum = sum(float(j.cost) for j in successful_runs)

        penalized_average_runtime = 0.0
        if calculate_par10:
            if len(successful_runs) + len(
                    failed_runs_by_solver[solver.idSolverConfig]) == 0:
                # this should mean there are no jobs of this solver yet
                penalized_average_runtime = 0.0
            else:
                penalized_average_runtime = (sum([j.cost_limit * par_factor if cost in ('resultTime',
                                                                                        'wallTime') else experiment.costPenalty * par_factor if cost == 'cost' else property_limit * par_factor
                                                  for j in
                                                  failed_runs_by_solver[solver.idSolverConfig]]) + successful_runs_sum) \
                                            / (len(successful_runs) + len(failed_runs_by_solver[solver.idSolverConfig]))

        par1_median_runtime = numpy.median([j.cost_limit if cost in ('resultTime', 'wallTime') else \
                                                experiment.costPenalty if cost == 'cost' else property_limit for j in
                                            failed_runs_by_solver[solver.idSolverConfig]] + [float(j.cost) for j in
                                                                                             successful_runs])
        #average_runtime = numpy.average([float(j.cost) for j in successful_runs])
        cumulated_par1 = sum([j.cost_limit if cost in ('resultTime', 'wallTime') else \
                                  experiment.costPenalty if cost == 'cost' else \
                                      property_limit for j in
                              failed_runs_by_solver[solver.idSolverConfig]]) + successful_runs_sum
        if len(successful_runs) + len(
                failed_runs_by_solver[solver.idSolverConfig]) == 0:
            par1 = 0.0
        else:
            par1 = cumulated_par1 / float(
                (len(successful_runs) +
                 len(failed_runs_by_solver[solver.idSolverConfig])))

        avg_stddev_runtime = 0.0
        avg_cv = 0.0
        avg_qcd = 0.0
        if calculate_avg_stddev:
            count = 0
            for instance in instance_ids:
                if solver.idSolverConfig in finished_runs_by_solver_and_instance and \
                        instance in finished_runs_by_solver_and_instance[solver.idSolverConfig]:
                    instance_runtimes = finished_runs_by_solver_and_instance[
                        solver.idSolverConfig][instance]
                    runtimes = [j[0] or 0.0 for j in instance_runtimes]
                    stddev = numpy.std(runtimes)
                    avg_stddev_runtime += stddev
                    avg_cv += stddev / numpy.average(runtimes)
                    quantiles = mquantiles(runtimes, [0.25, 0.5, 0.75])
                    avg_qcd += (quantiles[2] - quantiles[0]) / quantiles[1]
                    count += 1
            if count > 0:
                avg_stddev_runtime /= float(count)
                avg_cv /= float(count)
                avg_qcd /= float(count)

        data.append((
            solver,
            len(successful_runs),
            0 if len(successful_runs) == 0 else len(successful_runs) /
            float(max_num_runs_per_solver),
            0 if vbs_num_solved == 0 else len(successful_runs) /
            float(vbs_num_solved),
            cumulated_par1,
            par1,
            par1_median_runtime,
            avg_stddev_runtime,
            avg_cv,
            avg_qcd,
            penalized_average_runtime,
        ))

    #if calculate_par10: data.sort(key=lambda x: x[7])
    return data, vbs_uses_solver_count
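For reference, the penalized average runtime computed above charges each failed run par_factor times its cost limit and averages over all runs. A toy restatement with plain numbers instead of database rows:

def par_k(solved_costs, num_failed, limit, k=10):
    # PAR-k: failed runs are charged k * limit, then everything is averaged.
    total_runs = len(solved_costs) + num_failed
    if total_runs == 0:
        return 0.0
    return (sum(solved_costs) + num_failed * k * limit) / total_runs

assert par_k([10.0, 20.0], num_failed=1, limit=100.0) == (30.0 + 1000.0) / 3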
Example #47
class Role(Base, PermissionsMixin):
    __tablename__ = "role"
    __table_args__ = (
        sql_schema.UniqueConstraint("id", "community_id"),
        sql_schema.ForeignKeyConstraint(
            ["parent_id", "community_id"],
            ["role.id", "role.community_id"],
        ),
    )

    id = Column(sql_types.Integer, primary_key=True)
    community_id = Column(sql_types.Integer,
                          ForeignKey("community.id"),
                          nullable=False)
    parent_id = Column(sql_types.Integer, nullable=False)

    name = Column(sql_types.String(80), nullable=False)

    @hybrid_property
    def permissions(self):
        return reduce(lambda a, b: a | b,
                      [getattr(self, m.name) for m in Permissions])

    @permissions.setter
    def permissions(self, value):
        for member in Permissions:
            setattr(self, member.name, member & value)

    @orm.validates(*[m.name for m in Permissions])
    def validate_permissions(self, key, value):
        parent_val = getattr(self.parent, key)
        if self.parent.CAN_DELEGATE and value & parent_val == value:
            return value
        else:
            raise Exception(
                f"{self.name}.{key} cannot be changed to {value} because parent has {parent_val}"
            )

    groups = orm.relationship(
        "Group",
        lazy=True,
        backref=orm.backref("role", lazy=True),
    )
    children = orm.relationship(
        "Role",
        lazy=True,
        overlaps="abilities,community",
        backref=orm.backref("parent",
                            lazy=True,
                            remote_side=[id],
                            overlaps="abilities,community"),
    )

    __mapper_args__ = {
        "polymorphic_on":
        sql_expr.case(
            (parent_id == id, "root_role"),
            else_="role",
        ),
        "polymorphic_identity":
        "role",
    }

    def __repr__(self):
        return f"Role(" f"name={self.name}" f")"

    def permits(self, request):
        return self.permissions & request == request

    def print_permissions(self):
        out = ""
        for member in Permissions:
            if self.permits(member):
                out += f"{member.name}, "
        return out[:-2]
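Note that this model uses the SQLAlchemy 1.4+ calling convention for case() (positional WHEN tuples), while most other examples here use the legacy list form that 2.0 removed. Both spellings of the same discriminator, as a sketch:

from sqlalchemy import case

legacy = case([(Role.parent_id == Role.id, "root_role")], else_="role")  # SQLAlchemy < 1.4
modern = case((Role.parent_id == Role.id, "root_role"), else_="role")    # SQLAlchemy >= 1.4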
Example #48
def view_index_lang(session, request, language):
    """ Does the main index page for a single language in DDTSS """
    lang = session.query(Languages).get(language)
    if not lang or not lang.enabled_ddtss:
        raise Http404()

    user = get_user(request, session)

    if user.lastlanguage != lang:
        user.lastlanguage = lang
    user.lastseen = int(time.time())

    if request.method == 'POST':
        form = FetchForm(request.POST)

        if not form.is_valid():
            # Maybe return HTTP 400 - Bad request?
            return show_message_screen(request, 'Bad request %r' % form.errors, 'ddtss_index_lang', language)

        if not lang.translation_model.user_allowed(user, language, lang.translation_model.ACTION_TRANSLATE):
            return show_message_screen(request, 'User is not permitted to translate', 'ddtss_index_lang', language)

        pack = form.cleaned_data['package'].strip()
        force = form.cleaned_data['force']

        if pack == '':
            description_id = lang.get_next_to_translate(session)
        elif re.match(r'^\d+$', pack):
            description_id = int(pack)
        else:
            packageversion = session.query(PackageVersion).filter(PackageVersion.package == pack). \
                                     join(ActiveDescription, ActiveDescription.description_id == PackageVersion.description_id).first()

            if not packageversion:
                return show_message_screen(request, 'No Package %s found' % pack, 'ddtss_index_lang', language)

            description_id = packageversion.description_id

        description = session.query(Description).filter(Description.description_id == description_id).first()

        if not description:
            return show_message_screen(request, 'No description-id %s found' % str(description_id), 'ddtss_index_lang', language)

        if language not in description.translation or force:
            trans = session.query(PendingTranslation).filter_by(language=lang, description_id=description_id).with_lockmode('update').first()
            if not trans:
                message = Messages(
                        message="",
                        actionstring="fetched",
                        to_user=None,
                        language=language,
                        for_description=description_id,
                        from_user=user.username,
                        in_reply_to=None,
                        timestamp=int(time.time()))
                session.add(message)

                trans = PendingTranslation(
                        description_id=description_id,
                        language=lang,
                        firstupdate=int(time.time()),
                        lastupdate=int(time.time()),
                        owner_username=user.username,
                        owner_locktime=int(time.time()),
                        iteration=0,
                        state=0)
                trans.short, trans.long = PendingTranslation.make_suggestion(description, language)
                session.add(trans)
                session.commit()
                return show_message_screen(request, 'Fetched package %s (%s)' % (description.package, str(description_id)), 'ddtss_translate', language, str(description_id))

        return show_message_screen(request, 'Package %s already translated (and not forced)' % (pack), 'ddtss_index_lang', language)

    session.commit()

    # TODO: Don't load actual descriptions
    translations = session.query(PendingTranslation,
                                 func.sum(expression.case([(PendingTranslationReview.username == user.username, 1)], else_=0)).label('reviewed'),
                                 func.count().label('reviews')) \
                          .outerjoin(PendingTranslationReview) \
                          .filter(PendingTranslation.language_ref == language) \
                          .group_by(PendingTranslation) \
                          .options(subqueryload(PendingTranslation.reviews)) \
                          .options(subqueryload(PendingTranslation.description)) \
                          .options(subqueryload('description.milestones')) \
                          .all()

    pending_translations = []
    pending_review = []
    reviewed = []

    for trans, reviewed_by_me, reviews in translations:
        if trans.state == PendingTranslation.STATE_PENDING_REVIEW:
            if reviewed_by_me or trans.owner_username == user.username:
                reviewed.append(trans)
            else:
                pending_review.append(trans)
        elif trans.state == PendingTranslation.STATE_PENDING_TRANSLATION:
            pending_translations.append(trans)

    reviewed.sort(key=lambda t: t.lastupdate, reverse=True)
    pending_review.sort(key=lambda t: t.lastupdate, reverse=False)
    pending_translations.sort(key=lambda t: t.firstupdate, reverse=False)

    global_messages = Messages.global_messages(session) \
                          .order_by(Messages.timestamp.desc()) \
                          .limit(20) \
                          .all()

    team_messages = Messages.team_messages(session, language) \
                          .order_by(Messages.timestamp.desc()) \
                          .limit(20) \
                          .all()

    user_messages = Messages.user_messages(session, user.username) \
                          .order_by(Messages.timestamp.desc()) \
                          .limit(20) \
                          .all()

    milestones = []
    for type, name, milestone in (('user_milestone', 'User', user.milestone),
                                  ('lang_milestone_high', 'Team high', lang.milestone_high),
                                  ('lang_milestone_medium', 'Team medium', lang.milestone_medium),
                                  ('lang_milestone_low', 'Team low', lang.milestone_low)):
        if not milestone:
            continue
        m = session.query(DescriptionMilestone).filter(DescriptionMilestone.milestone == milestone).first()
        if not m:
            continue

        info = m.info_language(lang)
        info['type'] = type
        info['typename'] = name
        milestones.append(info)

    involveddescriptions = [x for x, in Messages.involveddescriptions(session, user.username).all()]
    recently_translated = Messages.recently_translated(session, language).limit(10).all()

    response = render_to_response("ddtss/index_lang.html", dict(
        lang=lang,
        user=user,
        auth=user.get_authority(language),
        pending_translations=pending_translations,
        pending_review=pending_review,
        reviewed=reviewed,
        involveddescriptions=involveddescriptions,
        milestones=milestones,
        recently_translated=recently_translated,
        global_messages=global_messages,
        team_messages=team_messages,
        user_messages=user_messages), context_instance=RequestContext(request))

    return save_user(response, user)
Example #49
            def get_results_by_instance(self, db, solver_configs, instance, penalize_incorrect, penalize_factor=1,
                                        result_property='resultTime'):
                results_by_instance = dict((sc.idSolverConfig, dict()) for sc in solver_configs)
                if not solver_configs: return results_by_instance
                solver_config_ids = [sc.idSolverConfig for sc in solver_configs]

                table = db.metadata.tables['ExperimentResults']
                table_result_codes = db.metadata.tables['ResultCodes']
                table_status_codes = db.metadata.tables['StatusCodes']
                c = table.c
                c_rc = table_result_codes.c
                c_sc = table_status_codes.c

                if result_property == 'resultTime':
                    c_cost = c['resultTime']
                    c_limit = c['CPUTimeLimit']
                elif result_property == 'wallTime':
                    c_cost = c['wallTime']
                    c_limit = c['wallClockTimeLimit']
                elif result_property == 'cost':
                    c_cost = c['cost']
                    c_limit = -1
                else:
                    # result property table
                    c_cost = 0
                    c_limit = -1

                join_expression = table.join(table_result_codes).join(table_status_codes)
                select_statement = select([
                                              c['idJob'],
                                              c['status'],
                                              c['resultCode'],
                                              expression.label('result_code_description', c_rc['description']),
                                              expression.label('status_code_description', c_sc['description']),
                                              c['Instances_idInstance'],
                                              c['SolverConfig_idSolverConfig'],
                                              c['run'],
                                              expression.label('cost',
                                                               expression.case([(
                                                                                    c['status'] > 0,
                                                                                    # only consider cost of "finished" jobs
                                                                                    c_cost if not penalize_incorrect else \
                                                                                        expression.case(
                                                                                            [(
                                                                                             table.c['resultCode'].like(
                                                                                                 '1%'), c_cost)],
                                                                                            else_=c_limit * penalize_factor
                                                                                        )
                                                                                )],
                                                                               else_=None)
                                              ),
                                              expression.label('limit', c_limit),
                                          ],
                                          and_(
                                              c['Experiment_idExperiment'] == self.idExperiment,
                                              c['Instances_idInstance'] == instance.idInstance,
                                              c['SolverConfig_idSolverConfig'].in_(solver_config_ids),

                                          ),
                                          from_obj=join_expression,
                )

                results = db.session.connection().execute(select_statement)
                for row in results:
                    results_by_instance[row.SolverConfig_idSolverConfig][row.run] = row

                return results_by_instance
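With penalize_incorrect enabled, the nested CASE labelled 'cost' above renders roughly as the following SQL, so unfinished jobs yield NULL and finished-but-incorrect jobs are charged the penalized limit:

CASE WHEN status > 0
     THEN CASE WHEN resultCode LIKE '1%' THEN cost
               ELSE cost_limit * penalize_factor END
     ELSE NULL
END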
Example #50
def _get_orderby_clauses(order_by_list, session):
    """Sorts a set of runs based on their natural ordering and an overriding set of order_bys.
    Runs are naturally ordered first by start time descending, then by run id for tie-breaking.
    """

    clauses = []
    ordering_joins = []
    clause_id = 0
    observed_order_by_clauses = set()
    # contrary to filters, it is not easily feasible to separately handle sorting
    # on attributes and on joined tables as we must keep all clauses in the same order
    if order_by_list:
        for order_by_clause in order_by_list:
            clause_id += 1
            (key_type, key, ascending
             ) = SearchUtils.parse_order_by_for_search_runs(order_by_clause)
            if SearchUtils.is_string_attribute(
                    key_type, key, "=") or SearchUtils.is_numeric_attribute(
                        key_type, key, "="):
                order_value = getattr(SqlRun, SqlRun.get_attribute_name(key))
            else:
                if SearchUtils.is_metric(key_type,
                                         "="):  # any valid comparator
                    entity = SqlLatestMetric
                elif SearchUtils.is_tag(key_type, "="):
                    entity = SqlTag
                elif SearchUtils.is_param(key_type, "="):
                    entity = SqlParam
                else:
                    raise MlflowException(
                        "Invalid identifier type '%s'" % key_type,
                        error_code=INVALID_PARAMETER_VALUE,
                    )

                # build a subquery first because we will join it in the main request so that the
                # metric we want to sort on is available when we apply the sorting clause
                subquery = session.query(entity).filter(
                    entity.key == key).subquery()

                ordering_joins.append(subquery)
                order_value = subquery.c.value

            # sqlite does not support NULLS LAST expression, so we sort first by
            # presence of the field (and is_nan for metrics), then by actual value
            # As the subqueries are created independently and used later in the
            # same main query, the CASE WHEN columns need to have unique names to
            # avoid ambiguity
            if SearchUtils.is_metric(key_type, "="):
                clauses.append(
                    sql.case(
                        [
                            # Ideally the use of "IS" is preferred here but owing to sqlalchemy
                            # translation in MSSQL we are forced to use "=" instead.
                            # These 2 options are functionally identical / unchanged because
                            # the column (is_nan) is not nullable. However it could become an issue
                            # if this precondition changes in the future.
                            (subquery.c.is_nan == sqlalchemy.true(), 1),
                            (order_value.is_(None), 1),
                        ],
                        else_=0,
                    ).label("clause_%s" % clause_id))
            else:  # other entities do not have an 'is_nan' field
                clauses.append(
                    sql.case([(order_value.is_(None), 1)],
                             else_=0).label("clause_%s" % clause_id))

            if (key_type, key) in observed_order_by_clauses:
                raise MlflowException(
                    "`order_by` contains duplicate fields: {}".format(
                        order_by_list))
            observed_order_by_clauses.add((key_type, key))

            if ascending:
                clauses.append(order_value)
            else:
                clauses.append(order_value.desc())

    if (SearchUtils._ATTRIBUTE_IDENTIFIER,
            SqlRun.start_time.key) not in observed_order_by_clauses:
        clauses.append(SqlRun.start_time.desc())
    clauses.append(SqlRun.run_uuid)
    return clauses, ordering_joins
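Example #50 emulates NULLS LAST by ordering on a synthetic 0/1 flag before the value itself. A stripped-down sketch of the same trick, assuming the SqlRun model and a session from the surrounding store:

from sqlalchemy import sql

order_value = SqlRun.start_time
query = session.query(SqlRun).order_by(
    sql.case([(order_value.is_(None), 1)], else_=0),  # NULL rows get 1 and sort last
    order_value.desc(),
)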
Example #51
class Release(db.Model):

    __tablename__ = "releases"

    @declared_attr
    def __table_args__(cls):  # noqa
        return (
            Index("release_created_idx", cls.created.desc()),
            Index("release_project_created_idx", cls.project_id,
                  cls.created.desc()),
            Index("release_version_idx", cls.version),
            UniqueConstraint("project_id", "version"),
        )

    __repr__ = make_repr("project", "version")
    __parent__ = dotted_navigator("project")
    __name__ = dotted_navigator("version")

    project_id = Column(
        ForeignKey("projects.id", onupdate="CASCADE", ondelete="CASCADE"),
        nullable=False,
    )
    version = Column(Text, nullable=False)
    canonical_version = Column(Text, nullable=False)
    is_prerelease = orm.column_property(func.pep440_is_prerelease(version))
    author = Column(Text)
    author_email = Column(Text)
    maintainer = Column(Text)
    maintainer_email = Column(Text)
    home_page = Column(Text)
    license = Column(Text)
    summary = Column(Text)
    keywords = Column(Text)
    platform = Column(Text)
    download_url = Column(Text)
    _pypi_ordering = Column(Integer)
    requires_python = Column(Text)
    created = Column(DateTime(timezone=False),
                     nullable=False,
                     server_default=sql.func.now())

    description_id = Column(
        ForeignKey("release_descriptions.id",
                   onupdate="CASCADE",
                   ondelete="CASCADE"),
        nullable=False,
    )
    description = orm.relationship(
        "Description",
        backref=orm.backref(
            "release",
            cascade="all, delete-orphan",
            passive_deletes=True,
            passive_updates=True,
            single_parent=True,
            uselist=False,
        ),
    )

    yanked = Column(Boolean, nullable=False, server_default=sql.false())

    yanked_reason = Column(Text, nullable=False, server_default="")

    _classifiers = orm.relationship(
        Classifier,
        backref="project_releases",
        secondary=lambda: release_classifiers,
        order_by=expression.case(
            {c: i
             for i, c in enumerate(sorted_classifiers)},
            value=Classifier.classifier,
        ),
        passive_deletes=True,
    )
    classifiers = association_proxy("_classifiers", "classifier")

    files = orm.relationship(
        "File",
        backref="release",
        cascade="all, delete-orphan",
        lazy="dynamic",
        order_by=lambda: File.filename,
        passive_deletes=True,
    )

    dependencies = orm.relationship(
        "Dependency",
        backref="release",
        cascade="all, delete-orphan",
        passive_deletes=True,
    )

    vulnerabilities = orm.relationship(
        VulnerabilityRecord,
        back_populates="releases",
        secondary="release_vulnerabilities",
        passive_deletes=True,
    )

    _requires = _dependency_relation(DependencyKind.requires)
    requires = association_proxy("_requires", "specifier")

    _provides = _dependency_relation(DependencyKind.provides)
    provides = association_proxy("_provides", "specifier")

    _obsoletes = _dependency_relation(DependencyKind.obsoletes)
    obsoletes = association_proxy("_obsoletes", "specifier")

    _requires_dist = _dependency_relation(DependencyKind.requires_dist)
    requires_dist = association_proxy("_requires_dist", "specifier")

    _provides_dist = _dependency_relation(DependencyKind.provides_dist)
    provides_dist = association_proxy("_provides_dist", "specifier")

    _obsoletes_dist = _dependency_relation(DependencyKind.obsoletes_dist)
    obsoletes_dist = association_proxy("_obsoletes_dist", "specifier")

    _requires_external = _dependency_relation(DependencyKind.requires_external)
    requires_external = association_proxy("_requires_external", "specifier")

    _project_urls = _dependency_relation(DependencyKind.project_url)
    project_urls = association_proxy("_project_urls", "specifier")

    uploader_id = Column(
        ForeignKey("users.id", onupdate="CASCADE", ondelete="SET NULL"),
        nullable=True,
        index=True,
    )
    uploader = orm.relationship(User)
    uploaded_via = Column(Text)

    @property
    def urls(self):
        _urls = OrderedDict()

        if self.home_page:
            _urls["Homepage"] = self.home_page
        if self.download_url:
            _urls["Download"] = self.download_url

        for urlspec in self.project_urls:
            name, _, url = urlspec.partition(",")
            name = name.strip()
            url = url.strip()
            if name and url:
                _urls[name] = url

        return _urls

    @property
    def github_repo_info_url(self):
        for url in self.urls.values():
            parsed = urlparse(url)
            segments = parsed.path.strip("/").split("/")
            if parsed.netloc in {"github.com", "www.github.com"
                                 } and len(segments) >= 2:
                user_name, repo_name = segments[:2]
                return f"https://api.github.com/repos/{user_name}/{repo_name}"

    @property
    def has_meta(self):
        return any([
            self.license,
            self.keywords,
            self.author,
            self.author_email,
            self.maintainer,
            self.maintainer_email,
            self.requires_python,
        ])
Example #52
 def msg_count(self):
     return expression.case([(self.msg_id == 0, 0)], else_=self.msg_id - self.start_msg_id)
Example #53
 def unsynced_count(self):
     return expression.case([(self.msg_id == 0, 0),
                             (self.sync_msg_id == 0, self.msg_id - self.start_msg_id)],
                            else_=self.msg_id - self.sync_msg_id)
Example #54
 def unhandled_count(self):
     return expression.case([(self.msg_id == 0, 0),
                             (self.handler_msg_id == 0, self.msg_id - self.start_msg_id)],
                            else_=self.msg_id - self.handler_msg_id)
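Examples #52 through #54 build CASE expressions from message-counter columns; the generated SQL can be inspected by printing the expression. A small sketch using literal_column stand-ins for the columns in #54:

from sqlalchemy import literal_column
from sqlalchemy.sql import expression

msg_id = literal_column("msg_id")
start_msg_id = literal_column("start_msg_id")
handler_msg_id = literal_column("handler_msg_id")

expr = expression.case([(msg_id == 0, 0),
                        (handler_msg_id == 0, msg_id - start_msg_id)],
                       else_=msg_id - handler_msg_id)
print(expr)  # CASE WHEN msg_id = :param_1 THEN :param_2 WHEN ... END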
Example #55
    def _get_paid_not_approved_query(cls, query_type, start_date, end_date):
        """
        Gets the query for paid but not approved shares between and including
        start and end date.

        Args:
            query_type: The type of the query to be built. 'data' for
                retrieving rows and 'shares_count' for an aggregate count
                query.
            start_date: The first date of which paid and not approved shares
                are considered.
            end_date: The last date of which paid and not approved shares are
                considered.

        Returns:
            A query according to the specified query_type.

            For 'data' the query is built to retrieve rows with attributes 'id'
            for member id, 'lastname' for the member's lastname, 'firstname'
            for the member's firstname, 'shares_count' for the number of shares
            and 'payment_received_date' for the date on which the payment was
            received.

            For 'shares_count' an aggregate count query is returned to retrieve
            the number of shares of all relevant shares packages.
        """
        # Shares which of the day of the request have not been approved are not
        # yet stored in Shares but only available on the C3sMember.

        shares_count = expression.case(
            # "== None" for SqlAlchemy instead of Python "is None"
            # pylint: disable=singleton-comparison
            [(Shares.id == None, C3sMember.num_shares)],
            else_=Shares.number
        )
        payment_received_date = expression.case(
            [(
                # "== None" for SqlAlchemy instead of Python "is None"
                # pylint: disable=singleton-comparison
                Shares.id == None,
                # C3sMember.payment_received_date has the data type DateTime
                # but Date is required as it is used in
                # Shares.payment_received_date. As CAST on DateTime '2017-01-02
                # 12:23:34.456789' returns '2017' in SQLite and therefore
                # cannot be used substring is used instead and then SQLAlchemy
                # is forced by type_coerce to parse it as a Date column.
                expression.type_coerce(
                    func.substr(C3sMember.payment_received_date, 1, 10), Date)
            )],
            else_=Shares.payment_received_date
        )
        # SqlAlchemy equality to None must be used as "== None" instead of
        # Python "is not None".
        date_of_acquisition = expression.case(
            # "== None" for SqlAlchemy instead of Python "is None"
            # pylint: disable=singleton-comparison
            [(Shares.id == None, C3sMember.membership_date)],
            else_=Shares.date_of_acquisition
        )

        if query_type == 'data':
            # pylint: disable=no-member
            query = DBSession.query(
                C3sMember.id,
                C3sMember.lastname,
                C3sMember.firstname,
                shares_count.label('shares_count'),
                payment_received_date.label('payment_received_date'),
            )
        if query_type == 'shares_count':
            # pylint: disable=no-member
            query = DBSession.query(
                func.sum(shares_count)
            )
        # Use outer joins as Shares do not have to exist yet.
        return query.select_from(C3sMember) \
            .outerjoin(members_shares) \
            .outerjoin(Shares) \
            .filter(
                expression.and_(
                    # membership not approved in time period
                    expression.or_(
                        # membership or share approved later than end date
                        date_of_acquisition > end_date,
                        # or membership or share not approved yet (default
                        # date)
                        date_of_acquisition == date(1970, 1, 1),
                    ),
                    # payment received in time period
                    payment_received_date >= start_date,
                    payment_received_date <= end_date,
                )
            )
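Each CASE above falls back from Shares to the corresponding C3sMember column when no Shares row was joined. For the plain count column the same fallback could be written with COALESCE, assuming Shares.number is never NULL when a Shares row exists:

from sqlalchemy import func

shares_count = func.coalesce(Shares.number, C3sMember.num_shares)

The date column still needs the explicit CASE because of the substr/type_coerce workaround described in the comments.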
Example #56
File: views.py Project: hugovk/warehouse
def search(request):
    metrics = request.find_service(IMetricsService, context=None)

    querystring = request.params.get("q", "").replace("'", '"')
    order = request.params.get("o", "")
    classifiers = request.params.getall("c")
    query = get_es_query(request.es, querystring, order, classifiers)

    try:
        page_num = int(request.params.get("page", 1))
    except ValueError:
        raise HTTPBadRequest("'page' must be an integer.")

    try:
        page = ElasticsearchPage(query,
                                 page=page_num,
                                 url_maker=paginate_url_factory(request))
    except elasticsearch.TransportError:
        metrics.increment("warehouse.views.search.error")
        raise HTTPServiceUnavailable

    if page.page_count and page_num > page.page_count:
        raise HTTPNotFound

    available_filters = collections.defaultdict(list)

    classifiers_q = (request.db.query(Classifier).with_entities(
        Classifier.classifier).filter(
            exists([release_classifiers.c.trove_id
                    ]).where(release_classifiers.c.trove_id == Classifier.id),
            Classifier.classifier.notin_(deprecated_classifiers.keys()),
        ).order_by(
            expression.case(
                {c: i
                 for i, c in enumerate(sorted_classifiers)},
                value=Classifier.classifier,
            )))

    for cls in classifiers_q:
        first, *_ = cls.classifier.split(" :: ")
        available_filters[first].append(cls.classifier)

    def filter_key(item):
        try:
            return 0, SEARCH_FILTER_ORDER.index(item[0]), item[0]
        except ValueError:
            return 1, 0, item[0]

    def form_filters_tree(split_list):
        """
        Takes a list of lists, each of them containing a filter and
        one of its children.
        Returns a dictionary, each key being a filter and each value being
        the filter's children.
        """
        d = {}
        for list_ in split_list:
            current_level = d
            for part in list_:
                if part not in current_level:
                    current_level[part] = {}
                current_level = current_level[part]
        return d

    def process_available_filters():
        """
        Processes available filters and returns a list of dictionaries.
        The value of a key in the dictionary represents its children
        """
        sorted_filters = sorted(available_filters.items(), key=filter_key)
        output = []
        for f in sorted_filters:
            classifier_list = f[1]
            split_list = [i.split(" :: ") for i in classifier_list]
            tree = form_filters_tree(split_list)
            output.append(tree)
        return output

    metrics.histogram("warehouse.views.search.results", page.item_count)

    return {
        "page": page,
        "term": querystring,
        "order": order,
        "available_filters": process_available_filters(),
        "applied_filters": request.params.getall("c"),
    }
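Both this query and the Release model in example #51 use the dictionary form of case(): with value= given, the column is compared against each mapping key. A minimal sketch with an illustrative ordering list:

from sqlalchemy import case, literal_column

ordering = ["Development Status", "Framework", "Topic"]  # illustrative
expr = case({c: i for i, c in enumerate(ordering)},
            value=literal_column("classifier"))
# renders roughly: CASE classifier WHEN 'Development Status' THEN 0 WHEN ... END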
Example #57
def community_search(json):
    PER_PAGE = 20
    portals_ids = []
    for p in g.user.active_portals_subscribed:
        portals_ids.append(p.id)

    companies_ids = []
    for c in g.user.companies_employer_active:
        companies_ids.append(c.id)

    # TODO: OZ by OZ: use load_for_infinite_scroll

    query = g.db.query(User). \
        outerjoin(UserPortalReader,
                  and_(UserPortalReader.user_id == User.id, UserPortalReader.status == 'active',
                       UserPortalReader.portal_id.in_(portals_ids))). \
        outerjoin(Portal,
                  and_(UserPortalReader.portal_id == Portal.id, Portal.status == Portal.STATUSES['PORTAL_ACTIVE'])). \
        outerjoin(UserCompany,
                  and_(UserCompany.user_id == User.id, UserCompany.status == UserCompany.STATUSES['EMPLOYMENT_ACTIVE'],
                       UserCompany.company_id.in_(companies_ids))). \
        outerjoin(Company,
                  and_(UserCompany.company_id == Company.id, Company.status == Company.STATUSES['COMPANY_ACTIVE'])). \
        outerjoin(Contact,
                  or_(and_(Contact.user1_id == User.id, Contact.user2_id == g.user.id),
                      and_(Contact.user2_id == User.id, Contact.user1_id == g.user.id))). \
        filter(and_(User.full_name.ilike("%" + json['text'] + "%"),
                    User.id != g.user.id,
                    or_(Portal.id != None, Company.id != None))). \
        group_by(User.id, Contact.status). \
        order_by(expression.case([
        (or_(
            and_(Contact.status == Contact.STATUSES['REQUESTED_UNCONFIRMED'], User.id == g.user.id),
            and_(Contact.status == Contact.STATUSES['UNCONFIRMED_REQUESTED'], User.id != g.user.id)), '1'),
        (or_(
            and_(Contact.status == Contact.STATUSES['REQUESTED_UNCONFIRMED'], User.id != g.user.id),
            and_(Contact.status == Contact.STATUSES['UNCONFIRMED_REQUESTED'], User.id == g.user.id)), '2'),
        (or_(
            and_(Contact.status == Contact.STATUSES['ACTIVE_BANNED'], User.id == g.user.id),
            and_(Contact.status == Contact.STATUSES['BANNED_ACTIVE'], User.id != g.user.id)), '3'),
        (or_(
            and_(Contact.status == Contact.STATUSES['ACTIVE_BANNED'], User.id != g.user.id),
            and_(Contact.status == Contact.STATUSES['BANNED_ACTIVE'], User.id == g.user.id)), '4'),
        (or_(
            and_(Contact.status == Contact.STATUSES['ANY_REVOKED'], User.id == g.user.id),
            and_(Contact.status == Contact.STATUSES['REVOKED_ANY'], User.id != g.user.id)), 'X'),
        (or_(
            and_(Contact.status == Contact.STATUSES['ANY_REVOKED'], User.id != g.user.id),
            and_(Contact.status == Contact.STATUSES['REVOKED_ANY'], User.id == g.user.id)), 'X'),
        (Contact.status == Contact.STATUSES['ACTIVE_ACTIVE'], '5')
    ], else_='X'), User.full_name, User.id). \
        limit(PER_PAGE + 1).offset((json['page'] - 1) * PER_PAGE)

    users = query.all()
    next_page = (json['page'] + 1) if len(users) > PER_PAGE else False
    users = users[0:PER_PAGE]

    ret = []

    for u in users:
        user_dict = u.get_client_side_dict(
            fields='id,full_name,avatar.url,address_email')
        user_dict['common_portals_subscribed'] = [
            p.get_client_side_dict(fields='id,logo.url,host,name')
            for p in u.active_portals_subscribed if p.id in portals_ids
        ]
        user_dict['common_companies_employers'] = [
            c.get_client_side_dict(fields='id,logo.url,name')
            for c in u.companies_employer_active if c.id in companies_ids
        ]
        contact = g.db().query(Contact).filter_by(user1_id=min([u.id, g.user.id])).filter_by(
            user2_id=max([u.id, g.user.id])) \
            .first()
        if contact:
            user_dict['contact_status'] = contact.get_status_for_user(
                g.user.id)
        else:
            user_dict['contact_status'] = False
        ret.append(user_dict)

    return {'community': ret, 'next_page': next_page}
Example #58
 def find_user_authorization_by_org_id_and_corp_type(cls, org_id: int, corp_type: str):
     """Return authorization view object."""
     return cls.query.filter_by(corp_type_code=corp_type, org_id=org_id).order_by(
         expression.case(((Authorization.org_membership == OWNER, 1),
                          (Authorization.org_membership == ADMIN, 2),
                          (Authorization.org_membership == MEMBER, 3)))).first()
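This case() has no else_, so any other membership value yields NULL and its sort position depends on the database's NULL ordering. Pinning unmatched rows last would only need an explicit default, e.g.:

expression.case(((Authorization.org_membership == OWNER, 1),
                 (Authorization.org_membership == ADMIN, 2),
                 (Authorization.org_membership == MEMBER, 3)),
                else_=4)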
Example #59
            def get_result_matrix(self, db, solver_configs, instances, cost='resultTime', fixed_limit=None):
                """ Returns the results as matrix of lists of result tuples, i.e.
                    Dict<idInstance, Dict<idSolverConfig, List of runs>> """
                num_successful = dict(
                    (i.idInstance, dict((sc.idSolverConfig, 0) for sc in solver_configs)) for i in instances)
                num_completed = dict(
                    (i.idInstance, dict((sc.idSolverConfig, 0) for sc in solver_configs)) for i in instances)
                M = dict((i.idInstance, dict((sc.idSolverConfig, list()) for sc in solver_configs)) for i in instances)
                solver_config_ids = [sc.idSolverConfig for sc in solver_configs]
                instance_ids = [i.idInstance for i in instances]
                if not solver_config_ids or not instance_ids:
                    return M, 0, 0
                table = db.metadata.tables['ExperimentResults']
                table_result_codes = db.metadata.tables['ResultCodes']
                from_table = table
                table_has_prop = db.metadata.tables['ExperimentResult_has_Property']
                table_has_prop_value = db.metadata.tables['ExperimentResult_has_PropertyValue']

                status_column = table.c['status']
                result_code_column = table.c['resultCode']
                if cost == 'resultTime':
                    cost_column = table.c['resultTime']
                    cost_property = db.ExperimentResult.resultTime
                    cost_limit_column = table.c['CPUTimeLimit']

                    if fixed_limit:
                        cost_column = expression.case([(table.c['resultTime'] > fixed_limit, fixed_limit)],
                                                      else_=table.c['resultTime'])
                        cost_limit_column = literal(fixed_limit)
                        status_column = expression.case([(table.c['resultTime'] > fixed_limit, literal(21))],
                                                        else_=table.c['status'])
                        result_code_column = expression.case([(table.c['resultTime'] > fixed_limit, literal(-21))],
                                                             else_=table.c['resultCode'])
                elif cost == 'wallTime':
                    cost_column = table.c['wallTime']
                    cost_property = db.ExperimentResult.wallTime
                    cost_limit_column = table.c['wallClockTimeLimit']

                    if fixed_limit:
                        cost_column = expression.case([(table.c['wallTime'] > fixed_limit, fixed_limit)],
                                                      else_=table.c['wallTime'])
                        cost_limit_column = literal(fixed_limit)
                        status_column = expression.case([(table.c['wallTime'] > fixed_limit, literal(22))],
                                                        else_=table.c['status'])
                        result_code_column = expression.case([(table.c['wallTime'] > fixed_limit, literal(-22))],
                                                             else_=table.c['resultCode'])
                elif cost == 'cost':
                    cost_column = table.c['cost']
                    cost_property = db.ExperimentResult.cost
                    inf = float('inf')
                    cost_limit_column = table.c['CPUTimeLimit']  # doesn't matter
                else:
                    cost_column = table_has_prop_value.c['value']
                    cost_property = db.ResultPropertyValue.value
                    inf = float('inf')
                    cost_limit_column = table.c['CPUTimeLimit']
                    from_table = table.join(table_has_prop, and_(table_has_prop.c['idProperty'] == int(cost),
                                                                 table_has_prop.c['idExperimentResults'] == table.c[
                                                                     'idJob'])).join(table_has_prop_value)

                s = select([table.c['idJob'], expression.label('resultCode', result_code_column),
                            expression.label('cost', cost_column), expression.label('status', status_column),
                            table.c['SolverConfig_idSolverConfig'], table.c['Instances_idInstance'],
                            table_result_codes.c['description'], expression.label('limit', cost_limit_column)],
                           and_(table.c['Experiment_idExperiment'] == self.idExperiment,
                                table.c['SolverConfig_idSolverConfig'].in_(solver_config_ids),
                                table.c['Instances_idInstance'].in_(instance_ids)),
                           from_obj=from_table.join(table_result_codes))

                Run = namedtuple('Run', ['idJob', 'status', 'result_code_description', 'resultCode', 'resultTime',
                                         'successful', 'penalized_time10', 'idSolverConfig', 'idInstance',
                                         'penalized_time1', 'censored'])

                for r in db.session.connection().execute(s):
                    if r.Instances_idInstance not in M:
                        continue
                    if r.SolverConfig_idSolverConfig not in M[r.Instances_idInstance]:
                        continue
                    if str(r.resultCode).startswith('1'):
                        num_successful[r.Instances_idInstance][r.SolverConfig_idSolverConfig] += 1
                    if r.status not in STATUS_PROCESSING:
                        num_completed[r.Instances_idInstance][r.SolverConfig_idSolverConfig] += 1
                    M[r.Instances_idInstance][r.SolverConfig_idSolverConfig].append(
                        Run(r.idJob, int(r.status), r[6], int(r.resultCode),
                            None if int(r.status) <= 0 else float(r.cost), str(r.resultCode).startswith('1'),
                            float(r.cost) if str(r.resultCode).startswith('1') else (inf if cost not in (
                            'resultTime', 'wallTime') else float(r.limit)) * 10,
                            r.SolverConfig_idSolverConfig, r.Instances_idInstance,
                            float(r.cost) if str(r.resultCode).startswith('1') else (
                            inf if cost not in ('resultTime', 'wallTime') else float(r.limit)),
                            not str(r.resultCode).startswith('1')))
                return M, num_successful, num_completed