Example #1
def get_data_by_time(session, start_time, end_time):
    #return utils.list_db_objects(session, Temperature, created_at={'ge': str(start_time), 'le': str(end_time)})
    return utils.list_db_objects_by_group(session, Temperature,
                                          select=[func.avg(Temperature.temperature).label("avg_temp"),
                                                  func.hour(Temperature.created_at).label("hour")],
                                          group_by=func.hour(Temperature.created_at),
                                          created_at={'ge': str(start_time), 'le': str(end_time)})
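For context on the pattern used throughout this page: func.hour is SQLAlchemy's generic-function mechanism, so func.hour(col) renders literally as HOUR(col), which MySQL/MariaDB provide (PostgreSQL and SQLite do not; there you would use extract('hour', col)). A minimal standalone sketch of the query the utils wrapper above presumably builds, with the session and model assumed:

from sqlalchemy import func

def hourly_avg_temperature(session, start_time, end_time):
    # Average temperature per hour of day; HOUR() is MySQL-specific.
    return (
        session.query(
            func.avg(Temperature.temperature).label("avg_temp"),
            func.hour(Temperature.created_at).label("hour"),
        )
        .filter(Temperature.created_at >= start_time,
                Temperature.created_at <= end_time)
        .group_by(func.hour(Temperature.created_at))
        .all()
    )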
Example #2
def get_data_by_time(session, start_time, end_time):
    #return utils.list_db_objects(session, Illuminance, created_at={'ge': str(start_time), 'le': str(end_time)})
    return utils.list_db_objects_by_group(session, Illuminance,
                                          select=[func.avg(Illuminance.illuminance).label("avg_illuminance"),
                                                  func.hour(Illuminance.created_at).label("hour")],
                                          group_by=func.hour(Illuminance.created_at),
                                          created_at={'ge': str(start_time), 'le': str(end_time)})
Example #3
def get_hourly_single_exchange_volume_in_period_from_db_trades(
        tc_db, exchange_name, start_time, end_time):
    """
    Get the exchange volume for this exchange in this period from our saved version
    of the trade history.
    """

    # Watch this query for performance.
    results = tc_db.query(
            func.hour(EHTrade.timestamp),
            func.sum(EHTrade._volume),
        )\
        .filter(EHTrade.timestamp >= start_time)\
        .filter(EHTrade.timestamp < end_time)\
        .filter(EHTrade.exchange == exchange_name)\
        .group_by(func.hour(EHTrade.timestamp))\
        .all()

    formatted_results = []

    for row in results:
        hour = row[0]
        timestamp = Delorean(start_time, 'UTC').next_hour(hour).datetime
        volume = Money(row[1], 'BTC')

        formatted_results.append([
            timestamp,
            volume,
        ])

    formatted_results = sorted(formatted_results, key=lambda r: r[0])

    return formatted_results
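On the "watch this query for performance" note above: a composite index covering the filter and grouping columns is the usual remedy. A hypothetical definition in SQLAlchemy (index name assumed, not from the gryphon source):

from sqlalchemy import Index

# Lets the exchange filter and the timestamp range scan use one index.
Index('ix_trade_exchange_timestamp', EHTrade.exchange, EHTrade.timestamp)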
Example #4
File: gas.py Project: 01org/SmartHome-Demo
def get_data_by_time(session, start_time, end_time):
    # return utils.list_db_objects(session, Gas, status=True, created_at={'ge': str(start_time), 'le': str(end_time)})
    return utils.list_db_objects_by_group(session, Gas,
                                          select=[func.count(Gas.status).label("cnt"),
                                                  func.hour(Gas.created_at).label("hour")],
                                          group_by=func.hour(Gas.created_at),
                                          status=True,
                                          created_at={'ge': str(start_time), 'le': str(end_time)})
Example #5
def get_data_by_time(session, start_time, end_time, resource_list):
    #return utils.list_db_objects(session, Illuminance, created_at={'ge': str(start_time), 'le': str(end_time)})
    return utils.list_db_objects_by_group(session, Illuminance,
                                          select=[func.avg(Illuminance.illuminance).label("avg_illuminance"),
                                                  func.hour(Illuminance.created_at).label("hour")],
                                          group_by=func.hour(Illuminance.created_at),
                                          created_at={'ge': str(start_time), 'le': str(end_time)},
                                          resource_id={'in': resource_list})
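The created_at={'ge': ..., 'le': ...} and resource_id={'in': resource_list} keyword arguments suggest the SmartHome-Demo utils helper maps operator names onto column comparisons. The actual module isn't shown on this page; a hypothetical reconstruction of that mapping:

# Hypothetical sketch only; the real utils implementation may differ.
_OPS = {
    'ge': lambda col, v: col >= v,
    'le': lambda col, v: col <= v,
    'in': lambda col, v: col.in_(v),
}

def apply_filters(query, model, **filters):
    for name, spec in filters.items():
        col = getattr(model, name)
        if isinstance(spec, dict):
            for op, value in spec.items():
                query = query.filter(_OPS[op](col, value))
        else:
            query = query.filter(col == spec)  # e.g. status=True
    return query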
Example #6
def get_data_by_time(session, start_time, end_time, resource_list):
    # return utils.list_db_objects(session, Buzzer, created_at={'ge': str(start_time), 'le': str(end_time)})
    return utils.list_db_objects_by_group(session, Buzzer,
                                          select=[func.count(Buzzer.status),
                                                  func.hour(Buzzer.created_at)],
                                          group_by=func.hour(Buzzer.created_at),
                                          status=True,
                                          created_at={'ge': str(start_time), 'le': str(end_time)},
                                          resource_id={'in': resource_list})
Example #7
def hourly_chart(station_id):
    """Return the hourly average bikes available of the station"""
    results = db.session \
        .query(func.avg(DublinBike.available_bike)) \
        .filter(DublinBike.number == station_id) \
        .group_by(func.hour(DublinBike.localtime)) \
        .order_by(func.hour(DublinBike.localtime)) \
        .all()

    return jsonify([{
        'hour': hour,
        'available_bike': float(results[hour][0])
    } for hour in range(24)])
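One caveat with the example above: results[hour][0] assumes all 24 hours are present; if the station has no data for some hour, every later row shifts and the lookup can raise IndexError or report the wrong hour. A defensive variant (a sketch against the same assumed models) selects the hour explicitly and defaults missing hours to 0:

from flask import jsonify
from sqlalchemy import func

def hourly_chart_safe(station_id):
    rows = db.session \
        .query(func.hour(DublinBike.localtime),
               func.avg(DublinBike.available_bike)) \
        .filter(DublinBike.number == station_id) \
        .group_by(func.hour(DublinBike.localtime)) \
        .all()
    by_hour = {int(hour): float(avg) for hour, avg in rows}
    return jsonify([{'hour': hour, 'available_bike': by_hour.get(hour, 0.0)}
                    for hour in range(24)])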
Example #8
def get_data_by_time(session, start_time, end_time):
    # return utils.list_db_objects(session, Buzzer, created_at={'ge': str(start_time), 'le': str(end_time)})
    return utils.list_db_objects_by_group(
        session,
        Buzzer,
        select=[func.count(Buzzer.status),
                func.hour(Buzzer.created_at)],
        group_by=func.hour(Buzzer.created_at),
        status=True,
        created_at={
            'ge': str(start_time),
            'le': str(end_time)
        })
Example #9
def punchcard(start_date=None, end_date=None):
    """
    This gets the information needed to display a punch card style graph
    of drink usage.
    Arguments:
        start_date: the start date to filter results by
        end_date: the end date to filter results by
    Returns:
        A list of lists that contains a point for every hour of every weekday.
            Index 0: weekday
            Index 1: hour of the weekday
            Index 2: the amount of drops
            Index 3: the drop count scaled against the busiest bucket (max 22),
                     used to size the circle
            Index 4: a formatted date label built from the weekday and hour
    """
    new_results = []
    if start_date and end_date:
        results = DBSession.query(
                func.weekday(DropLog.time),
                func.hour(DropLog.time), func.count('*')
            ).filter(
                DropLog.time >= start_date,
                DropLog.time <= end_date,
                DropLog.username != 'openhouse'
            ).group_by(
                func.weekday(DropLog.time),
                func.hour(DropLog.time)
            ).all()

    else:
        results = DBSession.query(
                func.weekday(DropLog.time),
                func.hour(DropLog.time), func.count('*')
            ).filter(
                DropLog.username != 'openhouse'
            ).group_by(
                func.weekday(DropLog.time),
                func.hour(DropLog.time)
            ).all()

    biggest = 0
    for result in results:
        if result[2] > biggest:
            biggest = result[2]
    divisor = (1.0 * biggest) / 22

    for result in results:
        new_results.append([result[0], result[1], result[2], result[2] / divisor,
            format_date(result[0], result[1])])

    return new_results
Example #10
def get_data_by_time(session, start_time, end_time):
    # return utils.list_db_objects(session, Gas, status=True, created_at={'ge': str(start_time), 'le': str(end_time)})
    return utils.list_db_objects_by_group(
        session,
        Gas,
        select=[
            func.count(Gas.status).label("cnt"),
            func.hour(Gas.created_at).label("hour")
        ],
        group_by=func.hour(Gas.created_at),
        status=True,
        created_at={
            'ge': str(start_time),
            'le': str(end_time)
        })
Example #11
def get_time_stats(day=True, hour=False):
    if not day and not hour:
        # This is not a useful query, transform it
        day = True
    columns = []
    if day:
        columns.append(func.dayname(Highscore.datetime).label("day"))
    if hour:
        columns.append(func.hour(Highscore.datetime).label("hour"))
    columns.append(func.count().label("num_scores"))

    query = db.session.query(*columns) \
        .filter(Highscore.datetime)

    if day:
        query = query.group_by("day").order_by("day")
    if hour:
        query = query.group_by("hour").order_by("hour")

    output = {}
    for row in query.all():
        if day and hour:
            d = output.get(row.day, {})
            d[row.hour] = row.num_scores
            output[row.day] = d
        elif day:
            output[row.day] = row.num_scores
        elif hour:
            output[row.hour] = row.num_scores

    return output
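Hypothetical usage showing the output shapes (data invented for illustration):

stats = get_time_stats(day=True, hour=True)
# e.g. {'Friday': {13: 7, 14: 3}, 'Saturday': {0: 12}}

stats = get_time_stats(day=False, hour=True)
# e.g. {13: 7, 14: 3}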
Example #12
File: volatility.py Project: zlex7/gryphon
def get_daily_core_fv_series_in_period(db, start_time, end_time):
    """
    Get the daily close-close price series in this period from our core fv datums.
    NOTE that we use close prices for this series, not open prices like for hourly.
    Getting this series for the period [Nov 1st, Nov 4th) will give you three
    datapoints.
    """

    fundamental_value_series = []

    # Getting the absolute last core_fv datum in a series of periods efficiently is
    # difficult, so this query gets the first datum in the last five minutes of
    # every day.
    fv_series = db.query(
            Datum.time_created,
            Datum.numeric_value,
        )\
        .filter(Datum.time_created >= start_time)\
        .filter(Datum.time_created < end_time)\
        .filter(func.hour(Datum.time_created) == 23)\
        .filter(func.minute(Datum.time_created) > 55)\
        .filter(Datum.datum_type.like('%_CORE_FUNDAMENTAL_VALUE'))\
        .group_by(func.date(Datum.time_created))\
        .all()

    # Do a teeny bit of sanity checking on the data
    delta = end_time - start_time
    assert (len(fv_series) == delta.days)

    fundamental_value_series = convert_datum_series_to_time_series(fv_series)

    return fundamental_value_series
Example #13
File: volatility.py Project: zlex7/gryphon
def get_hourly_core_fv_series_in_period(db, start_time, end_time):
    """
    Get the hourly open-open price series in this period from our core fv datums.
    Like most of our periods, this one is inclusive on the left but not the right. So
    getting this series for a full day [Nov 1st, Nov 2nd) will give you 24 datapoints,
    but getting this series for a day and one minute will likely give you 25.
    """

    fundamental_value_series = []

    fv_series = db.query(
            Datum.time_created,
            Datum.numeric_value,
        )\
        .filter(Datum.time_created >= start_time)\
        .filter(Datum.time_created < end_time)\
        .filter(Datum.datum_type.like('%_CORE_FUNDAMENTAL_VALUE'))\
        .group_by(func.date(Datum.time_created), func.hour(Datum.time_created))\
        .all()

    # Do a teeny bit of sanity checking. The fanciness here is because timedelta
    # doesn't have a .hours method.
    delta = end_time - start_time
    hours = 24 * delta.days + math.ceil(delta.seconds / 3600.0)
    assert (len(fv_series) == hours)

    fundamental_value_series = convert_datum_series_to_time_series(fv_series)

    return fundamental_value_series
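The hour arithmetic in the sanity check is worth a worked example, since timedelta has no .hours attribute. For a period of one day plus one minute, delta.days is 1 and delta.seconds is 60:

from datetime import datetime
import math

delta = datetime(2016, 11, 2, 0, 1) - datetime(2016, 11, 1)  # 1 day + 1 minute
hours = 24 * delta.days + math.ceil(delta.seconds / 3600.0)
assert hours == 25  # the 25 datapoints the docstring mentions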
Example #14
File: volatility.py Project: zlex7/gryphon
def get_ten_minute_core_fv_series_in_period(db, start_time, end_time):
    """
    Get the ten-minute open-price series in this period from our core fv datums.
    Like most of our periods, this one is inclusive on the left but not the right. So
    getting this series for a full day [Nov 1st, Nov 2nd) will give you 24*6 == 144
    datapoints, but getting this series for a day and one minute will likely give you
    145.
    """

    fundamental_value_series = []

    fv_series = db.query(
            Datum.time_created,
            Datum.numeric_value,
        )\
        .filter(Datum.time_created >= start_time)\
        .filter(Datum.time_created < end_time)\
        .filter(Datum.datum_type.like('%_CORE_FUNDAMENTAL_VALUE'))\
        .group_by(
            func.date(Datum.time_created),
            func.hour(Datum.time_created),
            func.floor((func.minute(Datum.time_created) / 10))*10
        )\
        .all()

    # Do a teeny bit of sanity checking.
    delta = end_time - start_time
    ten_minutes = math.floor((delta.days * 86400 + delta.seconds) / 600)
    assert (len(fv_series) == ten_minutes)

    fundamental_value_series = convert_datum_series_to_time_series(fv_series)

    return fundamental_value_series
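The grouping key floor(minute / 10) * 10 buckets minutes into 0, 10, 20, 30, 40 and 50. The same arithmetic in plain Python:

import math

assert math.floor(37 / 10) * 10 == 30  # minute 37 lands in the 30 bucket
assert math.floor(9 / 10) * 10 == 0    # minute 9 lands in the 0 bucket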
Example #15
    def get_bikes_for_weekday(cls, dbsession, weekday, station_id):
        """returns a list of bikes for a provided weekday and station.
        averaged per hour so 24 results."""
        station = [("Time", "Available Bikes", "Available Stands")]

        station_data = dbsession.query(func.hour(cls.last_update),
                                        func.avg(cls.available_bikes),
                                        func.avg(cls.available_bike_stands)) \
            .filter(cls.station_id == station_id,
                    func.weekday(cls.last_update) == weekday) \
            .group_by(func.hour(cls.last_update)) \
            .all()

        # This section parses the query result into a readable list.
        # From the docs: extend() appends the contents of seq to the list.
        if station_data:
            station.extend([(a, float(b), float(c))
                            for a, b, c in station_data])
        else:
            station.extend([(0, 0, 0)])
        return station
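A hypothetical call (class and session names assumed), showing the header row followed by one tuple per hour:

rows = DublinBikeModel.get_bikes_for_weekday(dbsession, 0, 42)  # Monday, station 42
# [("Time", "Available Bikes", "Available Stands"),
#  (0, 12.3, 27.7), (1, 14.0, 26.0), ...]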
Example #16
    def get_bikes_for_wetday(cls, dbsession, wetdate, station_id):
        """very similar to get_bikes_for_weekday but not the same: date specified is wetdate not weekday.
        returns a list of bikes for a provided datetime object (wetdate) and station."""
        # averaged per hour so 24 results.
        station = [("Time", "Available Bikes", "Available Stands")]
        station_data = dbsession.query(
            func.hour(cls.last_update),
            func.avg(cls.available_bikes),
            func.avg(cls.available_bike_stands))\
            .filter(cls.station_id == station_id,
                    func.date(cls.last_update) == wetdate.date())\
            .group_by(func.hour(cls.last_update)).all()

        # This section parses the query result into a readable list.
        # From the docs: extend() appends the contents of seq to the list.
        if station_data:
            station.extend([(a, float(b), float(c))
                            for a, b, c in station_data])
        else:
            station.extend([(0, 0, 0)])
        return station
Example #17
def apply_get_stats(time_based='hour'):
    """
    提现申请统计
    :return:
    """
    # 按小时统计
    if time_based == 'hour':
        start_time, end_time = get_current_day_time_ends()
        hours = get_hours(False)
        hours_zerofill = get_hours()
        result = dict(zip(hours, [0] * len(hours)))
        rows = db.session \
            .query(func.hour(ApplyGet.create_time).label('hour'), func.sum(ApplyGet.money_apply)) \
            .filter(ApplyGet.create_time >= time_local_to_utc(start_time),
                    ApplyGet.create_time <= time_local_to_utc(end_time)) \
            .group_by('hour') \
            .limit(len(hours)) \
            .all()
        result.update(dict(rows))
        return [(hours_zerofill[i], result[hour])
                for i, hour in enumerate(hours)]
    # Daily statistics
    if time_based == 'date':
        start_time, end_time = get_current_month_time_ends()
        today = datetime.today()
        days = get_days(year=today.year, month=today.month, zerofill=False)
        days_zerofill = get_days(year=today.year, month=today.month)
        result = dict(zip(days, [0] * len(days)))
        rows = db.session \
            .query(func.day(ApplyGet.create_time).label('date'), func.sum(ApplyGet.money_apply)) \
            .filter(ApplyGet.create_time >= time_local_to_utc(start_time),
                    ApplyGet.create_time <= time_local_to_utc(end_time)) \
            .group_by('date') \
            .limit(len(days)) \
            .all()
        result.update(dict(rows))
        return [(days_zerofill[i], result[day]) for i, day in enumerate(days)]
    # Monthly statistics
    if time_based == 'month':
        start_time, end_time = get_current_year_time_ends()
        months = get_months(False)
        months_zerofill = get_months()
        result = dict(zip(months, [0] * len(months)))
        rows = db.session \
            .query(func.month(ApplyGet.create_time).label('month'), func.sum(ApplyGet.money_apply)) \
            .filter(ApplyGet.create_time >= time_local_to_utc(start_time),
                    ApplyGet.create_time <= time_local_to_utc(end_time)) \
            .group_by('month') \
            .limit(len(months)) \
            .all()
        result.update(dict(rows))
        return [(months_zerofill[i], result[month])
                for i, month in enumerate(months)]
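The pre-seeding pattern shared by all three branches deserves a minimal illustration: every bucket starts at 0, then the sparse query rows are overlaid, so hours (or days, or months) with no applications still appear in the series (data invented):

hours = list(range(24))
result = dict(zip(hours, [0] * len(hours)))  # every hour starts at 0
result.update({9: 150, 14: 75})              # sparse rows from the query
series = [(h, result[h]) for h in hours]     # 24 points, zeros preserved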
Example #18
 def apply_timeseries(self, query, column=Revision.rev_timestamp):
     """
     Take a query and slice it up into equal time intervals
     
     Parameters
          query   : a SQLAlchemy query
         column  : defaults to Revision.rev_timestamp, specifies the timestamp
                   column to use for the timeseries
     
     Returns
         The query parameter passed in, with a grouping by the desired time slice
     """
     choice = self.timeseries.data
     
     if choice == TimeseriesChoices.NONE:
         return query
     
     query = query.add_column(func.year(column))
     query = query.group_by(func.year(column))
     
     if choice == TimeseriesChoices.YEAR:
         return query
     
     query = query.add_column(func.month(column))
     query = query.group_by(func.month(column))
     
     if choice == TimeseriesChoices.MONTH:
         return query
     
     query = query.add_column(func.day(column))
     query = query.group_by(func.day(column))
     
     if choice == TimeseriesChoices.DAY:
         return query
     
     query = query.add_column(func.hour(column))
     query = query.group_by(func.hour(column))
     
     if choice == TimeseriesChoices.HOUR:
         return query
Example #19
 def apply_timeseries(self, query, rev=Revision):
     """
     Take a query and slice it up into equal time intervals
     
     Parameters
          query   : a SQLAlchemy query
         rev     : defaults to Revision, specifies the object that
                   contains the appropriate rev_timestamp
     
     Returns
         The query parameter passed in, with a grouping by the desired time slice
     """
     choice = self.timeseries.data
     
     if choice == TimeseriesChoices.NONE:
         return query
     
     query = query.add_column(func.year(rev.rev_timestamp))
     query = query.group_by(func.year(rev.rev_timestamp))
     
     if choice == TimeseriesChoices.YEAR:
         return query
     
     query = query.add_column(func.month(rev.rev_timestamp))
     query = query.group_by(func.month(rev.rev_timestamp))
     
     if choice == TimeseriesChoices.MONTH:
         return query
     
     query = query.add_column(func.day(rev.rev_timestamp))
     query = query.group_by(func.day(rev.rev_timestamp))
     
     if choice == TimeseriesChoices.DAY:
         return query
     
     query = query.add_column(func.hour(rev.rev_timestamp))
     query = query.group_by(func.hour(rev.rev_timestamp))
     
     if choice == TimeseriesChoices.HOUR:
         return query
Example #20
    def group_query_by_interval(self, table):
        """ Return original query plus grouping for interval. """

        if 'year' in self.args['interval']:
            self.base_query = self.base_query.\
                              group_by(func.year(table.date))
            self.base_query = self.base_query.order_by(func.year(table.date))

        elif 'month' in self.args['interval']:
            self.base_query = self.base_query.\
                              group_by(func.year(table.date),
                                       func.month(table.date))
            self.base_query = self.base_query.order_by(func.year(table.date),
                                                       func.month(table.date))

        elif 'day' in self.args['interval']:
            self.base_query = self.base_query.\
                              group_by(func.year(table.date),
                                       func.month(table.date),
                                       func.day(table.date))
            self.base_query = self.base_query.order_by(func.year(table.date),
                                                       func.month(table.date),
                                                       func.day(table.date))

        elif 'hour' in self.args['interval']:
            self.base_query = self.base_query.\
                              group_by(func.year(table.date),
                                       func.month(table.date),
                                       func.day(table.date),
                                       func.hour(table.date))
            self.base_query = self.base_query.order_by(func.year(table.date),
                                                       func.month(table.date),
                                                       func.day(table.date),
                                                       func.hour(table.date))

        return self.base_query
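Note the contrast with the hour-of-day profiles earlier on this page: chaining year/month/day/hour keeps calendar hours distinct, whereas func.hour alone folds 2 AM on different days into one bucket. A side-by-side sketch (session and table names assumed):

from sqlalchemy import func

# Hour-of-day profile: at most 24 buckets, all days folded together.
profile = session.query(func.count()).group_by(func.hour(Log.date))

# Hourly time series: one bucket per distinct calendar hour.
series = session.query(func.count()).group_by(
    func.year(Log.date), func.month(Log.date),
    func.day(Log.date), func.hour(Log.date))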
Example #21
File: views.py Project: kironono/homepower
def api_watt_month_view(request):
    start = datetime.datetime.now() - datetime.timedelta(30)
    query = DBSession.query(
        func.sum(WattLog.value).label("value"),
        func.count(WattLog.value).label("count"),
    )
    query = query.filter(WattLog.created_at > start)
    query = query.group_by(
        func.year(WattLog.created_at),
        func.month(WattLog.created_at),
        func.day(WattLog.created_at),
        func.hour(WattLog.created_at),
    )
    ret = 0
    for d in query.all():
        avg = d.value / d.count
        ret += avg

    return {
        "watt": ret,
    }
Example #22
def get_custom_channel_stats(db: Connection, channel_id: int, period_start: datetime, period_end: datetime,
                             average_interval: int = 60) -> List:
    """
    Get the channel's stats for the specified period.
    """
    query = select([ENTRIES.c.timestamp, func.avg(ENTRIES.c.value)]) \
        .select_from(ENTRIES) \
        .where(and_(ENTRIES.c.channel_id == channel_id,
                    between(ENTRIES.c.timestamp, period_start, period_end))) \
        .group_by(func.date(ENTRIES.c.timestamp),
                  func.hour(ENTRIES.c.timestamp),
                  func.floor(func.minute(ENTRIES.c.timestamp) / average_interval)) \
        .order_by(ENTRIES.c.timestamp.asc())

    result = db.execute(query)

    datetimes = datetimes_between(period_start, period_end, average_interval * 60)
    average_by_datetime = OrderedDict.fromkeys(datetimes)
    for time, value in result:
        # Align each row to the top of its hour (assumes hour-sized buckets,
        # i.e. average_interval == 60).
        dt = time.replace(minute=0, second=0)
        average_by_datetime[dt] = value

    return list(average_by_datetime.items())
Example #23
def top_hours(username=None, machine_id=None, start_date=None, end_date=None):
    """
    Gets the number of drops for each hour of the day
    Arguments:
        username: The username to filter by for the results
        machine_id: The ID of the machine to get the drop count for
        start_date: the start date to filter results by
        end_date: the end date to filter results by
    """
    if machine_id: # top hours for a given machine
        if start_date and end_date and start_date <= end_date: # within a time frame
            pop_hours = DBSession.query(
                    func.hour(DropLog.time), func.count("*")
                    ).filter(DropLog.machine_id == machine_id,
                            DropLog.time >= start_date, DropLog.time <= end_date
                    ).group_by(func.hour(DropLog.time)
                    ).order_by(func.hour(DropLog.time)).all()
        else:
            pop_hours = DBSession.query(
                    func.hour(DropLog.time), func.count("*")
                    ).filter(DropLog.machine_id == machine_id
                    ).group_by(func.hour(DropLog.time)
                    ).order_by(func.hour(DropLog.time)).all()
    elif not username: # top hours overall
        if start_date and end_date and start_date <= end_date: # within a time frame
            pop_hours = DBSession.query(
                func.hour(DropLog.time), func.count("*")
                ).filter(DropLog.time >= start_date, DropLog.time <= end_date
                ).group_by(func.hour(DropLog.time)).order_by(
                        func.hour(DropLog.time)).all()
        else: # top hours all time for everyone
            pop_hours = DBSession.query(
                func.hour(DropLog.time), func.count("*")
                ).group_by(func.hour(DropLog.time)).order_by(
                        func.hour(DropLog.time)).all()
    else: # top hours for a user
        if start_date and end_date and start_date <= end_date: # within a time frame
            pop_hours = DBSession.query(
                    func.hour(DropLog.time), func.count("*")
                    ).filter(DropLog.username == username,
                            DropLog.time >= start_date, DropLog.time <= end_date
                    ).group_by(func.hour(DropLog.time)
                    ).order_by(func.hour(DropLog.time)).all()

        else:
            pop_hours = DBSession.query(
                    func.hour(DropLog.time), func.count("*")
                    ).filter(DropLog.username == username
                    ).group_by(func.hour(DropLog.time)
                    ).order_by(func.hour(DropLog.time)).all()

    data = []
    current_hour = 0
    for hour in pop_hours:
        while hour[0] != current_hour:
            data.append(0)
            current_hour += 1
        data.append(hour[1])
        current_hour += 1
    data += [0] * (24 - len(data))
    return data
Example #24
 def print_body_for_user(self, authenticated_user):
     stats = []
     query = DB_Session_Factory.get_db_session().query(
             func.dayofweek(Interview.end_time), func.hour(Interview.end_time),
             func.avg(Interview.technical_score), func.avg(Interview.cultural_score),
             Interviewer, func.count(1)) \
         .group_by(func.dayofweek(Interview.end_time), func.hour(Interview.end_time),
                   Interview.interviewer_email) \
         .join(Interviewer, Interview.interviewer_email == Interviewer.email) \
         .filter(Interview.start_time > self.earliest_ts,
                 Interview.end_time < self.latest_ts,
                 or_(Interview.technical_score != None, Interview.cultural_score != None),
                 or_(Interview.technical_score != -1, Interview.cultural_score != -1))
     for stats_for_hour in query:
         stats.append({
             'Day': self.days[stats_for_hour[0]],
             'Hour': stats_for_hour[1],
             'Avg_Technical_Score': stats_for_hour[2],
             'Avg_Cultural_Score': stats_for_hour[3],
             'Interviewer': stats_for_hour[4].dict_representation(),
             'Sample_Size': stats_for_hour[5],
         })
     print(json.dumps(stats))
Example #25
    def run(self):

        q = self.session.query()

        # Add day of week
        q = q.add_column(func.dayofweek(KillMail.timestamp).label('dow'))

        # Add one column per hour of the day.
        for h in range(0, 24):
            q = q.add_column(func.sum(case([(func.hour(KillMail.timestamp) == h, 1)], else_=0)).label("%02i" % (h)))

        # Build a subquery to get killmail ids for attackers or victims in corporations and alliances
        sqk = None
        sql = None
        if (self.kills):
            sqk = self.session.query()
            sqk = sqk.add_column(Attacker.killmail_id)
            sqk = sqk.join(EveType, EveType.id == Attacker.ship_id)
            if (self.alliance_ids and self.corporation_ids):
                sqk = sqk.filter(
                    or_(Attacker.corporation_id.in_(self.corporation_ids), Attacker.alliance_id.in_(self.alliance_ids)))
            elif (self.alliance_ids):
                sqk = sqk.filter(Attacker.alliance_id.in_(self.alliance_ids))
            elif (self.corporation_ids):
                sqk = sqk.filter(Attacker.corporation_id.in_(self.corporation_ids))
            if (self.groupids != None and not self.groupidsinvert):
                sqk = sqk.join(EveGroup, and_(EveGroup.id == EveType.group_id, EveType.id == Attacker.ship_id),
                               isouter=True). \
                    filter(EveGroup.id.in_(self.groupids))
            elif (self.groupids != None and self.groupidsinvert):
                sqk = sqk.join(EveGroup, and_(EveGroup.id == EveType.group_id, EveType.id == Attacker.ship_id),
                               isouter=True). \
                    filter(not_(EveGroup.id.in_(self.groupids)))

        if (self.losses):
            sql = self.session.query()
            km2 = aliased(KillMail)
            sql = sql.add_column(km2.id)
            sql = sql.join(EveType, EveType.id == km2.ship_id)
            if (self.alliance_ids and self.corporation_ids):
                sql = sql.filter(
                    or_(km2.corporation_id.in_(self.corporation_ids), km2.alliance_id.in_(self.alliance_ids)))
            elif (self.alliance_ids):
                sql = sql.filter(km2.alliance_id.in_(self.alliance_ids))
            elif (self.corporation_ids):
                sql = sql.filter(km2.corporation_id.in_(self.corporation_ids))
            if (self.groupids != None and not self.groupidsinvert):
                sql = sql.join(EveGroup, and_(EveGroup.id == EveType.group_id, EveType.id == km2.ship_id),
                               isouter=True). \
                    filter(EveGroup.id.in_(self.groupids))
            elif (self.groupids != None and self.groupidsinvert):
                sql = sql.join(EveGroup, and_(EveGroup.id == EveType.group_id, EveType.id == km2.ship_id),
                               isouter=True). \
                    filter(not_(EveGroup.id.in_(self.groupids)))

        if (self.kills and self.losses):
            q = q.filter(or_(KillMail.id.in_(sql.subquery()), KillMail.id.in_(sqk.subquery())))
        elif (self.kills):
            q = q.filter(KillMail.id.in_(sqk.subquery()))
        elif (self.losses):
            q = q.filter(KillMail.id.in_(sql.subquery()))
        else:
            raise Exception("Please select kills, losses, or both")

        # Search for a specific target type
        if (self.againstgroup_ids and self.kills and not self.losses):
            et2 = aliased(EveType)
            eg2 = aliased(EveGroup)
            q = q.join(et2, et2.id == KillMail.ship_id). \
                join(eg2, eg2.id == et2.group_id). \
                filter(eg2.id.in_(self.againstgroup_ids))

        if (self.after):
            q = q.filter(KillMail.timestamp >= self.after)
        if (self.before):
            q = q.filter(KillMail.timestamp <= self.before)

        q = q.group_by('dow'). \
            order_by(desc('dow'))

        self.columns = q.column_descriptions
        self.results = q.all()

        return
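The per-hour columns above use the classic SUM(CASE ...) pivot. Isolated as a helper (a sketch; the list-based case() signature matches the SQLAlchemy 1.x style used in this example):

from sqlalchemy import case, func

def hourly_pivot_columns(ts_column):
    # One SUM(CASE WHEN HOUR(ts) = h THEN 1 ELSE 0 END) column per hour,
    # labelled "00".."23", turning each row into a 24-column heat-map row.
    return [
        func.sum(case([(func.hour(ts_column) == h, 1)], else_=0)).label("%02i" % h)
        for h in range(24)
    ]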
Example #26
    def run(self):

        q = self.session.query(Attacker)


        # Build a subquery to get killmail ids for attackers or victims in corporations and alliances
        sqk = None

        sqk = self.session.query()
        sqk = sqk.add_column(Attacker.id)
        if (self.alliance_ids and self.corporation_ids):
            sqk = sqk.filter(
                or_(Attacker.corporation_id.in_(self.corporation_ids), Attacker.alliance_id.in_(self.alliance_ids)))
        elif (self.alliance_ids):
            sqk = sqk.filter(Attacker.alliance_id.in_(self.alliance_ids))
        elif (self.corporation_ids):
            sqk = sqk.filter(Attacker.corporation_id.in_(self.corporation_ids))
        if (self.groupids != None and not self.groupidsinvert):
            sqk = sqk.join(EveGroup, and_(EveType.id == Attacker.ship_id, EveGroup.id == EveType.group_id),
                           isouter=True). \
                filter(EveGroup.id.in_(self.groupids))
        elif (self.groupids != None and self.groupidsinvert):
            sqk = sqk.join(EveGroup, and_(EveType.id == Attacker.ship_id, EveGroup.id == EveType.group_id),
                           isouter=True). \
                filter(not_(EveGroup.id.in_(self.groupids)))

        # Filter by attackers and left outer join killmails
        q = q.filter(Attacker.id.in_(sqk.subquery())). \
            join(KillMail, KillMail.id == Attacker.killmail_id, isouter=True)

        # Add day of week
        q = q.add_column(func.dayofweek(KillMail.timestamp).label('dow')).group_by('dow')

        # Add one column per hour of the day.
        for h in range(0, 24):
            q = q.add_column((func.sum(case([(func.hour(KillMail.timestamp) == h, 1)], else_=0))).label("%02i" % (h)))

        if (self.after):
            q = q.filter(KillMail.timestamp >= self.after)
        if (self.before):
            q = q.filter(KillMail.timestamp <= self.before)

        q = q.order_by(desc('dow'))

        self.columns = q.column_descriptions
        self.results = q.all()

        attackers_per_hour = self._arr_result()


        # We need to get the number of kills per hour across 7 days
        ar = ActivityReport(self.session)

        ar.alliance_ids = self.alliance_ids
        ar.corporation_ids = self.corporation_ids
        ar.groupids = self.groupids
        ar.groupidsinvert = self.groupidsinvert
        ar.after = self.after
        ar.before = self.before
        ar.kills = True
        ar.losses = False
        ar.run()

        kills_per_hour = ar.arr_result()

        final_result = []

        # Create the final array and cross divide between the two arrays
        r = 0
        while (r < len(attackers_per_hour)):
            final_result.append([])
            c = 0
            while (c < len(attackers_per_hour[r])):
                # We can't divide on the header
                if (r == 0 or c == 0):
                    final_result[r].append(attackers_per_hour[r][c])
                else:
                    try:
                        final_result[r].append(attackers_per_hour[r][c] / kills_per_hour[r][c])
                    except (ZeroDivisionError, InvalidOperation) as e:
                        final_result[r].append(0)
                c += 1
            r += 1

        self.final_result = final_result