Example #1
def get_interval_info_for_location(a_date_range, a_woeid):
    # Query to obtain all locations in the 'locations' table
    # for which 'updated_at' is within the specified date range
    #     a_date_range = "2019-03-01"             ->   ">= 3/1/19"
    #     a_date_range = ":2019-06-01"            ->   "<= 6/30/19"
    #     a_date_range = "2019-03-01:2019-06-30"  ->   ">= 3/1/19 and  <= 6/30/19"
    #     a_date_range = "all"                    ->    all dates
    #     a_date_range = ":"                      ->    same as "all"
    #     a_date_range = ""                       ->    same as "all"

    
    # Parse the date range
    q_start_date, q_end_date = parse_date_range(a_date_range)
    
    # Return with an error if there was a problem parsing the date range
    if q_start_date == "ERROR" or q_end_date == "ERROR":
        loc_list = [{'ERROR': 'ERROR'}]
        return jsonify(loc_list)
    
    results = db.session.query(Location) \
                            .filter( and_( \
                                Location.woeid == a_woeid, \
                                func.date(Location.updated_at) >= q_start_date, \
                                func.date(Location.updated_at) <= q_end_date \
                            )).order_by(Location.woeid).all()

    loc_list = []
    for r in results:
        loc_info = {
            'updated_at': r.updated_at,
            'woeid': r.woeid,
            'latitude': r.latitude,
            'longitude': r.longitude,
            'name_full': r.name_full,
            'name_only': r.name_only,
            'name_woe': r.name_woe,
            'county_name': r.county_name,
            'county_name_only': r.county_name_only,
            'county_woeid': r.county_woeid,
            'state_name': r.state_name,
            'state_name_only': r.state_name_only,
            'state_woeid': r.state_woeid,
            'country_name': r.country_name,
            'country_name_only': r.country_name_only,
            'country_woeid': r.country_woeid,
            'place_type': r.place_type,
            'timezone': r.timezone,
            'twitter_type': r.twitter_type,
            'twitter_country': r.twitter_country,
            'tritter_country_code': r.tritter_country_code,
            'twitter_name': r.twitter_name,
            'twitter_parentid': r.twitter_parentid
        }

        loc_list.append(loc_info)

    return jsonify(loc_list)
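
The handler above (and several of the examples below) relies on a parse_date_range helper that is not shown on this page. A minimal sketch of what such a helper might look like, inferred from the comment block at the top of the function (open-ended ranges, with "all", ":", or "" meaning unrestricted) and offered as an assumption rather than the project's actual implementation:

from datetime import date

def parse_date_range(a_date_range):
    # Hypothetical sketch: returns (start, end) as ISO date strings,
    # or ("ERROR", "ERROR") when the input cannot be parsed.
    wide_open = ("0001-01-01", "9999-12-31")
    if a_date_range in (None, "", ":", "all"):
        return wide_open
    parts = a_date_range.split(":")
    try:
        if len(parts) == 1:        # "2019-03-01" -> start date only
            return (date.fromisoformat(parts[0]).isoformat(), wide_open[1])
        if len(parts) == 2:        # ":2019-06-30" or "2019-03-01:2019-06-30"
            start = date.fromisoformat(parts[0]).isoformat() if parts[0] else wide_open[0]
            end = date.fromisoformat(parts[1]).isoformat() if parts[1] else wide_open[1]
            return (start, end)
    except ValueError:
        pass
    return ("ERROR", "ERROR")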
Example #2
    def get_new_user_stats(self, begin, end):
        """Get statstic of new user

        """
        from sqlalchemy.sql.expression import func

        user_count = func.count(tables.User.user_id).label('user_count')
        date = func.date(tables.User.created).label('date')

        query = self.session.query(date, user_count) \
            .group_by(date) \
            .filter(func.date(tables.User.created) >= begin) \
            .filter(func.date(tables.User.created) <= end)
        return query.all()
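
A minimal usage sketch for the method above, where service stands in for whatever object exposes get_new_user_stats; each returned row is a (date, user_count) pair, one per calendar day in the range:

from datetime import date

stats = service.get_new_user_stats(date(2019, 3, 1), date(2019, 3, 31))
for day, user_count in stats:
    print(f"{day}: {user_count} new users")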
Example #3
 def add_newly_updated_condition(self, query, cutoff_days, stub_ids):
     studies = self.studies()
     return query.where(
         or_(
             studies.c.updated_at >=
             func.date(func.current_date() - cutoff_days),
             studies.c.nct_id.in_(stub_ids)))
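
func.date(func.current_date() - cutoff_days) leans on the database's own date arithmetic (on PostgreSQL, current_date minus an integer yields the date that many days earlier). A backend-neutral variant that computes the cutoff in Python instead might look like this sketch:

from datetime import date, timedelta
from sqlalchemy import or_

def add_newly_updated_condition(self, query, cutoff_days, stub_ids):
    studies = self.studies()
    cutoff = date.today() - timedelta(days=cutoff_days)  # computed client-side
    return query.where(
        or_(studies.c.updated_at >= cutoff,
            studies.c.nct_id.in_(stub_ids)))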
Example #4
    def newly_added_trials(self, cutoff_days):
        studies = self.studies()
        conditions = self.conditions()
        keywords = self.keywords()
        terms = [
            'cancer', 'neoplasm', 'tumor', 'tumour', 'malignan', 'carcinoma',
            'metast'
        ]
        fields = [studies.c.official_title, conditions.c.name, keywords.c.name]
        term_clauses = [
            field.ilike('%' + term + '%') for field in fields for term in terms
        ]

        return select([distinct(studies.c.nct_id)]).select_from(
            studies.outerjoin(
                conditions, conditions.c.nct_id == studies.c.nct_id).outerjoin(
                    keywords, keywords.c.nct_id == studies.c.nct_id)).where(
                        and_(
                            studies.c.created_at >=
                            func.date(func.current_date() - cutoff_days),
                            studies.c.study_first_posted_date >= '2017-02-01',
                            studies.c.study_type == 'Interventional',
                            studies.c.overall_status.in_([
                                'Recruiting', 'Enrolling by invitation',
                                'Not yet recruiting', 'Available'
                            ]), or_(*term_clauses)))
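
The list comprehension above expands into one ILIKE clause per (field, term) pair, and or_(*term_clauses) folds them into a single disjunction. The same idea in isolation, assuming a studies table with an official_title column:

from sqlalchemy import or_

terms = ['cancer', 'neoplasm']
clauses = [studies.c.official_title.ilike('%' + term + '%') for term in terms]
# Equivalent to: official_title ILIKE '%cancer%' OR official_title ILIKE '%neoplasm%'
cancer_filter = or_(*clauses)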
Example #5
async def daily_new_registries():
    databaseConnectionString = Database.create_psql_connection_string(
        username=os.environ["DB_USERNAME"],
        password=os.environ["DB_PASSWORD"],
        host=os.environ["DB_HOST"],
        port=os.environ["DB_PORT"],
        name=os.environ["DB_NAME"])
    database = Database(connectionString=databaseConnectionString)

    await database.connect()
    query = TokenTransfersTable.select()
    query = query.where(
        TokenTransfersTable.c.registryAddress.in_(
            TokenTransfersTable.select().with_only_columns([
                TokenTransfersTable.c.registryAddress
            ]).group_by(TokenTransfersTable.c.registryAddress).having(
                sqlalchemyfunc.count(TokenTransfersTable.c.registryAddress) ==
                1)))
    query = query.where(
        sqlalchemyfunc.date(TokenTransfersTable.c.blockDate) ==
        sqlalchemyfunc.current_date())
    rows = await database.fetch_all(query)
    for row in rows:
        logging.info(
            f'New Tokens: registry address {row[2]} and tokenId {row[5]}')

    await database.disconnect()
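
Rows fetched through the databases package can usually be addressed by column name as well as by position, which reads better than row[2] and row[5]; a sketch, assuming TokenTransfersTable defines columns named registryAddress and tokenId:

for row in rows:
    logging.info(
        f"New Tokens: registry address {row['registryAddress']} "
        f"and tokenId {row['tokenId']}")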
Example #6
File: admin.py Project: shmundada93/apmcs
 def index(self):
     transactions = Transaction.query.filter(\
         func.date(Transaction.date) == datetime.datetime.today().date()).all()
     print "................................................................."
     summary = []
     commodities = Commodity.query.all()
     for commodity in commodities:
         val = [(t.weight, t.price, t.weight * t.price) for t in transactions if \
                t.commodity_id == commodity.id]
         if val:
             weights, prices, totals = zip(*val)
         else:
             weights, prices, totals = [[0],[0],[0]]
         temp = {"name":commodity.name}
         temp["weight"] = sum(weights)
         temp["high"] = max(prices)
         temp["low"] = min(prices)
         if sum(weights)>0:
             temp["avg"] = round(sum(totals)/sum(weights),2)
             temp["ntrans"] = len(weights)
         else:
             temp["avg"] = 0
             temp["ntrans"] = 0
         temp["total"] = sum(totals)
         summary.append(temp)
     return self.render('admin/dashboard.html', summary = summary)
Example #7
 def index(self):
     transactions = Transaction.query.filter(\
         func.date(Transaction.date) == datetime.datetime.today().date()).all()
     print "................................................................."
     summary = []
     commodities = Commodity.query.all()
     for commodity in commodities:
         val = [(t.weight, t.price, t.weight * t.price) for t in transactions if \
                t.commodity_id == commodity.id]
         if val:
             weights, prices, totals = zip(*val)
         else:
             weights, prices, totals = [[0], [0], [0]]
         temp = {"name": commodity.name}
         temp["weight"] = sum(weights)
         temp["high"] = max(prices)
         temp["low"] = min(prices)
         if sum(weights) > 0:
             temp["avg"] = round(sum(totals) / sum(weights), 2)
             temp["ntrans"] = len(weights)
         else:
             temp["avg"] = 0
             temp["ntrans"] = 0
         temp["total"] = sum(totals)
         summary.append(temp)
     return self.render('admin/dashboard.html', summary=summary)
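
The zip(*val) idiom transposes the list of (weight, price, total) tuples into three parallel sequences, and the [[0], [0], [0]] fallback keeps the later sum/max/min calls from failing when a commodity has no transactions for the day. A small illustration:

val = [(2.0, 10.0, 20.0), (1.5, 12.0, 18.0)]
weights, prices, totals = zip(*val)
# weights == (2.0, 1.5), prices == (10.0, 12.0), totals == (20.0, 18.0)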
Example #8
def get_interval_trends_for_location(a_date_range, a_woeid):
    # Query to obtain all trends in the 'trends' table
    # for which 'updated_at' is within the specified date range
    #     a_date_range = "2019-03-01"             ->   ">= 3/1/19"
    #     a_date_range = ":2019-06-01"            ->   "<= 6/30/19"
    #     a_date_range = "2019-03-01:2019-06-30"  ->   ">= 3/1/19 and  <= 6/30/19"
    #     a_date_range = "all"                    ->    all dates
    #     a_date_range = ":"                      ->    same as "all"
    #     a_date_range = ""                       ->    same as "all"

    
    # Parse the date range
    q_start_date, q_end_date = parse_date_range(a_date_range)
    
    # Return with an error if there was a problem parsing the date range
    if q_start_date == "ERROR" or q_end_date == "ERROR":
        trend_list = [{'ERROR': 'ERROR'}]
        return jsonify(trend_list)
    
    # Query to pull all of the most recent Trends (50 per entry in 'locations' table)
    results = db.session.query(Trend) \
                            .filter( and_( \
                                Trend.woeid == a_woeid, \
                                func.date(Trend.updated_at) >= q_start_date, \
                                func.date(Trend.updated_at) <= q_end_date \
                            )).order_by( coalesce(Trend.twitter_tweet_volume, -9999).desc() ).all()

    
    trend_list = []
    for r in results:
        trend_info = {
            'updated_at': r.updated_at,
            'woeid': r.woeid,
            'twitter_as_of': r.twitter_as_of,
            'twitter_created_at': r.twitter_created_at,
            'twitter_name': r.twitter_name,
            'twitter_tweet_name': r.twitter_tweet_name,
            'twitter_tweet_promoted_content': r.twitter_tweet_promoted_content,
            'twitter_tweet_query': r.twitter_tweet_query,
            'twitter_tweet_url': r.twitter_tweet_url,
            'twitter_tweet_volume': r.twitter_tweet_volume
        }

        trend_list.append(trend_info)

    return jsonify(trend_list)
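
In the order_by clause, coalesce() substitutes -9999 for NULL tweet volumes so that trends without a reported volume sort to the end of the descending list instead of the top. The same ordering expressed with func.coalesce:

order_clause = func.coalesce(Trend.twitter_tweet_volume, -9999).desc()
results = db.session.query(Trend).order_by(order_clause).all()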
Example #9
def send_mailings():
    with transaction.manager:  # so send() will actually send emails
        for mailing in Mailing.query.all():

            def send(mailing, user):
                mailing.send(user)
                logger.info(u'Sent mailing "{}" for user "{}" ({}).'.format(
                    mailing.name, user.email, user.id))

            if mailing.trigger == MailingTriggers.after_created.name:
                for user in User.query.filter(
                        func.date(User.created) ==
                        func.date(date.today() -
                                  timedelta(days=mailing.days))).all():
                    send(mailing, user)

            elif mailing.trigger == MailingTriggers.after_last_payment.name:
                for user in User.query.filter(
                        func.date(User.last_payment) ==
                        func.date(date.today() -
                                  timedelta(days=mailing.days))).all():
                    send(mailing, user)

            elif mailing.trigger == MailingTriggers.before_valid_to.name:
                for user in User.query.filter(
                        func.date(User.valid_to) ==
                        func.date(date.today() +
                                  timedelta(days=mailing.days))).all():
                    send(mailing, user)
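
Since date.today() - timedelta(days=mailing.days) is already a datetime.date, the func.date(...) wrapper on the right-hand side above is harmless but not strictly required; a slightly simpler equivalent of the first branch might be:

from datetime import date, timedelta

target = date.today() - timedelta(days=mailing.days)
users = User.query.filter(func.date(User.created) == target).all()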
Example #10
def get_selecet(category):
    now = datetime.now()
    limits = [
        now - timedelta(days=1), now - timedelta(days=7),
        now - timedelta(days=30)
    ]
    res = []
    for limit in limits:

        res.append(session.query(Job).\
                   filter(Job.category == category).\
                   filter(Job.parse_date > func.date(limit))
        )
    return res
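
Note that func.date(limit) truncates each limit to midnight, so Job.parse_date > func.date(limit) admits anything after the start of that calendar day. If whole-day semantics are wanted on both sides, comparing dates to dates makes that explicit (a sketch, assuming Job.parse_date is a DateTime column):

recent = session.query(Job).\
    filter(Job.category == category).\
    filter(func.date(Job.parse_date) >= func.date(limit))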
Example #11
def get_search_terms_from_tweets(a_date_range=None):
    # Get a list of the unique tweet search terms specified in
    # the 'tweets' table.
    # Ensure that all tweets in the list are unique by using a Python "set".
    
    # Parse the date range
    q_start_date, q_end_date = parse_date_range(a_date_range)

    # Return with an error if there was a problem parsing the date range
    if q_start_date == "ERROR" or q_end_date == "ERROR":
        search_term_list = [{'ERROR': 'ERROR'}]
        # return jsonify(search_term_list)
        return search_term_list
    
    # Query to get the search_terms (i.e., 'twitter_tweet_name')
    # from the 'tweets' table for the specified date range
    results = db.session.query(Tweet.tweet_search_term) \
                        .filter( and_( \
                            func.date(Tweet.updated_at) >= q_start_date, \
                            func.date(Tweet.updated_at) <= q_end_date )) \
                        .order_by( Tweet.tweet_search_term ).all()

    # Get the list of unique search terms using set()
    # Note: The results list is a list of tuples, with first tuple being the desired value
    search_term_set = set([ t[0] for t in results])

    # To support the hashtag/no hashtag Tweet Analysis,
    # add the complementary tweet to the table for each unique tweet
    search_term_alt_set = set([ f"{y[1:]}" if y[:1] == "#" else f"#{y}" for y in search_term_set ])

    # Combine the sets
    search_term_set.update(search_term_alt_set)
    
    # Return a list
    search_term_list = sorted(list(search_term_set))

    return search_term_list
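
Each search term is paired with its hashtag/no-hashtag complement so that downstream analysis can compare both forms. For example:

search_term_set = {"#python", "flask"}
search_term_alt_set = set([f"{y[1:]}" if y[:1] == "#" else f"#{y}" for y in search_term_set])
search_term_set.update(search_term_alt_set)
sorted(search_term_set)   # ['#flask', '#python', 'flask', 'python']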
Example #12
def get_interval_locations_with_tweet(a_date_range, a_tweet):
    # Query to obtain all locations in the 'locations' table
    # REVISED FOR GeoTweet+: Needs to account for retention of locations over time
    #     a_date_range = "2019-03-01"             ->   ">= 3/1/19"
    #     a_date_range = ":2019-06-01"            ->   "<= 6/30/19"
    #     a_date_range = "2019-03-01:2019-06-30"  ->   ">= 3/1/19 and  <= 6/30/19"
    #     a_date_range = "all"                    ->    all dates
    #     a_date_range = ":"                      ->    same as "all"
    #     a_date_range = ""                       ->    same as "all"

    # Parse the date range
    q_start_date, q_end_date = parse_date_range(a_date_range)

    # Return with an error if there was a problem parsing the date range
    if q_start_date == "ERROR" or q_end_date == "ERROR":
        trend_list = [{'ERROR': 'ERROR'}]
        return jsonify(trend_list)

    # Query to pull all of the most recent Trends (50 per entry in 'locations' table)
    # In the order_by clause, use the coalesce() function to replace all NULL values
    # in the twitter_tweet_volume field with -9999 for the purpose of the sort in descending order
    results = db.session.query(Trend, Location).join(Location) \
                            .filter( and_( \
                                Trend.twitter_tweet_name == a_tweet, \
                                func.date(Trend.updated_at) >= q_start_date, \
                                func.date(Trend.updated_at) <= q_end_date \
                            )).order_by( coalesce(Trend.twitter_tweet_volume, -9999).desc() ).all()

    loc_list = []
    for r in results:
        #print(f"Trend Information for {r.Trend.woeid} {r.Location.name_full}: {r.Trend.twitter_tweet_name} {r.Trend.twitter_tweet_volume}")
        loc_info = {
            'loc_updated_at': r.Location.updated_at,
            'woeid': r.Location.woeid,
            'latitude': r.Location.latitude,
            'longitude': r.Location.longitude,
            'name_full': r.Location.name_full,
            'name_only': r.Location.name_only,
            'name_woe': r.Location.name_woe,
            'county_name': r.Location.county_name,
            'county_name_only': r.Location.county_name_only,
            'county_woeid': r.Location.county_woeid,
            'state_name': r.Location.state_name,
            'state_name_only': r.Location.state_name_only,
            'state_woeid': r.Location.state_woeid,
            'country_name': r.Location.country_name,
            'country_name_only': r.Location.country_name_only,
            'country_woeid': r.Location.country_woeid,
            'place_type': r.Location.place_type,
            'timezone': r.Location.timezone,
            'twitter_type': r.Location.twitter_type,
            'twitter_country': r.Location.twitter_country,
            'tritter_country_code': r.Location.tritter_country_code,
            'twitter_parentid': r.Location.twitter_parentid,
            'trend_updated_at': r.Trend.updated_at,
            'twitter_as_of': r.Trend.twitter_as_of,
            'twitter_created_at': r.Trend.twitter_created_at,
            'twitter_name': r.Trend.twitter_name,
            'twitter_tweet_name': r.Trend.twitter_tweet_name,
            'twitter_tweet_promoted_content':
            r.Trend.twitter_tweet_promoted_content,
            'twitter_tweet_query': r.Trend.twitter_tweet_query,
            'twitter_tweet_url': r.Trend.twitter_tweet_url,
            'twitter_tweet_volume': r.Trend.twitter_tweet_volume
        }

        loc_list.append(loc_info)

    return jsonify(loc_list)
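
A query built as db.session.query(Trend, Location) returns row tuples whose elements are addressable by entity name, which is why the loop above can read r.Trend.updated_at alongside r.Location.name_full. A minimal sketch of the same access pattern:

row = db.session.query(Trend, Location).join(Location) \
          .filter(Trend.twitter_tweet_name == a_tweet).first()
if row is not None:
    print(row.Trend.twitter_tweet_name, row.Location.name_full)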
Example #13
    def make_event_services(self, event_id):
        def make_raw_service_group(action, service_id, at_code, at_name,
                                   service_name, price, at_context, ct_code):
            service = {
                'at_id': action.actionType_id,
                'service_id': service_id,
                'at_code': at_code,
                'ct_code': ct_code,
                'at_name': at_name,
                'service_name': service_name,
                'action': action,
                'price': price,
                'is_lab': False,
                'print_context': at_context
            }

            client = Client.query.get(action.event.client_id)
            client_age = client.age_tuple(datetime.date.today())

            at_id = service['at_id']
            at_data = ats_apts.get(at_id)
            if at_data and at_data[9]:
                prop_types = at_data[9]
                prop_types = [
                    prop_type[:2]
                    for prop_type in prop_types if recordAcceptableEx(
                        client.sexCode, client_age, prop_type[3], prop_type[2])
                ]
                if prop_types:
                    service['is_lab'] = True
                    service['assignable'] = prop_types
            return service

        def make_action_as_service(a, service):
            action = {
                'action_id': a.id,
                'account': a.account,
                'amount': a.amount,
                'beg_date': a.begDate,
                'end_date': a.endDate,
                'status': a.status,
                'coord_date': a.coordDate,
                'coord_person': (person_vis.make_person(a.coordPerson)
                                 if a.coordPerson else None),
                'sum': service['price'] * a.amount,
            }
            if service['is_lab']:
                action['assigned'] = [
                    prop.type_id for prop in a.properties if prop.isAssigned
                ]
                action['planned_end_date'] = a.plannedEndDate
            return action

        def shrink_service_group(group):
            actions = [
                make_action_as_service(act_serv.pop('action'), act_serv)
                for act_serv in group
            ]
            total_amount = sum([act['amount'] for act in actions])
            total_sum = sum([act['sum'] for act in actions])

            def calc_all_assigned(actions):
                # [] - all have same assignments, False - have different assignments
                ref_asgn_list = actions[0]['assigned']
                return all(
                    map(lambda act: act['assigned'] == ref_asgn_list,
                        actions)) and ref_asgn_list

            def calc_all_ped(actions):
                # datetime.datetime - all have same planned end date, False - have different dates
                ref_action_ped = actions[0]['planned_end_date']
                return all(
                    map(lambda act: act['planned_end_date'] == ref_action_ped,
                        actions)) and ref_action_ped

            result_service = dict(group[0],
                                  actions=actions,
                                  total_amount=total_amount,
                                  sum=total_sum)
            if result_service['is_lab']:
                result_service['all_assigned'] = calc_all_assigned(actions)
                result_service['all_planned_end_date'] = calc_all_ped(actions)

            return result_service

        person_vis = PersonTreeVisualizer()
        query = db.session.query(
            Action, ActionType.service_id, ActionType.code, ActionType.name,
            rbService.name, ContractTariff.price,
            ActionType.context, ContractTariff.code).join(
                Event, EventType, Contract, ContractTariff, ActionType).join(
                    rbService, ActionType.service_id == rbService.id).filter(
                        Action.event_id == event_id,
                        ContractTariff.eventType_id == EventType.id,
                        ContractTariff.service_id == ActionType.service_id,
                        Action.deleted == 0, ContractTariff.deleted == 0,
                        between(func.date(Event.setDate),
                                ContractTariff.begDate,
                                ContractTariff.endDate))

        ats_apts = int_get_atl_dict_all()

        services_by_at = defaultdict(list)
        for a, service_id, at_code, at_name, service_name, price, at_context, ct_code in query:
            services_by_at[(a.actionType_id, service_id)].append(
                make_raw_service_group(a, service_id, at_code, at_name,
                                       service_name, price, at_context,
                                       ct_code))
        services_grouped = []
        for key, service_group in services_by_at.items():
            services_grouped.append(shrink_service_group(service_group))

        return services_grouped
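
The between(func.date(Event.setDate), ContractTariff.begDate, ContractTariff.endDate) clause keeps only tariffs whose validity window covers the event's start date. The same construct in isolation, assuming Event.setDate is a DateTime column and the tariff bounds are Date columns:

from sqlalchemy import between, func

# True when the event's start date falls inside the tariff's validity window
tariff_in_effect = between(func.date(Event.setDate),
                           ContractTariff.begDate,
                           ContractTariff.endDate)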