Example #1
			def get_count(coll, event):
				# Count distinct uids per day over two date ranges that differ
				# only in their origin (main_y1 / main_y2) and length.
				count1 = 0
				count2 = 0
				print('num_days_1', num_days_1)
				for i in range(num_days_1):
					print(i)
					y1 = main_y1 + datetime.timedelta(days=i)
					t1 = main_y1 + datetime.timedelta(days=i + 1)
					print("dates are:")
					print(y1, t1)
					ts_y1 = int(time.mktime(y1.timetuple()) * 1000)
					ts_t1 = int(time.mktime(t1.timetuple()) * 1000)
					yutcd1 = datetime.datetime.utcfromtimestamp(ts_y1 // 1000)
					tutcd1 = datetime.datetime.utcfromtimestamp(ts_t1 // 1000)
					yutcoid1 = ObjectId.from_datetime(yutcd1)
					tutcoid1 = ObjectId.from_datetime(tutcd1)
					print(yutcoid1, tutcoid1)
					count1 += len(coll.find({'_id': {'$gte': yutcoid1, '$lte': tutcoid1}, 'service': event['service'], 'device': event['device'], 'action': event['action']}).distinct('uid'))
				print('num_days_2', num_days_2)
				for i in range(num_days_2):
					print(i)
					y2 = main_y2 + datetime.timedelta(days=i)
					t2 = main_y2 + datetime.timedelta(days=i + 1)
					print("dates are:")
					print(y2, t2)
					ts_y2 = int(time.mktime(y2.timetuple()) * 1000)
					ts_t2 = int(time.mktime(t2.timetuple()) * 1000)
					yutcd2 = datetime.datetime.utcfromtimestamp(ts_y2 // 1000)
					tutcd2 = datetime.datetime.utcfromtimestamp(ts_t2 // 1000)
					yutcoid2 = ObjectId.from_datetime(yutcd2)
					tutcoid2 = ObjectId.from_datetime(tutcd2)
					print(yutcoid2, tutcoid2)
					count2 += len(coll.find({'_id': {'$gte': yutcoid2, '$lte': tutcoid2}, 'service': event['service'], 'device': event['device'], 'action': event['action']}).distinct('uid'))

				return (count1, count2)
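The mktime round-trip above only rebuilds the same day boundaries; a condensed sketch of the same per-day distinct count, assuming a pymongo collection `coll`, a filter dict `query`, and a naive UTC date `start` (hypothetical names):

import datetime
from bson.objectid import ObjectId

def daily_distinct_uids(coll, query, start, num_days):
    # One _id window per day; from_datetime zero-fills everything past
    # the 4 timestamp bytes, so these bounds are safe for range queries.
    counts = []
    for i in range(num_days):
        lo = start + datetime.timedelta(days=i)
        hi = lo + datetime.timedelta(days=1)
        window = {'$gte': ObjectId.from_datetime(lo),
                  '$lt': ObjectId.from_datetime(hi)}
        counts.append(len(coll.find(dict(query, _id=window)).distinct('uid')))
    return counts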
Example #2
def datetime_to_objectId(input="", year=0, month=1, day=1, hour=0):
    '''
    Input  -- input <datetime.datetime> or <str>
              year <int>
              month <int>
              day <int>
              hour <int>

    Output -- <ObjectId>, or None, or <str> if the bson.objectid module does not exist

    reference: http://api.mongodb.org/python/1.5.2/api/pymongo/objectid.html
    '''
    response_objectid = None #default to None to enable 'if' check
    if "bson" not in IMPORTFAILS:
        if input:
            if isinstance(input, str):  # need to parse
                response_objectid = ObjectId.from_datetime(_parse_input(input))
            else:  # should only be a str or datetime; anything else fails here
                response_objectid = ObjectId.from_datetime(input)
        elif year:
            response_objectid = ObjectId.from_datetime(datetime.datetime(int(year), int(month), int(day), int(hour)))
    else:
        pass
        #TODO: build object ID without objectID library
    return response_objectid # should be of ObjectID or None
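A hypothetical usage sketch for the helper above; all three calls should produce ObjectIds whose generation_time is midnight 2020-01-01 UTC (the string form relies on the module's own _parse_input):

from_dt = datetime_to_objectId(input=datetime.datetime(2020, 1, 1))
from_str = datetime_to_objectId(input="2020-01-01")
from_parts = datetime_to_objectId(year=2020, month=1, day=1, hour=0)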
Example #3
 def get(self, itemId):
     ffrom, to = self.get_argument('from', None), self.get_argument('to', None)
     filt = {'item_id': ObjectId(itemId), '_id': {
         '$gt': ObjectId.from_datetime(parse(ffrom) if ffrom else (self.now - datetime.timedelta(1))),
         '$lte': ObjectId.from_datetime(parse(to) if to else self.now)
     }}
     self.write(dumps(alfred.db.values.find(filt)))
Example #4
def get_forecast(host, port, source, db_name, username, password,
                 collection_name, date):
    # Forecasts are always pushed after the whole suite starts running, so the
    # implementation below is correct: when running in debug mode now, we can
    # still capture the forecast and archive data the operator used in the past.

    date_lb = date - pd.Timedelta(refresh_rate)
    date_ub = date + pd.Timedelta(refresh_rate)
    with pymongo.MongoClient(host=host, port=port) as conn:
        conn[db_name].authenticate(username, password, source=source)
        collection = conn[db_name][collection_name]

        for data in collection.find({
            "_id":
                {
                    "$gte": ObjectId.from_datetime(
                        date_lb),
                    "$lte": ObjectId.from_datetime(
                        date_ub)
                }
        }).sort(
            "_id", pymongo.ASCENDING):
            reading = data['readings']
            wfore = pd.DataFrame(reading)
            if len(wfore) == 0:
                raise ValueError("An appropriate forecast for the passed"
                                 "date does not exist in the database. Please"
                                 "pass another date, and check to make sure"
                                 "the database is functioning correctly.")
            else:
                wfore.set_index('time', inplace=True)
                wfore = wfore.sort_index()
                wfore = wfore.tz_localize('UTC')
                return wfore
Example #5
    def get_vitals(self, collection, dt_now, td_lookback):
        # TODO: Currently just going [6] hours back
        lstintPatientUID = [
            int(x) for x in list(self.df_i_cohort['UID'].unique())
        ]
        dt_end = dt_now
        dt_start = dt_end - td_lookback
        obj_start = ObjectId.from_datetime(dt_start)
        obj_end = ObjectId.from_datetime(dt_end)

        # TODO: Create aggregate group so not overwhelmed
        mg_query = {
            '_id': {
                '$gt': obj_start,
                '$lt': obj_end
            },
            'patient_id': {
                '$in': lstintPatientUID
            }
        }
        sortOrder = (('_id', -1), ('recorded_on', -1))
        temp = collection.find(mg_query).sort(sortOrder)
        df_i_vitals = pd.DataFrame.from_dict(list(temp))

        return df_i_vitals
Example #6
    def get_orders(self, collection, dt_now):
        df_i_cohort = self.df_i_cohort

        df_i_orders = pd.DataFrame()
        print("size: " + str(df_i_cohort.shape))
        for index, row in df_i_cohort.iterrows():
            if index % 50 == 0:
                print(str(datetime.datetime.now()) + '\t' + str(index))
            dt_end = dt_now
            obj_end = ObjectId.from_datetime(dt_end)
            dt_start = row['dt_visitStart_UTC']
            obj_start = ObjectId.from_datetime(dt_start)

            mg_query = {'UID': int(row['UID']),
                        '_id': {'$lt': obj_end, '$gt': obj_start},
                        # 'recorded_on': {'$gt': nTimeStart},
                        }
            sortOrder = (('_id', -1), ('OrderDate', -1))

            df_temp_ind = pd.DataFrame(list(collection.find(mg_query)))

            if df_temp_ind.shape[0] == 0:
                df_temp_ind = pd.DataFrame(columns=['CSN'], data=[row['CSN']])
            else:
                df_temp_ind['CSN'] = row['CSN']
            #             df_rows = pd.concat([pd.DataFrame(row).T[['CSN']], df_temp_ind], axis='columns')

            df_i_orders = pd.concat([df_i_orders, df_temp_ind], axis='rows', sort=False)

        df_i_orders = df_i_orders.reset_index(drop=True)

        return df_i_orders
Example #7
def count_task_state(begin_time, end_time=None):
    query_body = dict()
    query_body['_id'] = dict()
    query_body['_id']['$gte'] = ObjectId.from_datetime(begin_time)
    if end_time:
        query_body['_id']['$lte'] = ObjectId.from_datetime(end_time)

    _data = dict()
    _data['task_failure'] = 0
    _data['task_success'] = 0
    _data['task_total'] = 0
    # Without a $match stage the begin_time/end_time bounds built above are
    # never applied, so add them as the first pipeline step.
    for item in mongo.db.task.aggregate([{
            '$match': query_body
    }, {
            '$group': {
                '_id': '$state',
                'count': {
                    '$sum': 1
                }
            }
    }]):
        if item['_id'] == 'FAILURE':
            _data['task_failure'] = item['count']
        if item['_id'] == 'SUCCESS':
            _data['task_success'] = item['count']

    _data['task_total'] = _data.get('task_failure', 0) + _data.get(
        'task_success', 0)

    return _data
Example #8
 def query_by_user_id_ndays(self,
                            db,
                            collection,
                            find,
                            serial_no,
                            user_id="",
                            days=30):
     collect_lag = collection
     data = self.db_skynet['skynet_user_info']
     if serial_no != "":
         factor = data.find({"serial_no": serial_no}).sort("_id", -1)[0]
     else:
         factor = data.find({"user_id": user_id}).sort("_id", -1)[0]
     date = factor['event_time']  #+ datetime.timedelta(hours=8)
     now_id = ObjectId.from_datetime(date)
     date = date - datetime.timedelta(days=days)
     pre30day = str(date)[0:10]
     pre30day = datetime.datetime.strptime(pre30day, "%Y-%m-%d")
     # print pre30day
     pre30_id = ObjectId.from_datetime(pre30day)
     collection = eval("self.db_{0}['{1}']".format(db, collection))
     ret = []
     find["_id"] = {"$gt": pre30_id, "$lt": now_id}
     for data in collection.find(find).sort("_id", -1):
         ret.append(data)
     ret = self.public_get_gzip_data(ret, collect_lag)
     return ret
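The eval call above only dereferences an attribute named db_<db> and indexes it by collection name; a sketch of an eval-free equivalent over the same assumed attributes:

 def _get_collection(self, db, collection):
     # Does what eval("self.db_{0}['{1}']".format(db, collection)) does,
     # without executing a constructed string.
     return getattr(self, "db_{}".format(db))[collection]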
Example #9
 def query_sms_list_beforeEvenTime_inXdays(self, db, collection, find,
                                           serial_no, days):
     collect_lag = collection
     collection = eval("self.db_{0}['{1}']".format(db, collection))
     info = self.db_skynet['skynet_user_info'].find({
         "serial_no": serial_no
     }).sort("_id", -1)[0]
     event_time = info['event_time']
     now_id = ObjectId.from_datetime(event_time)
     find["_id"] = {"$lt": now_id}
     start_date = event_time - datetime.timedelta(days=days)
     start_id = ObjectId.from_datetime(start_date)
     ret = []
     new_ret = []
     find["_id"] = {"$gt": start_id, "$lt": now_id}
     for data in collection.find(find).sort("_id", -1):
         ret.append(data)
     ret = self.public_get_gzip_data(ret, collect_lag)
     if not ret:
         return 'No DATA'
     for row in ret:
         for sub_row in row.get('mobileSms'):
             if sub_row not in new_ret:
                 new_ret.append(sub_row)
     return new_ret
Example #10
 def query_phone_max5_beforeEvenTime_inXdays(self, db, collection, find,
                                             serial_no, days, validTime):
     collect_lag = collection
     collection = eval("self.db_{0}['{1}']".format(db, collection))
     info = self.db_skynet['skynet_user_info'].find({
         "serial_no": serial_no
     }).sort("_id", -1)[0]
     event_time = info['event_time']
     now_id = ObjectId.from_datetime(event_time)
     find["_id"] = {"$lt": now_id}
     start_date = event_time - datetime.timedelta(days=days)
     start_id = ObjectId.from_datetime(start_date)
     ret = []
     find["_id"] = {"$gt": start_id, "$lt": now_id}
     for data in collection.find(find).sort("_id", -1):
         ret.append(data)
     ret = self.public_get_gzip_data(ret, collect_lag)
     phoneDic = {}
     if not ret:
         return 'No DATA'
     for everyId in ret:
         for line in everyId['actions']:
             phone = phoneClear(line["callNumber"])
             phone = phone_clean(phone)
             if phone is not None and line.get("callTime") > validTime:
                 if phone not in phoneDic:
                     phoneDic[phone] = [line["callTime"]]
                 else:
                     if line["callTime"] not in phoneDic[phone]:
                         phoneDic[phone].append(line["callTime"])
     temp = []
     for k, v in phoneDic.items():
         temp.append([len(set(v)), max(v), k])
     temp.sort(reverse=True)
     return [str(phone[2]) for phone in temp[:5]]
Example #11
def transaction_counts():
    global interval_in_milliseconds
    global db
    current_timestamp = datetime.datetime.now()
    time_delta = timedelta(milliseconds=interval_in_milliseconds)
    current_timestamp_objectid = ObjectId.from_datetime(current_timestamp)
    previous_timestamp_objectid = ObjectId.from_datetime(
        current_timestamp - time_delta)
    # transaction_counts = db.abot_transaction_counters.aggregate([{'$match': {'_id': {'$lt': current_timestamp_objectid, '$gt': previous_timestamp_objectid}}}, {
                                                                # '$group': {'_id': 'null', 'average': {'$avg': '$count'}, 'total': {'$sum': '$count'}}}])
    s1ap_transaction_counts = db.abot_transaction_counters.aggregate([{'$match': {'_id': {'$lt': current_timestamp_objectid, '$gt': previous_timestamp_objectid}, 'counter': {
                                                                      '$regex': 'S1AP'}}}, {'$group': {'_id': 'null', 'average': {'$avg': '$count'}, 'total': {'$sum': '$count'}}}])
    gtpv1u_transaction_counts = db.abot_transaction_counters.aggregate([{'$match': {'_id': {'$lt': current_timestamp_objectid, '$gt': previous_timestamp_objectid}, 'counter': {
                                                                      '$regex': 'GTPV1U'}}}, {'$group': {'_id': 'null', 'average': {'$avg': '$count'}, 'total': {'$sum': '$count'}}}])
    gtpv2c_transaction_counts = db.abot_transaction_counters.aggregate([{'$match': {'_id': {'$lt': current_timestamp_objectid, '$gt': previous_timestamp_objectid}, 'counter': {
                                                                      '$regex': 'GTPV2C'}}}, {'$group': {'_id': 'null', 'average': {'$avg': '$count'}, 'total': {'$sum': '$count'}}}])
    s1ap_tc = loads(dumps(s1ap_transaction_counts))
    gtpv1u_tc = loads(dumps(gtpv1u_transaction_counts))
    gtpv2c_tc = loads(dumps(gtpv2c_transaction_counts))
    response = {}
        
    response['s1ap'] = 0
    response['gtpv1u'] = 0
    response['gtpv2c'] = 0

    if len(s1ap_tc) > 0:
        response['s1ap'] = dumps(s1ap_tc[0]['average'])

    if len(gtpv1u_tc) > 0:
        response['gtpv1u'] = dumps(gtpv1u_tc[0]['average'])

    if len(gtpv2c_tc) > 0:
        response['gtpv2c'] = dumps(gtpv2c_tc[0]['average'])
    return dumps(response)
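The three aggregations above differ only in the counter regex, so a loop keeps them in sync; a sketch assuming the same db handle and collection:

def transaction_counts_by_type(db, previous_oid, current_oid,
                               counter_types=('S1AP', 'GTPV1U', 'GTPV2C')):
    # Same $match/$group pipeline per counter type, varying only the regex.
    response = {}
    for counter_type in counter_types:
        rows = list(db.abot_transaction_counters.aggregate([
            {'$match': {'_id': {'$lt': current_oid, '$gt': previous_oid},
                        'counter': {'$regex': counter_type}}},
            {'$group': {'_id': None,
                        'average': {'$avg': '$count'},
                        'total': {'$sum': '$count'}}},
        ]))
        response[counter_type.lower()] = rows[0]['average'] if rows else 0
    return response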
Example #12
def worker(args):
    macsy_settings, liwc_dict, start_date, end_date = args

    root = logging.getLogger()
    root.setLevel(logging.DEBUG)

    bbapi, db = load(macsy_settings)

    with io.open(liwc_dict, 'r', encoding="utf-8") as liwc_file:
        liwc = LIWC(liwc_file)

    filter = {
        "_id": {
            "$gte": ObjectId.from_datetime(start_date),
            "$lt": ObjectId.from_datetime(end_date)
        }
    }

    p = pipeline(liwc, bbapi, db, filter)

    while True:
        _id, total_tweets = p.send(None)
        logging.debug("{}: {} tweets".format(_id['x'].isoformat(),
                                             total_tweets))

    return None
Example #13
def copy(source_params, dest_params, start_date, end_date):
    source_db = connect_mongodb(source_params)
    source_tables = get_table_names(source_params)

    dest_db = connect_mongodb(dest_params)
    dest_tables = get_table_names(dest_params)

    # records = []
    # for record in source_db[source_tables['topics_table']].find():
    #     records.append(record)
    # print("total records {}".format(len(records)))
    # dest_db[dest_tables['topics_table']].insert_many(
    #     records)
    #
    # records = []
    # for record in source_db[source_tables['meta_table']].find():
    #     records.append(record)
    # print("total records {}".format(len(records)))
    # dest_db[dest_tables['meta_table']].insert_many(
    #     records)

    # This is probably the most inefficient way of doing a copying a subset
    # of a
    # collection to another database but this is the one that requires the
    # minimum access
    # Wish this feature request is closed soon
    # https://jira.mongodb.org/browse/SERVER-13201
    # Aggregation is the fastest way to get a subset of data from a collection,
    # next would be map reduce. map reduce can write output to another db
    # but it would only generate doc of schema
    # id:<object id>, value:<search/mapreduce result>

    dest_db[dest_tables['data_table']].create_index(
        [('topic_id', pymongo.DESCENDING),
         ('ts', pymongo.DESCENDING)],
        unique=True, background=False)
    records = []
    i = 0
    print ("start obj:{}".format(ObjectId.from_datetime(start_date)))
    print ("end obj:{}".format(ObjectId.from_datetime(end_date)))
    cursor = source_db[source_tables['data_table']].find(
        {'$and':
            [{'_id': {'$gte': ObjectId.from_datetime(start_date)}},
             {'_id': {'$lte': ObjectId.from_datetime(end_date)}}]})
    print ("Record count from cursor {}".format(cursor.count()))
    for record in cursor:
        i += 1
        records.append(
                    ReplaceOne(
                        {'ts':record['ts'], 'topic_id':record['topic_id']},
                        {'ts': record['ts'], 'topic_id': record['topic_id'],
                                'value': record['value']},
                        upsert=True))
        if i == 2000:
            print("total records {}".format(len(records)))
            dest_db[dest_tables['data_table']].bulk_write(records)
            i = 0
            records = []
    # flush whatever is left in the buffer (fewer than 2000 records)
    if records:
        print("total records {}".format(len(records)))
        dest_db[dest_tables['data_table']].bulk_write(records)
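The SERVER-13201 comment above predates MongoDB 4.2, whose $merge stage can write aggregation output into another database, provided both live on the same cluster; a sketch under that assumption (database and collection names are placeholders):

dest_pipeline = [
    {'$match': {'_id': {'$gte': ObjectId.from_datetime(start_date),
                        '$lte': ObjectId.from_datetime(end_date)}}},
    {'$merge': {'into': {'db': 'dest_database', 'coll': 'data_table'},
                'on': '_id',
                'whenMatched': 'replace',
                'whenNotMatched': 'insert'}},
]
source_db[source_tables['data_table']].aggregate(dest_pipeline)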
Example #14
def idslice(col, start_seconds, end_seconds=0):
    start_delta = timedelta(seconds=start_seconds)

    start_objid = ObjectId.from_datetime(utc_now() - start_delta)
    end_delta = timedelta(seconds=end_seconds)
    end_objid = ObjectId.from_datetime(utc_now() - end_delta)
    for obj in col.find(
        {'_id': {'$gte': start_objid, '$lt': end_objid}}).sort('_id'):
        yield obj
Example #15
def get_date_query(start_date, end_date):
    if start_date is not None and end_date is not None:
        oid_start = ObjectId.from_datetime(start_date)
        oid_stop = ObjectId.from_datetime(end_date)

        return {"_id": {"$gte": oid_start, "$lt": oid_stop}}
    else:

        return None
Example #16
def getDateQuery(start_date,end_date): 

    if start_date is not None and end_date is not None:
        oid_start = ObjectId.from_datetime(start_date)
        oid_stop = ObjectId.from_datetime(end_date)
        
        return { "_id": { "$gte": oid_start, "$lt": oid_stop } }
    else:

        return None
Example #17
    def test_from_datetime(self):
        d = datetime.datetime.utcnow()
        d = d - datetime.timedelta(microseconds=d.microsecond)
        oid = ObjectId.from_datetime(d)
        self.assertEqual(d, oid.generation_time.replace(tzinfo=None))
        self.assertEqual("0" * 16, str(oid)[8:])

        aware = datetime.datetime(1993, 4, 4, 2, tzinfo=FixedOffset(555, "SomeZone"))
        as_utc = (aware - aware.utcoffset()).replace(tzinfo=utc)
        oid = ObjectId.from_datetime(aware)
        self.assertEqual(as_utc, oid.generation_time)
Example #18
def objectid(day):
    start_date = str(day)
    date_1 = datetime.datetime.strptime(start_date,"%Y-%m-%d %H:%M:%S")
    end_date = date_1 + datetime.timedelta(hours=1)

    s=datetime.datetime(date_1.year,date_1.month,date_1.day,date_1.hour)
    d=datetime.datetime(end_date.year,end_date.month,end_date.day,end_date.hour)

    objmin =ObjectId.from_datetime(s)
    objmax =ObjectId.from_datetime(d)
    #print str(date_1),str(end_date),objmin,objmax
    return objmin,objmax,end_date
Example #19
def worker(macsy_settings, liwc_dict, start_date, end_date):
    root = logging.getLogger()
    root.setLevel(logging.DEBUG)

    bbapi, db = load(macsy_settings)

    with io.open(liwc_dict, 'r', encoding="utf-8") as liwc_file:
        liwc = LIWC(liwc_file)

    locs_col = bbapi.load_blackboard(
        "LOCATION_PIPE_B").document_manager.get_collection()

    locs = [loc["_id"] for loc in locs_col.find({}, {"_id": 1})]

    _id_filter = {
        "$gte": ObjectId.from_datetime(start_date),
        "$lt": ObjectId.from_datetime(end_date)
    }

    filter = {"_id": _id_filter, "L": {"$in": locs}}

    p = pipeline(liwc, bbapi, db, filter)

    labels = [v for v, _ in liwc.categories.values()] + ["wc", "wc_dic"]

    tweets_col = bbapi.load_blackboard(
        "TWEET_PIPE_B").document_manager.get_collection()

    # pls symlink
    locations = {}
    for l in locs:
        count = tweets_col.find({"_id": _id_filter, "L": l}).count()
        locations[l] = {
            "fp":
            np.lib.format.open_memmap("brexit/loc_{}_tweets.npy".format(
                str(l)),
                                      mode="w+",
                                      dtype=np.int64,
                                      shape=(count, len(labels))),
            "i":
            0
        }

    for o in p:
        _id, location, vector, total, total_dic = o
        fp = locations[location]["fp"]
        i = locations[location]["i"]

        fp[i, :-2] = vector
        fp[i, -2:] = [total, total_dic]

        locations[location]["i"] += 1
Example #20
def objectid(day):
    start_date = str(day)
    date_1 = datetime.datetime.strptime(start_date, "%Y-%m-%d %H:%M:%S")
    end_date = date_1 + datetime.timedelta(hours=1)

    s = datetime.datetime(date_1.year, date_1.month, date_1.day, date_1.hour)
    d = datetime.datetime(end_date.year, end_date.month, end_date.day,
                          end_date.hour)

    objmin = ObjectId.from_datetime(s)
    objmax = ObjectId.from_datetime(d)
    #print str(date_1),str(end_date),objmin,objmax
    return objmin, objmax, end_date
Example #21
def idslice(col, start_seconds, end_seconds=0):
    start_delta = timedelta(seconds=start_seconds)

    start_objid = ObjectId.from_datetime(utc_now() - start_delta)
    end_delta = timedelta(seconds=end_seconds)
    end_objid = ObjectId.from_datetime(utc_now() - end_delta)
    for obj in col.find({
            '_id': {
                '$gte': start_objid,
                '$lt': end_objid
            }
    }).sort('_id'):
        yield obj
Example #22
 def query_all_by_userId_inXdays(self,
                                 db,
                                 collection,
                                 find,
                                 serial_no,
                                 start_days=0,
                                 start_time="",
                                 end_days=0,
                                 end_time='',
                                 s_start_id="",
                                 s_end_id=""):
     collect_lag = collection
     collection = eval("self.db_{0}['{1}']".format(db, collection))
     info = self.db_skynet['skynet_user_info'].find({
         "serial_no": serial_no
     }).sort("_id", -1)[0]
     event_time = info['event_time']
     now_id = ObjectId.from_datetime(event_time)
     find["_id"] = {"$lt": now_id}
     if len(start_time) == 0:
         start_date = event_time - datetime.timedelta(days=start_days)
     else:
         event_time_add8 = event_time - datetime.timedelta(
             days=start_days) + datetime.timedelta(hours=8)
         start_day = str(event_time_add8)[0:10] + ' ' + start_time
         start_date_add8 = datetime.datetime.strptime(
             start_day, "%Y-%m-%d %H:%M:%S")
         start_date = start_date_add8 - datetime.timedelta(hours=8)
     if s_start_id:
         start_id = s_start_id
     else:
         start_id = ObjectId.from_datetime(start_date)
     if len(end_time) == 0:
         end_date = event_time + datetime.timedelta(days=end_days)
     else:
         event_time_add8 = event_time + datetime.timedelta(
             days=end_days) + datetime.timedelta(hours=8)
         end_day = str(event_time_add8)[0:10] + ' ' + end_time
         end_date_add8 = datetime.datetime.strptime(end_day,
                                                    "%Y-%m-%d %H:%M:%S")
         end_date = end_date_add8 - datetime.timedelta(hours=8)
     if s_end_id:
         end_id = s_end_id
     else:
         end_id = ObjectId.from_datetime(end_date)
     ret = []
     find["_id"] = {"$gt": start_id, "$lt": end_id}
     for data in collection.find(find).sort("_id", -1):
         ret.append(data)
     ret = self.public_get_gzip_data(ret, collect_lag)
     return ret
Example #23
def worker(start_date, end_date, f):
    root = logging.getLogger()
    root.setLevel(logging.DEBUG)

    macsy_settings = f["tweets"].attrs["macsy_settings"]
    bbapi, db = load(macsy_settings)

    with io.open(f["tweets"].attrs["liwc_dict"], 'r',
                 encoding="utf-8") as liwc_file:
        liwc = LIWC(liwc_file)

    _id_filter = {
        "$gte": ObjectId.from_datetime(start_date),
        "$lt": ObjectId.from_datetime(end_date)
    }

    filter = ast.literal_eval(f["tweets"].attrs["filter"])
    filter["_id"] = _id_filter

    blackboard = f["tweets"].attrs["blackboard"]
    tweets_col = bbapi.load_blackboard(
        blackboard).document_manager.get_collection()

    f["tweets"].attrs["inserted"] = np.asarray(
        f["tweets"].attrs["inserted"].tolist() +
        ["{} to {}".format(start_date.isoformat(), end_date.isoformat())],
        dtype=h5py.string_dtype())

    # times should contain UTC isoformated times
    times = f["tweets"]["times"]
    buckets_lookup = dict((dt, i) for i, dt in enumerate(times))

    indicators = f["tweets"]["indicators"]
    wordcounts = f["tweets"]["wordcounts"]
    tweetcounts = f["tweets"]["tweetcounts"]

    # Make sure indicators are normalized
    p = pipeline(liwc, bbapi, db, filter, blackboard,
                 f["tweets"].attrs["trim_rt"],
                 f["tweets"].attrs["indicator_resolution"])

    for _id, vector, wordcount, _ in p:
        i = buckets_lookup.get(
            find_bucket(_id.generation_time).isoformat(), None)
        if i is None:
            continue

        indicators[i, :] += vector
        wordcounts[i] += wordcount
        tweetcounts[i] += 1
Example #24
def main():
    client = MongoClient()
    db = client['convai-bot']
    dialogs = db.dialogs
    now = datetime.datetime.now(tzlocal())
    start_id = ObjectId.from_datetime(datetime.datetime(now.year, now.month, now.day, 0, 0, 0, 0, tzlocal()).astimezone(tzutc()))
    end_id = ObjectId.from_datetime(datetime.datetime(now.year, now.month, now.day, 23, 59, 59, 999, tzlocal()).astimezone(tzutc()))
    test_set = dialogs.find({'$and': [{'_id': {'$gte': start_id}}, {'_id': {'$lte': end_id}}]}).sort('_id', 1)
    train_set = dialogs.find().sort('_id', 1)
    print("\nExport test set for day %s\n" % now.date())
    export_to_file('test_set', test_set, labeled=False, filtered=False)
    print("\nExport train set for day %s\n" % now.date())
    export_to_file('train_set', train_set, labeled=True, filtered=True)
    client.close()
Example #25
def object_id_day_range(start_time, end_time):
    """
        Aim: Converts time to Mongo's ObjectId
        Input:
                start_time: The starting time, type: datetime.datetime object
                end_time: The end time, type: datetime.datetime object
        Output:
                start_object_id: The starting ObjectId, pymongo ObjectId object
                end_object_id: The ending ObjectId, pymongo ObjectId object
    """

    start_object_id = ObjectId.from_datetime(start_time)
    end_object_id = ObjectId.from_datetime(end_time)
    return start_object_id, end_object_id
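Worth noting for all of these helpers: from_datetime subtracts the UTC offset of an aware datetime and treats a naive one as UTC, so the two calls below should build the same ObjectId:

import datetime
from bson.objectid import ObjectId
from bson.tz_util import utc

naive = datetime.datetime(2021, 6, 1, 12, 0, 0)  # treated as UTC
aware = datetime.datetime(2021, 6, 1, 12, 0, 0, tzinfo=utc)
assert ObjectId.from_datetime(naive) == ObjectId.from_datetime(aware)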
Example #26
 def test_bb_get_date(self):
     self.assertEqual(
         str(
             self.bb.get_date({
                 DateBasedDocumentManager.doc_id:
                 ObjectId.from_datetime(dtparser.parse('21-10-2017'))
             })), '2017-10-21 00:00:00+00:00')
     with self.assertRaises(ValueError):
         self.bb.get_date({
             'different_id':
             ObjectId.from_datetime(dtparser.parse('21-10-2017'))
         })
     with self.assertRaises(ValueError):
         self.bb.get_date({DateBasedDocumentManager.doc_id: 1})
Example #27
    def test_from_datetime(self):
        if "PyPy 1.8.0" in sys.version:
            # See https://bugs.pypy.org/issue1092
            raise SkipTest("datetime.timedelta is broken in pypy 1.8.0")
        d = datetime.datetime.utcnow()
        d = d - datetime.timedelta(microseconds=d.microsecond)
        oid = ObjectId.from_datetime(d)
        self.assertEqual(d, oid.generation_time.replace(tzinfo=None))
        self.assertEqual("0" * 16, str(oid)[8:])

        aware = datetime.datetime(1993, 4, 4, 2, tzinfo=FixedOffset(555, "SomeZone"))
        as_utc = (aware - aware.utcoffset()).replace(tzinfo=utc)
        oid = ObjectId.from_datetime(aware)
        self.assertEqual(as_utc, oid.generation_time)
Example #28
    def test_from_datetime(self):
        d = datetime.datetime.utcnow()
        d = d - datetime.timedelta(microseconds=d.microsecond)
        oid = ObjectId.from_datetime(d)
        self.assertEqual(d, oid.generation_time.replace(tzinfo=None))
        self.assertEqual("0" * 16, str(oid)[8:])

        aware = datetime.datetime(1993,
                                  4,
                                  4,
                                  2,
                                  tzinfo=FixedOffset(555, "SomeZone"))
        as_utc = (aware - aware.utcoffset()).replace(tzinfo=utc)
        oid = ObjectId.from_datetime(aware)
        self.assertEqual(as_utc, oid.generation_time)
Example #29
 def get(self, itemId):
     ffrom, to = self.get_argument('from',
                                   None), self.get_argument('to', None)
     filt = {
         'item_id': ObjectId(itemId),
         '_id': {
             '$gt':
             ObjectId.from_datetime(
                 parse(ffrom) if ffrom else (self.now -
                                             datetime.timedelta(1))),
             '$lte':
             ObjectId.from_datetime(parse(to) if to else self.now)
         }
     }
     self.write(dumps(alfred.db.values.find(filt)))
Example #30
 def __init__(self, env, product, startTime, endTime, step=30):
     """
     :param env: environment
     :param product: MongoDB database to use [skynet, lake, galaxy]
     :param startTime: start time (format: "2019-02-20 16:16:01")
     :param endTime: end time (format: "2019-02-20 17:16:01")
     :param step: interval in minutes (default 30)
     """
     self.env = env
     self.product = product
     self.startTime = datetime.datetime.strptime(startTime, "%Y-%m-%d %H:%M:%S")
     self.endTime = datetime.datetime.strptime(endTime, "%Y-%m-%d %H:%M:%S")
     self.step = step
     self.startObjectId = ObjectId.from_datetime(self.startTime)
     self.endObjectId = ObjectId.from_datetime(self.endTime)
Example #31
def procedure_completions():
    global interval_in_milliseconds
    global db
    current_timestamp = datetime.datetime.now()
    time_delta = timedelta(milliseconds=interval_in_milliseconds)
    current_timestamp_objectid = ObjectId.from_datetime(current_timestamp)
    previous_timestamp_objectid = ObjectId.from_datetime(
        current_timestamp - time_delta)
    procedures_counts = db.abot_procedure_counters.aggregate([{'$match': {'_id': {'$lt': current_timestamp_objectid, '$gt': previous_timestamp_objectid}}}, {
                                                             '$group': {'_id': 'null', 'total': {'$sum': '$count'}}}])
    tc = loads(dumps(procedures_counts))
    if len(tc) > 0:
        d = dumps(tc[0]['total'])
        return d
    return dumps(0)
Example #32
  def get(self):
    try:
      args = parse(items_parser_sett['get'])
    except Exception as e:
      return parse_error_response(e)
    subreddit = args['subreddit']
    from_oid = ObjectId.from_datetime(
      datetime.datetime.utcfromtimestamp(args['from']))
    to_oid = ObjectId.from_datetime(
      datetime.datetime.utcfromtimestamp(args['to']))
    keyword = args.get('keyword', None)

    data = request.mongo.get_entities(subreddit, from_oid, to_oid, keyword)

    return ok_response(data=data)
Example #33
    def test_from_datetime(self):
        if 'PyPy 1.8.0' in sys.version:
            # See https://bugs.pypy.org/issue1092
            raise SkipTest("datetime.timedelta is broken in pypy 1.8.0")
        d = datetime.datetime.utcnow()
        d = d - datetime.timedelta(microseconds=d.microsecond)
        oid = ObjectId.from_datetime(d)
        self.assertEqual(d, oid.generation_time.replace(tzinfo=None))
        self.assertEqual("0" * 16, str(oid)[8:])

        aware = datetime.datetime(1993, 4, 4, 2,
                                  tzinfo=FixedOffset(555, "SomeZone"))
        as_utc = (aware - aware.utcoffset()).replace(tzinfo=utc)
        oid = ObjectId.from_datetime(aware)
        self.assertEqual(as_utc, oid.generation_time)
Example #34
    def on_status(self, status):
        # Connect to MongoDB host
        client = MongoClient(MONGO_HOST,
                             username=auth_data.username,
                             password=auth_data.password)
        # Use defined database
        db = client.twitter_analytics
        collection = db.tweets

        gen_time = datetime.utcnow() - timedelta(days=days_to_keep)
        del_to_id = ObjectId.from_datetime(gen_time)

        if from_creator(status):
            try:
                if hasattr(status, "extended_tweet"):
                    text = deEmojify(status.extended_tweet["full_text"])
                else:
                    text = deEmojify(status.text)
                collection.insert_one({
                    'created_at': status.created_at,
                    'user_id': status.id,
                    'user_name': status.user.name,
                    'user_screen_name': status.user.screen_name,
                    'location': status.user.location,
                    'text': text,
                    'reply_count': status.reply_count,
                    'retweet_count': status.retweet_count
                })
                print(text)
            except Exception:
                pass
Example #35
def generate_date_based_blackboard(db, blackboard_name):
    blackboard_name = blackboard_name.upper()

    # Generate tags collection
    tags_coll = db[blackboard_name + TagManager.tag_suffix]
    tag_ids = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    tgs = [{'_id': x, 'Nm': 'Tag_{}'.format(x), 'Ctrl': 0} for x in tag_ids]
    tgs.append({'_id': 11, 'Nm': 'FOR>Tag_11', 'Ctrl': 1})
    tgs.append({'_id': 12, 'Nm': 'POST>Tag_12', 'Ctrl': 1})    
    tags_coll.insert(tgs)

    # Generate counter collection
    counter_coll = db[blackboard_name + CounterManager.counter_suffix]
    counter_coll.insert({"_id" : CounterManager.counter_type, CounterManager.counter_type : CounterManager.counter_type_date_based})
    counter_coll.insert({"_id" : CounterManager.counter_next, CounterManager.counter_tag : tags_coll.count() + 1})
    counter_coll.insert({"_id" : CounterManager.counter_hash, CounterManager.counter_hash : 'HSH', CounterManager.counter_hash_fields : ['oID', 'T', 'D']})
    counter_coll.insert({"_id" : CounterManager.counter_indexes, CounterManager.counter_indexes : [{"_id" : 1},{"oID" : 1, "_id" : 1}, {"Tg" : 1, "_id" : 1, "oID" : 1}, {"HSH" : 1}, { "Fds" : 1, "_id" : 1}, {"aID" : 1}, {"FOR" : 1, "_id" : 1}, {"TrOf" : 1}, {"Tg" : 1, "_id" : 1}]})

    # Generate date_based collections
    document_colls = {year: db['{}_{}'.format(blackboard_name, year)] for year in range(2009,2019)}
    for tid, (year, coll) in zip(tag_ids, document_colls.items()):
        obj_id = ObjectId.from_datetime(datetime(year, 1, 1))
        coll.insert({'_id': obj_id, 'T': 'Title {}'.format(tid), 'oID' : tid, 'D' : 'Description', 'Tg' : [tid, tid-1], 'FOR' : [11, 12]})
        coll.create_index([('_id', 1)], background = True)
        coll.create_index([('oID', 1), ('_id', 1)], background = True)
        coll.create_index([('Tg', 1), ('_id', 1), ('oID', 1)], background = True)
        coll.create_index([('HSH', 1)], background = True)
Example #36
    def test_sql_insert(self):
        cursor = MagicMock()
        now = datetime.now()

        # Use ordereddict to ensure correct order in generated SQL request
        doc = OrderedDict()
        doc['_id'] = ObjectId.from_datetime(now)
        doc['field'] = 'val'

        sql.sql_insert(cursor, 'table', doc, '_id')

        doc['_creationDate'] = utils.extract_creation_date(doc, '_id')

        cursor.execute.assert_called_with(
            'INSERT INTO table  (_creationDate,_id,field)  VALUES  (%(_creationDate)s,%(_id)s,%(field)s)  ON CONFLICT (_id) DO UPDATE SET  (_creationDate,_id,field)  =  (%(_creationDate)s,%(_id)s,%(field)s) ',
            doc
        )

        doc = {
            'field': 'val'
        }

        sql.sql_insert(cursor, 'table', doc, '_id')

        cursor.execute.assert_called_with(
            'INSERT INTO table  (field)  VALUES  (%(field)s) ',
            doc
        )
Example #37
    def get(self):
        """Get Daily KPi."""
        now = datetime.now()
        gen_time = datetime(now.year, now.month, now.day)
        dummy_id = ObjectId.from_datetime(gen_time)

        vdm_database = config.setup_mongo()
        cursor = vdm_database.booking.aggregate([
            {
                "$project":
                    {
                        "_id": 1,
                        "NbSpectateur": {"$size": "$Reservation"},
                        "TotalPrice": {"$sum": "$Reservation.prix"}
                    },
            },
            {"$match": {"_id": {"$gte": dummy_id}}},
        ])
        data = []
        for reservation in cursor:
            res_str = json.loads(dumps(reservation))
            data.append(res_str)

        response = jsonify(data)
        response.headers.add('Access-Control-Allow-Origin', '*')
        response.status_code = 200
        return response
Example #38
 def build_filters(self, **kwargs):
     ''' Break url parameters and turn into filters '''
     filters = {}
     for param, value in kwargs.items():
         # break each url parameter to key + operator (if exists)
         pl = dict(enumerate(param.split('__')))
         key = pl[0]
         operator = pl.get(1, None)
         if key in self._meta.object_class.fields():
             field = self._meta.object_class._fields[key]
             if field.is_composite: # composite keys require additional handling
                 # currently covering cases for dbref and list
                 if isinstance(field, DBRefField):
                     key, value = self.process_dbref_filter(key, value)
                 elif isinstance(value, list) and operator == 'in':
                     value = [convert_value(v) for v in value]
             else:
                 value = convert_value(value)
             # assign operator, if applicable
             filters[key] = {'${}'.format(operator): value} if operator else value
         elif key == 'created':  # special case where we map `created` key to mongo's _id which also contains a creation timestamp
             dt = parser.parse(convert_value(value))
             dummy_id = ObjectId.from_datetime(dt)
             filters['_id'] = {'${}'.format(operator): dummy_id} if operator else dummy_id
     return filters
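A hypothetical call against the resource above, showing how a created filter becomes an _id range (assuming convert_value passes the ISO string through to the date parser):

# created__gte=... maps onto _id because an ObjectId embeds its
# creation timestamp in the first four bytes
filters = resource.build_filters(created__gte='2017-01-01')
# -> {'_id': {'$gte': ObjectId('586846800000000000000000')}}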
Example #39
    def _remove_expired_publish_queue_items(self):
        expire_interval = app.config.get('PUBLISH_QUEUE_EXPIRY_MINUTES', 0)
        if expire_interval:
            expire_time = utcnow() - timedelta(minutes=expire_interval)
            logger.info('{} Removing publish queue items created before {}'.format(self.log_msg, str(expire_time)))

            get_resource_service('publish_queue').delete({'_id': {'$lte': ObjectId.from_datetime(expire_time)}})
Example #40
    def test_jsanitize(self):
        #clean_json should have no effect on None types.
        d = {"hello": 1, "world": None}
        clean = jsanitize(d)
        self.assertIsNone(clean["world"])
        self.assertEqual(json.loads(json.dumps(d)), json.loads(json.dumps(
            clean)))

        d = {"hello": GoodMSONClass(1, 2)}
        self.assertRaises(TypeError, json.dumps, d)
        clean = jsanitize(d)
        self.assertIsInstance(clean["hello"], six.string_types)
        clean_strict = jsanitize(d, strict=True)
        self.assertEqual(clean_strict["hello"]["a"], 1)
        self.assertEqual(clean_strict["hello"]["b"], 2)

        d = {"dt": datetime.datetime.now()}
        clean = jsanitize(d)
        self.assertIsInstance(clean["dt"], six.string_types)
        clean = jsanitize(d, allow_bson=True)
        self.assertIsInstance(clean["dt"], datetime.datetime)

        d = {"a": ["b", np.array([1, 2, 3])],
             "b": ObjectId.from_datetime(datetime.datetime.now())}
        clean = jsanitize(d)
        self.assertEqual(clean["a"], ['b', [1, 2, 3]])
        self.assertIsInstance(clean["b"], six.string_types)
Example #41
    def test_jsanitize(self):
        # clean_json should have no effect on None types.
        d = {"hello": 1, "world": None}
        clean = jsanitize(d)
        self.assertIsNone(clean["world"])
        self.assertEqual(json.loads(json.dumps(d)),
                         json.loads(json.dumps(clean)))

        d = {"hello": GoodMSONClass(1, 2, 3)}
        self.assertRaises(TypeError, json.dumps, d)
        clean = jsanitize(d)
        self.assertIsInstance(clean["hello"], str)
        clean_strict = jsanitize(d, strict=True)
        self.assertEqual(clean_strict["hello"]["a"], 1)
        self.assertEqual(clean_strict["hello"]["b"], 2)

        d = {"dt": datetime.datetime.now()}
        clean = jsanitize(d)
        self.assertIsInstance(clean["dt"], str)
        clean = jsanitize(d, allow_bson=True)
        self.assertIsInstance(clean["dt"], datetime.datetime)

        d = {
            "a": ["b", np.array([1, 2, 3])],
            "b": ObjectId.from_datetime(datetime.datetime.now()),
        }
        clean = jsanitize(d)
        self.assertEqual(clean["a"], ["b", [1, 2, 3]])
        self.assertIsInstance(clean["b"], str)

        rnd_bin = bytes(np.random.rand(10))
        d = {"a": bytes(rnd_bin)}
        clean = jsanitize(d, allow_bson=True)
        self.assertEqual(clean["a"], bytes(rnd_bin))
        self.assertIsInstance(clean["a"], bytes)
Example #42
 def get_doc_obj_by_dt(self, p_db_nm, p_collection_nm, p_query_dt):
     v_query_dt_str      = p_query_dt.strftime('%Y-%m-%d %H:%M.%S')
     v_query_dt_utc      = datetime.datetime.strptime(v_query_dt_str, '%Y-%m-%d %H:%M.%S')
     v_query_obj_id      = ObjectId.from_datetime(v_query_dt_utc)
     db_obj              = self.client_obj[p_db_nm]
     v_doc_obj           = db_obj[p_collection_nm].find({"_id": {"$gte": v_query_obj_id}})
     return v_doc_obj
Example #43
def dates_filter_query(field, start_time, end_time):
    collection = get_mongo_client(db_name=db_json, collection_name="json_logs")
    if (field == '_id'):
        filtered_rows = collection.find({
            field: {
                '$gte': ObjectId.from_datetime(start_time),
                '$lt': ObjectId.from_datetime(end_time)
            }
        })
    else:
        filtered_rows = collection.find(
            {field: {
                '$gte': start_time,
                '$lt': end_time
            }})
    return (filtered_rows)
Example #44
    def from_metaweblog(cls, struct, post_type='post', is_edit=False):
        # Initialize a Post from the struct received over the metaweblog RPC
        title = struct.get('title','')

        meta_description = struct.get('mt_excerpt', '')
        if len(meta_description) > 155:
            raise ValueError(
                "Description is %d chars, Max length is 155" % len(meta_description)
            )

        if 'mt_keywords' in struct:
            tags = [
                tag.strip() for tag in struct['mt_keywords'].split(',')
                if tag.strip()
            ]
        else:
            tags = None

        slug = (
            slugify.slugify(struct['wp_slug'])
                if struct.get('wp_slug')
                else slugify.slugify(title))

        description = struct.get('description', '')
        status = (struct.get('post_status')
                or struct.get('page_status')
                or 'publish')

        if "date_modified_gmt" in struct:
            tup = struct['date_modified_gmt'].timetuple()
            mod = utc_tz.localize(datetime.datetime(*tup[0:6]))
        else:
            mod = datetime.datetime.utcnow()

        body = markup.markup(description)
        img_src = struct.get(u'img_src', '')
        rv = cls(
            title=title,
            # Format for display
            body=body,
            plain=plain.plain(body),
            summary=summarize.summarize(body, 200),
            original=description,
            meta_description=meta_description,
            tags=tags,
            slug=slug,
            type=post_type,
            status=status,
            wordpress_id=struct.get('postid'),
            mod=mod,
            img_src=img_src
        )

        if not is_edit and "date_created_gmt" in struct:
            date_created = datetime.datetime.strptime(
                struct['date_created_gmt'].value, "%Y%m%dT%H:%M:%S")
            rv.id = ObjectId.from_datetime(date_created)

        return rv
Example #45
    def from_metaweblog(
        cls, struct, post_type='post', is_edit=False
    ):
        """Receive metaWeblog RPC struct and initialize a Post.
           Used both by migrate_from_wordpress and when receiving a new or
           edited post from MarsEdit.
        """
        title = struct.get('title', '')

        # We expect MarsEdit to set categories with mt_setPostCategories()
        assert 'categories' not in struct

        if 'mt_keywords' in struct:
            tags = [
                tag.strip() for tag in struct['mt_keywords'].split(',')
                if tag.strip()
            ]
        else:
            tags = None


        slug = (
            slugify.slugify(struct['wp_slug'])
            if struct.get('wp_slug')
            else slugify.slugify(title))

        description = struct.get('description', '')
        status = struct.get('post_status', 'publish')
        if 'date_modified_gmt' in struct:
            tup = struct['date_modified_gmt'].timetuple()
            mod = utc_tz.localize(datetime.datetime(*tup[0:6]))
        else:
            mod = datetime.datetime.utcnow()

        body = markup.markup(description)

        rv = cls(
            title=title,
            # Format for display
            body=body,
            summary=summarize.summarize(body, 200),
            original=description,
            tags=tags,
            slug=slug,
            type=post_type,
            status=status,
            wordpress_id=struct.get('postid'),
            mod=mod
        )

        if not is_edit and 'date_created_gmt' in struct:
            # TODO: can fail if two posts created in same second, add random
            #   suffix to ObjectId
            date_created = datetime.datetime.strptime(
                struct['date_created_gmt'].value, "%Y%m%dT%H:%M:%S")
            rv.id = ObjectId.from_datetime(date_created)

        return rv
Example #46
 def remove_expired_results(self, collection):
     limit = datetime.datetime.now(pytz.utc) - datetime.timedelta(days=EXPIRE_AFTER)
     _id = ObjectId.from_datetime(limit)
     docs = self._db[collection].find({"_id": {"$lt": _id}, "sample_id": ""})
     for d in docs:
         if "title" in d:
             print(collection + ": removing " + d["title"])
         else:
             print(collection + ": removing (no name)")
     self._db[collection].remove({"_id": {"$lt": _id}, "sample_id": ""})
Example #47
    def from_datetime(cls, *args, **kwargs):
        min = cls._pop_from_dict(kwargs, 'min')
        max = cls._pop_from_dict(kwargs, 'max')

        assert not (min and max), "Both min and max specified"

        oid = ObjectId(BaseObjectId.from_datetime(*args, **kwargs))
        if min or max:
            oid = ObjectId(oid.__id[:2] + ('\x00' if min else '\xff') * 10)
        return oid
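The min/max padding above widens a from_datetime id, which bson itself always zero-fills past the timestamp; the test_from_datetime examples elsewhere on this page assert exactly that:

import datetime
from bson.objectid import ObjectId

oid = ObjectId.from_datetime(datetime.datetime(2017, 10, 21))
assert str(oid)[8:] == "0" * 16  # only the 4 timestamp bytes are non-zero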
Example #48
def admin():
    # Auth Check
    is_admin = auth_check("ordering_admin")
    if request.form.get("action") == "tax" and request.form.get("tax") and is_admin:
        g.mongo.db.preferences.update({"_id": "ordering"}, {"$set": {"tax": float(request.form.get("tax", 0))}},
                                      upsert=True)
    elif request.form.get("action") == "tax_corp" and request.form.get("tax") and is_admin:
        g.mongo.db.preferences.update({"_id": "ordering"}, {"$set": {"tax_corp": float(request.form.get("tax", 0))}},
                                      upsert=True)
    tax_db = g.mongo.db.preferences.find_one({"_id": "ordering"})
    tax = "{:.02f}".format(tax_db.get("tax", 0)) if tax_db else 0
    tax_corp = "{:.02f}".format(tax_db.get("tax_corp", 0)) if tax_db else 0

    # Invoice List
    one_month_oid = ObjectId.from_datetime(datetime.datetime.today() - datetime.timedelta(30))
    invoice_table = []
    marketeer_invoice_table = []
    new_invoice_table = []
    for invoice_db in g.mongo.db.invoices.find({"$or": [{"_id": {"$gt": one_month_oid}},
                                                        {"status": {"$not": re.compile("Completed")}}]}):
        invoice_status = invoice_db.get("status", "Not Processed")
        invoice_timestamp = ObjectId(invoice_db["_id"]).generation_time.strftime("%Y-%m-%d %H:%M:%S")
        invoice_color = ""
        if invoice_status == "Shipping - Completed":
            invoice_color = "primary"
        elif invoice_status == "Processing" or invoice_status.startswith("Shipping"):
            invoice_color = "warning"
        elif invoice_status in ["Failed", "Rejected", "Hold"]:
            invoice_color = "danger"
        elif invoice_status == "Completed":
            invoice_color = "success"

        finish_time = invoice_db.get("finish_time")
        if finish_time:
            time_to_delivery = finish_time - int(ObjectId(invoice_db["_id"]).generation_time.timestamp())
            ttd_days = time_to_delivery // (60 * 60 * 24)
            ttd_hours = time_to_delivery % (60 * 60 * 24) // (60 * 60)
            ttd_minutes = time_to_delivery % (60 * 60 * 24) % (60 * 60) // 60
            ttd_format = "{0}D{1}H{2}M".format(ttd_days, ttd_hours, ttd_minutes)
        else:
            ttd_format = "N/A"

        invoice_row = [invoice_color, invoice_timestamp, ttd_format, invoice_db["_id"], invoice_db["jf_end"],
                       "{:,.02f}".format(invoice_db["order_total"]), invoice_db.get("character"),
                       invoice_db.get("marketeer"), invoice_status]

        invoice_table.append(invoice_row)
        if invoice_db.get("marketeer") == session["CharacterName"]:
            marketeer_invoice_table.append(invoice_row)
        if invoice_status in ["Not Processed", "Submitted"]:
            new_invoice_table.append(invoice_row)

    return render_template("ordering_admin.html", invoice_table=invoice_table, tax=tax, is_admin=is_admin,
                           marketeer_invoice_table=marketeer_invoice_table, new_invoice_table=new_invoice_table,
                           tax_corp=tax_corp)
Example #49
def oid_date_range_filter(dt_from=None, dt_upto=None, field_name='_id'):
    """
    Constructs a range query, useful for querying an ObjectId field by date.
    :Parameters:
        - dt_from (datetime or tuple): start datetime; if a tuple, a datetime is constructed from it
        - dt_upto (datetime or tuple): end datetime; if a tuple, a datetime is constructed from it
        - field_name (str): optional, defaults to '_id'; the field to query. If None, returns the range only, else the full query
    :Returns:
        - range query ($gte dt_from, $lte dt_upto); due to the ObjectId structure the bounds resolve to whole seconds
    """
    def dt(dt_or_tuple):
        if isinstance(dt_or_tuple, datetime):
            return dt_or_tuple
        elif isinstance(dt_or_tuple, tuple):
            return datetime(*dt_or_tuple)
        else:
            raise TypeError('dt must be a date or tuple')
    q = SON()
    if dt_from is not None:
        q.update(SON([('$gte', ObjectId.from_datetime(dt(dt_from)))]))
    if dt_upto is not None:
        q.update(SON([('$lte', ObjectId.from_datetime(dt(dt_upto)))]))
    return q if field_name is None else SON([(field_name, q)])
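A usage sketch for oid_date_range_filter, passing dates as tuples per the docstring (`collection` is a placeholder pymongo collection):

# all documents created during March 2020
query = oid_date_range_filter(dt_from=(2020, 3, 1), dt_upto=(2020, 4, 1))
# -> SON([('_id', SON([('$gte', <ObjectId>), ('$lte', <ObjectId>)]))])
docs = collection.find(query)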
Example #50
def get_server_object_ids(**kwargs):
    """
        # for a given timestamp_range, compute the corresponding ObjectIds of MongoDB
    """
    # y = datetime.datetime.now().date() - datetime.timedelta(days=1)
    # t= datetime.datetime.now().date()
    if kwargs is not None:
        if 'time_range' in kwargs:
            prev, curr = kwargs['time_range']
            print(prev, curr)
            prev_timestamp = int(time.mktime(prev.timetuple()) * 1000)
            curr_timestamp = int(time.mktime(curr.timetuple()) * 1000)
        elif 'timestamp_range' in kwargs:
            prev_timestamp, curr_timestamp = kwargs['timestamp_range']
        else:
            raise Exception('Keyword Error')

    prev_utc_datetime = datetime.datetime.utcfromtimestamp(prev_timestamp/1000)
    curr_utc_datetime = datetime.datetime.utcfromtimestamp(curr_timestamp/1000)

    prev_utc_objectId = ObjectId.from_datetime(prev_utc_datetime)
    curr_utc_objectId = ObjectId.from_datetime(curr_utc_datetime)

    return prev_utc_objectId, curr_utc_objectId
Example #51
def home():
    with open("configs/base.json", "r") as base_config_file:
        base_config = json.load(base_config_file)

    # Missing APIs
    missing_apis = []
    corp_ids = []
    api_characters = set()
    # Determine accounts in corp
    for user_info in g.mongo.db.users.find({"corporation_id": base_config["corporation_id"]}):
        if g.mongo.db.security_characters.find_one({"_id": user_info["character_id"]}):
            corp_ids.append(user_info["_id"])
    # Determine characters with an api
    for api_user in g.mongo.db.api_keys.find({"_id": {"$in": corp_ids}}):
        for key in api_user["keys"]:
            api_characters.add(key["character_name"])
    # Determine characters in corp without an api from a corp account
    for corp_character in g.mongo.db.security_characters.find():
        if corp_character["name"] not in api_characters:
            missing_apis.append(corp_character["name"])

    # Personal Invoices
    one_month_oid = ObjectId.from_datetime(datetime.datetime.today() - datetime.timedelta(30))
    invoice_table = []
    for invoice in g.mongo.db.invoices.find({"_id": {"$gt": one_month_oid}, "user": session["CharacterOwnerHash"]}):
        invoice_status = invoice.get("status", "Not Processed")
        invoice_timestamp = ObjectId(invoice["_id"]).generation_time.strftime("%Y-%m-%d %H:%M:%S")
        invoice_color = ""
        if invoice_status == "Shipping - Completed":
            invoice_color = "primary"
        elif invoice_status == "Processing" or invoice_status.startswith("Shipping"):
            invoice_color = "warning"
        elif invoice_status in ["Failed", "Rejected"]:
            invoice_color = "danger"
        invoice_table.append([invoice_color, invoice_timestamp, invoice["_id"], invoice["jf_end"],
                              "{:,.02f}".format(invoice["order_total"]), invoice.get("marketeer"), invoice_status])

    # Away from EVE
    db_vacation = g.mongo.db.personals.find({"vacation": {"$exists": True}})
    away_from_eve = []
    for character in db_vacation:
        if character["corporation_id"] == base_config["corporation_id"]:
            away_from_eve.append([character["character_name"], character["vacation"], character["vacation_date"]])

    return render_template("corp.html", away_from_eve=away_from_eve, invoice_table=invoice_table,
                           missing_apis=missing_apis)
Example #52
    def history(self,
                after=True,
                fields=[],
                limit=10,
                since=None,
                sort=DESCENDING):
        """
        Return historical versions of this :term:`context`.

        :param after: if since is provided, only return history items
        after `since`. False will return items before
        since. **Default: ``True``**
        :param fields: a ``list`` or ``dict`` of fields to return in
        the results. If a list it should be list of strings
        representing the fields to return i.e. ``['mtime', ]``. If a
        ``dict`` it should specify either fields to omit or include
        i.e.: ``{'_id': False}`` or {'_id': False, 'title': True,
        'changed_by': True}.
        :param limit: The number of history records to fetch. **Default: 10**

        :param since: a :mod:``datetime.datetime`` object representing
        the date to use as a search point with ``after``. **Default: None**
        :param sort: The direction to sort the history
        items. ``DESCENDING`` provides the most recent change
        first. **Default: ``DESCENDING``**
                """
        if limit is None:
            limit = 0
        if not isinstance(limit, int):
            raise TypeError("expected int, received {}".format(type(limit)))
        query = {'orig_id': self.data['_id']}
        if since and isinstance(since, datetime.datetime):
            stamp = ObjectId.from_datetime(since)
            if after:
                operator = "$gt"
            else:
                operator = "$lt"
            query['_id'] = {operator: stamp}
        if fields:
            cursor = self._collection_history.find(
                query, fields).limit(limit).sort('_id', DESCENDING)
        else:
            cursor = self._collection_history.find(
                query).limit(limit).sort('_id', DESCENDING)
        return cursor
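A hypothetical call against the history API above, fetching recent changes after a date (`context` is a placeholder for an object of this class):

import datetime

# the 5 most recent versions changed after 2021-01-01
cursor = context.history(
    since=datetime.datetime(2021, 1, 1),
    after=True,
    fields={'_id': False, 'title': True, 'changed_by': True},
    limit=5)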
Example #53
File: bot.py Project: xuanb/msbot
    def questionGathering(self, usersToSendMsg=None):
        days = 5
        gen_time = datetime.today() - timedelta(days=days)
        dummy_id = ObjectId.from_datetime(gen_time)
        if not usersToSendMsg:
            usersToSendMsg = list(self.db.users.find({"_id": {"$lte": dummy_id}}))
        question = self.db.questions.find_one({"order": (self.order % self.db.questions.count())})
        self.order += 1

        for user in usersToSendMsg:
            markup =  ReplyKeyboardMarkup(keyboard=
                self.keyboard_two_col(question['keyboard'][user['language']]),
                resize_keyboard=True, one_time_keyboard=True)
            text_msg = question['content'][user['language']]
            self.sendMessage(user['chat_id'],self.data['question_info_msg'][user['language']])
            user['question_msg'] = self.sendMessage(user['chat_id'], text_msg, reply_markup=markup)
            user['question'] = question
            self.users_to_send_msg[user['chat_id']] = user
Example #54
def getDocsForToday(conColl):
    today = datetime.today()
    aggr = conColl.aggregate(
        [
            {"$match": {"_id": {"$gte": ObjectId.from_datetime(datetime(today.year, today.month, today.day))}}},
            {
                "$group": {
                    "_id": None,
                    "caloriesForDay": {"$sum": "$calories"},
                    "fatsForDay": {"$sum": "$fat"},
                    "carbsForDay": {"$sum": "$carbohydrates"},
                    "proteinsForDay": {"$sum": "$proteins"},
                }
            },
        ]
    )

    aggrDocs = list(aggr)

    return aggrDocs
Example #55
    def on_get(self, req, resp):
        now = datetime.datetime.now()
        querydate = now - datetime.timedelta(days=1)
        if req.params.get('date', None):
            # expected format: "YYYY-MM-DD HH:MM:SS"
            arrdate = req.params['date'].split("-")
            year = int(arrdate[0])
            month = int(arrdate[1])
            day = int(arrdate[2].split(" ")[0])
            hour = int(arrdate[2].split(" ")[1].split(":")[0])
            minute = int(arrdate[2].split(" ")[1].split(":")[1])
            second = int(arrdate[2].split(" ")[1].split(":")[2])
            querydate = datetime.datetime(year, month, day, hour, minute, second)
        msg = "Request ::: " + str(querydate)
        self.printMsg(msg)
        dummy_id = ObjectId.from_datetime(querydate)
        result = EventModel.objects(id__gt=dummy_id)[:100]
        json_string = json.dumps([r.obj_dict() for r in result])
        resp.body = json_string
        resp.status = falcon.HTTP_200
Example #56
    def test_extract_creation_date(self):
        now = datetime.now()

        doc = {
            '_id': ObjectId.from_datetime(now)
        }

        got = utils.extract_creation_date(doc, '_id')
        expected = now

        if expected.utcoffset() is not None:
            expected -= expected.utcoffset()

        expected = timegm(expected.timetuple())
        expected = datetime.fromtimestamp(expected, utc)

        self.assertEqual(expected, got)

        got = utils.extract_creation_date({}, '_id')
        self.assertIsNone(got)
Example #57
    def test_insert(self):
        from macsy import utils
        # Generate a doc, check # of docs, insert it, check it's incremented
        obj_id = ObjectId.from_datetime(dtparser.parse('21-10-2017'))
        hsh = utils.java_string_hashcode('515TitleDescription')
        expected = 11
        self.assertEqual(self.bb.count(), expected-1)
        self.assertEqual(self.bb.insert({DateBasedDocumentManager.doc_id : obj_id, 'HSH' : hsh, 'oID' : 515, 'T' : 'Title', 'D' : 'Description', 'Overwritten' : False, 'Inserted' : True, 'Tg' : [1, 2, 3]}), obj_id)
        self.assertEqual(self.bb.count(), expected)
        self.assertEqual([x for x in self.bb.find(query={'Inserted' : True})][0]['_id'], obj_id)
        
        
        # Try to insert it again
        self.assertEqual(self.bb.insert({DateBasedDocumentManager.doc_id : obj_id, 'oID' : 515, 'T' : 'Title', 'D' : 'Description', 'Overwritten' : True, 'Updated' : True, 'Tg' : [4, 5]}), obj_id)
        self.assertEqual(self.bb.count(), expected)
        self.assertEqual([x for x in self.bb.find(query={'Updated' : True})][0]['_id'], obj_id)
        self.assertEqual(self.bb.count(query={'Overwritten' : False}), 0)
        self.assertEqual([x for x in self.bb.find(query={'Overwritten' : True})][0]['_id'], obj_id)
        self.assertEqual([x for x in self.bb.find(tags=[1, 2, 3, 4, 5])][0]['_id'], obj_id)

        # Insert a document without an id and generate one
        self.assertEqual(self.bb.insert({'Blank_id' : True}).generation_time.date(), datetime.now().date())