def get_file_list(cls, channel, start_time: datetime, end_time: datetime):
        page = 1
        params = {
            'count': cls.REQUEST_COUNT_FILES,
            'channel': channel,
            'ts_from': start_time.timestamp(),
            'ts_to': end_time.timestamp()
        }

        num_files = 0
        file_list = []
        while True:
            # Get next page of files
            params['page'] = page
            print(f"Querying slack for page {page} of ALL files between {params['ts_from']} - {params['ts_to']}")
            response = cls.get_request(cls.URL_FILE_LIST, params, cls.SCHEMA_FILE_LIST)

            num_files += len(response['files'])
            tot_files = response['paging']['total']
            print(f"Retrieved data about {num_files}/{tot_files} files")

            # Add files to list
            for file in response['files']:
                file_list.append(file)

            # Decide whether to continue or not
            if num_files == 0 or response['paging']['page'] >= response['paging']['pages']:
                break
            page += 1

        return file_list
Example #2
def get_match_history_games(summoner: Summoner, start_time: datetime,
                            end_time: datetime):
    request_string = "/match/v4/matchlists/by-account/" + summoner.account_id \
                     + "?beginTime=" + str(int(start_time.timestamp() * 1000))
    if end_time is not None:
        request_string += "&endTime=" + str(int(end_time.timestamp() * 1000))

    return _riot_get(request_string)
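A hypothetical call sketch; the Riot matchlist endpoint takes beginTime/endTime as epoch milliseconds, hence the * 1000 above (the summoner object is assumed to come from elsewhere):

import datetime

start = datetime.datetime(2021, 6, 1, tzinfo=datetime.timezone.utc)
end = datetime.datetime(2021, 6, 8, tzinfo=datetime.timezone.utc)
# matches = get_match_history_games(summoner, start, end)  # `summoner` assumed to exist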
    def fetch_price_data_nifty50(self, ct: datetime, pt: datetime):
        """
        Fetches one year of daily historical prices for the NIFTY 50 index.

        Parameters
        ----------
        ct : datetime
            The current date.
        pt : datetime
            The date one year earlier.

        Returns
        -------
        df : dataframe
            A dataframe containing the Date and the corresponding price.

        """

        # convert to epoch
        current_timestamp = int(ct.timestamp())
        previous_timestamp = int(pt.timestamp())

        headers = {
            'authority': 'in.investing.com',
            'cache-control': 'max-age=0',
            'upgrade-insecure-requests': '1',
            'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.182 Safari/537.36',
            'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
            'sec-gpc': '1',
            'sec-fetch-site': 'none',
            'sec-fetch-mode': 'navigate',
            'sec-fetch-user': '******',
            'sec-fetch-dest': 'document',
            'accept-language': 'en-GB,en-US;q=0.9,en;q=0.8',
            'cookie': '__cfduid=d3c2e1fe62f9856cf29821a05e88b54811619441949; SideBlockUser=a%3A2%3A%7Bs%3A10%3A%22stack_size%22%3Ba%3A1%3A%7Bs%3A11%3A%22last_quotes%22%3Bi%3A8%3B%7Ds%3A6%3A%22stacks%22%3Ba%3A1%3A%7Bs%3A11%3A%22last_quotes%22%3Ba%3A1%3A%7Bi%3A0%3Ba%3A3%3A%7Bs%3A7%3A%22pair_ID%22%3Bs%3A5%3A%2224003%22%3Bs%3A10%3A%22pair_title%22%3Bs%3A0%3A%22%22%3Bs%3A9%3A%22pair_link%22%3Bs%3A37%3A%22%2Frates-bonds%2Findia-3-month-bond-yield%22%3B%7D%7D%7D%7D; adBlockerNewUserDomains=1619441951; udid=f54e7210e8e3bc9b35a468ba65a8ad14; G_ENABLED_IDPS=google; _fbp=fb.1.1619441955314.16268788; r_p_s_n=1; PHPSESSID=ojh953jnvn10jlr2df6l722n98; StickySession=id.22709788039.279in.investing.com; _tz_id=d1084eeb396bb44ef6ef1f44c51b7af5; welcomePopup=1; geoC=IN; nyxDorf=ODk%2BZGYuZDo3YWFqYy41NWIyZjw1LDAwNTdlYw%3D%3D; __cflb=02DiuF9qvuxBvFEb2qB1HcuDLvqD9ieP4VA1bEKif7H4g; ses_id=MX8xcGJtMjphJWlvZzY3NTJhM2wzMWVmZ29nbDs0Z3EwJGZoNWI%2BeGRrbiAwMzYqZGU3MjZiMGM9b2Y4N2RnOzEyMWRiYjJpYTRpYGc3N2EyazNoM2dlNGdgZ2M7aGduMDZmNzViPjNkNG5jMDg2O2R2Nys2cjAhPW9mNjd2ZyAxPjFwYjIybmE%2BaWVnNDc2MjIzOjM9ZWdnN2dkOzlnfzB7; smd=f54e7210e8e3bc9b35a468ba65a8ad14-1620052261',
        }
        params = (
            ('end_date', int(current_timestamp)),
            ('st_date', int(previous_timestamp)),
        )

        response = requests.get(self.__config_details['nifty_50']['full_url'], headers=headers,
                                params=params)

        soup = BeautifulSoup(response.content, "lxml")
        table = soup.find('table', attrs={'class': 'common-table medium js-table'})
        df = pd.read_html(str(table))[0]

        # drop columns and keep only Date and Price
        df = df.drop(['Open', 'High', 'Low', 'Volume', 'Chg%'], axis=1)

        # change the Date to timestamp and change to the format %Y-%m-%d
        df['Date'] = pd.to_datetime(df['Date']).dt.strftime("%Y-%m-%d")

        df = df.sort_values(by=['Date'])

        return df
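A hypothetical call sketch, assuming an instance (here called fetcher) with the investing.com config loaded:

import datetime

ct = datetime.datetime.now()
pt = ct - datetime.timedelta(days=365)
# df = fetcher.fetch_price_data_nifty50(ct, pt)  # Date/Price rows sorted by Date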
    def _pushshift_query_builder(query: str = None,
                                 start_date: datetime = None,
                                 end_date: datetime = None,
                                 sort_field: str = None,
                                 size: int = 10000,
                                 randomize: bool = True,
                                 sort_dir: str = "desc") -> dict:
        '''Pushshift Elasticsearch query builder'''

        q = defaultdict(dict)

        size = 10000 if size is None else size
        q['size'] = size

        # Add Random sampling component if requested
        if randomize:
            q['query']['function_score'] = {}
            q['query']['function_score']['random_score'] = {}
            q['query']['function_score']['random_score']['seed'] = int(
                time.time() * 1000)
            q['query']['function_score']['query'] = defaultdict(dict)
            q['query']['function_score']['query']['bool'][
                'must'] = filters = []
        else:
            q['query']['bool'] = {}
            q['query']['bool']['must'] = filters = []

        if sort_field is not None:
            q['sort'] = {sort_field: sort_dir}

        sqs = defaultdict(dict)
        sqs['simple_query_string']['query'] = query
        sqs['simple_query_string']['fields'] = ["title", "selftext"]
        sqs['simple_query_string']['default_operator'] = "and"
        filters.append(sqs)

        if start_date is not None:
            filters.append({
                'range': {
                    'created_utc': {
                        'gte': int(start_date.timestamp())
                    }
                }
            })

        if end_date is not None:
            filters.append(
                {'range': {
                    'created_utc': {
                        'lt': int(end_date.timestamp())
                    }
                }})

        return q
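A minimal usage sketch, assuming the builder is callable as shown; it only builds the Elasticsearch query dict and does not send any request:

import datetime

q = _pushshift_query_builder(
    query="python",
    start_date=datetime.datetime(2021, 4, 1),
    end_date=datetime.datetime(2021, 5, 1),
    sort_field="created_utc",
    size=500,
    randomize=False,
)
# q now contains a simple_query_string clause plus two created_utc range filters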
Example #5
def get_datetime_timestamp(dt: datetime) -> float:
    import sys
    if sys.platform == 'win32':
        try:
            return dt.timestamp()
        except OSError as e:
            if e.errno == 22:
                return 0
            else:
                raise
    else:
        return dt.timestamp()
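A small sketch of the case the win32 branch guards against: on Windows, timestamp() raises OSError (errno 22) for pre-epoch and other out-of-range datetimes, which the helper maps to 0; elsewhere it simply returns a negative value:

import datetime

print(get_datetime_timestamp(datetime.datetime(2021, 1, 1)))  # epoch seconds (local time)
print(get_datetime_timestamp(datetime.datetime(1900, 1, 1)))  # 0 on win32, negative elsewhere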
Example #6
def mod_last_detected(file_path: Path, last_detected: datetime):
    dir = file_path.parent

    db = read_db_file(str(dir))
    file_name = file_path.name
    file_db = db["watching"][file_name]
    file_db["last_detected"] = last_detected.timestamp()
    write_db_file(str(dir), db)
def is_bid_by_time(end_time: datetime) -> (bool, int):
    now = datetime.datetime.now()
    time_left = int(end_time.timestamp() - now.timestamp())

    if -15 <= time_left <= CRUNCH_TIME:
        return True, time_left
    else:
        return False, time_left
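A hypothetical usage sketch; CRUNCH_TIME is a module-level constant in the original project, so its value here is an assumption:

import datetime

CRUNCH_TIME = 30  # assumed: start bidding within the last 30 seconds of the auction
auction_end = datetime.datetime.now() + datetime.timedelta(seconds=10)
should_bid, seconds_left = is_bid_by_time(auction_end)  # (True, ~10)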
 async def fetch_my_trades_for_order(self, order_id: str, symbol: str,
                                     since: datetime) -> List:
     if not self.has_api('fetchMyTrades'):
         return []
     my_trades = await self.ex.fetch_my_trades(symbol, since.timestamp())
     matched_trades = [
         trade for trade in my_trades if trade['order'] == order_id
     ]
     return matched_trades
 def time2seconds(t: datetime) -> int:
     """
     converts a datetime to an integer of seconds since epoch
     """
     try:
         return int(t.timestamp())
     except:
         # only implemented in python3.3
         # this is an old compatibility thing
         return t.hour * 60 * 60 + t.minute * 60 + t.second
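Worth noting: the try branch returns seconds since the epoch while the fallback returns seconds since midnight, so the two paths are not equivalent. A quick sketch of the normal (Python 3.3+) path:

import datetime

noon_utc = datetime.datetime(2021, 1, 1, 12, 0, tzinfo=datetime.timezone.utc)
print(time2seconds(noon_utc))  # 1609502400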
    def serializeDateResponce(self, date: datetime) -> str:
        """
            Формирует ответ с переданной или текущуй датой
        """

        if date is None:
            date = datetime.datetime.now()
        if type(date) is datetime.datetime:
            date = date.timestamp()

        return self._serialize(self.DATE_RESPONSE, date=date)
    def next_datetime(min_year: datetime,
                      max_year: datetime = None) -> datetime:
        """
        Generates a random Date and time in the range ['minYear', 'maxYear'].
        This method generate dates without time (or time set to 00:00:00)

        :param min_year: min range args
        :param max_year: (optional) maximum range args
        :return: a random Date and time args.
        """
        if max_year is None:
            max_year = min_year
            min_year = datetime.datetime(2000, 1, 1)

        diff = int((max_year.timestamp() * 1000) -
                   (min_year.timestamp() * 1000))
        if diff <= 0:
            return min_year

        _time = ((min_year.timestamp() * 1000) +
                 RandomInteger.next_integer(0, diff)) / 1000
        return datetime.datetime.fromtimestamp(_time, pytz.utc)
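A hypothetical usage sketch (RandomInteger and pytz come from the surrounding library, as in the method above):

import datetime

dt = next_datetime(datetime.datetime(2020, 1, 1), datetime.datetime(2021, 1, 1))
# dt is a random timezone-aware (UTC) datetime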
    def download_data(self,
                      query,
                      subreddit='',
                      from_date: datetime = None,
                      to_date: datetime = None):
        logger.info(f'Downloading posts with keyword {query} from subreddit '
                    f'{subreddit} from date {from_date} to date {to_date}')

        posts_by_day = {}

        for page, pages, posts in reddit.get_posts(query, subreddit,
                                                   int(from_date.timestamp()),
                                                   int(to_date.timestamp())):
            logger.info(f'Fetched {page} of {pages} pages')

            for post in posts:
                post_date = datetime.datetime.fromtimestamp(
                    post['created_utc']).date()

                if post_date not in posts_by_day:
                    posts_by_day[post_date] = []

                posts_by_day[post_date].append(post)

        for day in posts_by_day:
            logger.info(f'Saving posts_{query}_{day}.csv')

            csv_writer = CsvWriter(self.data_dir, f'posts_{query}_{day}.csv')

            csv_writer.add_row(self.csv_header)

            for post in posts_by_day[day]:
                post = _get_post_row(post)

                if '[removed]' in post[1]:
                    continue

                csv_writer.add_row(post)
    def get_conv_history(cls, conv, start_time: datetime, end_time: datetime):
        print("Retrieving messages between " + cls.format_time(start_time) + " - " + cls.format_time(end_time))

        params = {
            'channel': conv,
            'inclusive': True,
            'oldest': start_time.timestamp(),
            'latest': end_time.timestamp(),
        }

        print(f"Querying slack for messages between {params['oldest']} - {params['latest']}")

        # Build up array repeatedly
        messages = []
        while True:
            # Get next batch of messages
            content = cls.get_request(cls.URL_HISTORY_CONV, params, schema=cls.SCHEMA_HISTORY_DM, timeout=cls.WAIT_TIME_TIER_4)

            next_messages = content['messages']
            if len(next_messages) == 0:
                break

            # Make sure first/last messages don't overlap
            if len(messages) > 0 and next_messages[0]['ts'] == messages[-1]['ts']:
                messages.extend(next_messages[1:])
            else:
                messages.extend(next_messages)

            # Update params and print status if there are more messages to get
            if not content['has_more']:
                print("Retrieved " + str(len(messages)) + " messages")
                break

            print("Messages retrieved so far: " + str(len(messages)))
            params['cursor'] = content['response_metadata']['next_cursor']

        return messages
Example #15
        def __init__(self, usr: Address, fax: bytes, eta: datetime):
            """Creates a plan to be executed later.

            Args:
            usr: Address of the caller
            fax: Identifies the calldata
            eta: Identifies the earliest time of execution
            """
            assert isinstance(usr, Address)
            assert isinstance(fax, bytes)
            assert isinstance(eta, datetime.datetime)

            self.usr = usr
            self.fax = fax
            self.eta = eta.timestamp()
Example #16
def collect_hour_of_data(time: datetime):
    data = {}
    unix_time = int(time.timestamp())
    url = 'https://venmo.com/api/v5/public?until={}'.format(unix_time)

    for i in range(30):
        print("collecting batch: {}".format(i))
        r = requests.get(url)
        values = r.json()
        add_data(values.get('data'), data)

        url = values.get('paging').get('next')

    with open('data-{}.json'.format(time.strftime('%Y-%m-%dT%X')), 'w') as f:
        json.dump(data, f, indent=4)
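A quick call sketch; this hits the public Venmo feed, so network access and the endpoint still being available are assumptions:

import datetime

collect_hour_of_data(datetime.datetime.now())
# fetches 30 batches of the public feed ending at the given time, then writes data-<timestamp>.json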
Example #17
 def __init__(self, symbol_name: str, payed_amount: float,
              pay_date_string: str, state: str, run_date_dt: datetime):
     # run_date is the current run date; if pay_date_string is None, we assume the date is now
     self.symbol_name = symbol_name
     self.payed_amount = payed_amount
     self.pay_date_string = pay_date_string
     self.state = state  # state "paid" seems the best one
     if pay_date_string is None:
         # NOTE: might need to change this behaviour. If pay_date_string is None the state is
         # most likely "pending", so fall back to the current date. The current date should be
         # run_date (a global in another file), so it is fed in through an argument.
         self.date_dt = run_date_dt
         self.date_epoch = run_date_dt.timestamp()
     else:
         try:
             self.date_dt = dateutil.parser.parse(pay_date_string)
             self.date_epoch = self.date_dt.timestamp()
         except:
             raise ValueError(
                 'provided date must be a parseable date - preferably of the type "%y-%m-%dT%H:%M:%S.%f%z" or "%y-%m-%d %H:%M:%S.%f %z"'
             )
    def time_difference(self, now_time: datetime, complete_keys: list) -> int:
        """
        获取redis数据缓存时间中偏离现在的最长时间
        :param now_time: 此时
        :param complete_keys:redis缓存的keys
        :return:时间差
        """
        time_list = list()  # holds the latest cached timestamp for each key
        for key in complete_keys:
            redis_data = self.redis_worke.hash_get_all(key)
            # if any key comes back empty, a re-check needs to be triggered
            if not redis_data:
                return 1000000
            max_time = max(redis_data.keys()).decode()
            time_list.append(max_time)

        min_time = min(time_list)
        last_time = time.strptime(min_time, "%H:%M:%S")
        redis_last_time = datetime.datetime(now_time.year, now_time.month, now_time.day, last_time.tm_hour,
                                            last_time.tm_min,
                                            last_time.tm_sec)
        time_inv = now_time.timestamp() - redis_last_time.timestamp()  # time difference
        return time_inv
Example #19
 def get_milliseconds_from_datetime(datetime_value: datetime) -> float:
     return datetime_value.timestamp() * 1000
Example #20
def minute_of(time: datetime) -> int:
    return int(time.timestamp() / 60)
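A one-line sanity check with a timezone-aware input:

import datetime

print(minute_of(datetime.datetime(1970, 1, 1, 1, 0, tzinfo=datetime.timezone.utc)))  # 60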
Example #21
 def generate(self, user, for_time: datetime) -> str:
     return self.__otp(user).at(int(for_time.timestamp())) 
Example #22
 def verify(self, user, otp: str, for_time: datetime) -> bool:
     return self.__otp(user).verify(otp, int(for_time.timestamp()))
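The two methods above pair up as generate/verify. A minimal sketch of the kind of object self.__otp(user) presumably returns (a pyotp-style TOTP is an assumption, not something the snippet confirms):

import datetime
import pyotp

totp = pyotp.TOTP(pyotp.random_base32())
now = datetime.datetime.now()
code = totp.at(int(now.timestamp()))
assert totp.verify(code, int(now.timestamp()))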
def get_data_for_timespan(start: datetime, end: datetime, product):
    mongo = MongoClient()
    db = mongo["coinbase_history"]
    c = db.get_collection("historical_data")
    data = []
    for d in c.find({'Product': product.id}):
        data.append(d)
    days = (end - start).days

    wanted_dates = []
    for i in range(0, days + 1):
        wanted_dates.append((start + datetime.timedelta(i)).timestamp())
    dates, last_date = [], start.timestamp()
    data.reverse()
    # TODO get biggest diff of data to check if data is missing
    wanted_index = 0
    for i in range(0, len(data)):
        try:
            # if the wanted date is more than half a day behind this row, advance the index
            while fucking_date.fromtimestamp(
                    wanted_dates[wanted_index]) < fucking_date.fromtimestamp(
                        data[i]['Timestamp']) - datetime.timedelta(0.5):
                wanted_index += 1
            if abs((fucking_date.fromtimestamp(data[i]['Timestamp']) -
                    fucking_date.fromtimestamp(
                        wanted_dates[wanted_index])).total_seconds()) < 43200:
                dates.append(data[i])
                wanted_dates.pop(wanted_index)
        except Exception as e:
            pass
    # if fucking_date.fromtimestamp(dates[-1]['Timestamp']).date()==fucking_date.fromtimestamp(wanted_dates[0]).date():
    #     wanted_dates.pop(0)

    if len(wanted_dates) == 1 and fucking_date.fromtimestamp(wanted_dates[0]).date() == fucking_date.today().date() \
            or len(dates)>0 and (fucking_date.now().date() - fucking_date.fromtimestamp(dates[-1]['Timestamp']).date()).days == 1 \
            and end.date() == fucking_date.today().date():
        today = client.get_product_24hr_stats(product.id)
        parsed_today = {
            "Product":
            product.id,
            "Timestamp":
            datetime.datetime.combine(datetime.datetime.today(),
                                      datetime.time()).timestamp(),
            "Low":
            float(today['low']),
            "High":
            float(today['high']),
            "Open":
            float(today['open']),
            "Close":
            float(today['last']),
            "Volume":
            float(today['volume'])
        }
        dates.append(parsed_today)
        mongo.close()
        return dates
    elif len(wanted_dates) == 0:
        return dates
    else:
        for i in range(0, wanted_dates.__len__()):
            wanted_dates[i] = fucking_date.fromtimestamp(
                wanted_dates[i]).date()
        # TODO get still needed data from client
        data = []
        if wanted_dates[-1] == fucking_date.now().date():
            wanted_dates.pop(-1)
            data.append(client.get_product_24hr_stats(product.id))
        data.extend(
            client.get_product_historic_rates(product.id,
                                              start=wanted_dates[0],
                                              end=wanted_dates[-1],
                                              granularity=86400))
        data.reverse()
        try:
            db.create_collection(
                "historical_data", **{
                    "_id": [("Product", pymongo.ASCENDING),
                            ("Timestamp", pymongo.ASCENDING)]
                })
        except Exception:
            pass
        finally:
            db_coll = db["historical_data"]
            db_coll.create_index([("Product", pymongo.DESCENDING),
                                  ("Timestamp", pymongo.DESCENDING)],
                                 unique=True)
        for row in data:
            if 'low' in row:
                break
            row = {
                "Product": product.id,
                "Timestamp": row[0],
                "Low": row[1],
                "High": row[2],
                "Open": row[3],
                "Close": row[4],
                "Volume": row[5]
            }
            try:
                db_coll.update_one(
                    {
                        "Product": product.id,
                        "Timestamp": row["Timestamp"]
                    }, {"$set": row},
                    upsert=True)
            except Exception:
                print("Reminder to make a db system completely from scratch")

        temp_timestamps = []
        for temp_date in dates:
            temp_timestamps.append(temp_date['Timestamp'])
        for row in data:
            try:
                dates.append({
                    "Product": product.id,
                    "Timestamp": row[0],
                    "Low": row[1],
                    "High": row[2],
                    "Open": row[3],
                    "Close": row[4],
                    "Volume": row[5]
                })
            except Exception:
                try:
                    data[-1] = {
                        "Product":
                        product.id,
                        "Timestamp":
                        datetime.datetime.combine(datetime.datetime.today(),
                                                  datetime.time()).timestamp(),
                        "Low":
                        float(data[-1]['low']),
                        "High":
                        float(data[-1]['high']),
                        "Open":
                        float(data[-1]['open']),
                        "Close":
                        float(data[-1]['last']),
                        "Volume":
                        float(data[-1]['volume'])
                    }
                except:
                    pass
        try:
            mongo.close()
        except:
            pass
        dates.sort(key=lambda x: x['Timestamp'])
        return dates
Example #24
def timestamp(dt: datetime) -> int:
    return int(dt.timestamp() * 1000)
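A quick check of the millisecond conversion (a timezone-aware datetime avoids local-time surprises):

import datetime

dt = datetime.datetime(2021, 5, 1, 12, 0, tzinfo=datetime.timezone.utc)
print(timestamp(dt))  # 1619870400000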
Example #25
 def to_epoch(dt: datetime) -> int:
     return int(dt.timestamp()) * 1000
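Note the subtle difference from the previous example: int() here truncates to whole seconds before multiplying, so the result is always a multiple of 1000, whereas int(dt.timestamp() * 1000) keeps millisecond precision. A sketch of the difference:

import datetime

dt = datetime.datetime(2021, 5, 1, 12, 0, 0, 500000, tzinfo=datetime.timezone.utc)
print(int(dt.timestamp()) * 1000)  # 1619870400000 (milliseconds dropped)
print(int(dt.timestamp() * 1000))  # 1619870400500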
 def enterabs_dt(self, dt: datetime, priority, func, args=(), kwargs=None):
     kwargs = kwargs if kwargs else {}
     self.sched.enterabs(dt.timestamp(), priority, func, args, kwargs)
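A minimal sketch of how this wrapper is meant to be used, assuming self.sched was built with time.time as the scheduler's timefunc so absolute epoch timestamps line up:

import datetime
import sched
import time

s = sched.scheduler(time.time, time.sleep)
run_at = datetime.datetime.now() + datetime.timedelta(seconds=2)
s.enterabs(run_at.timestamp(), 1, print, ("fired",))
s.run()  # blocks for roughly two seconds, then prints "fired"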
Example #27
def rand_time(start: datetime = None,
              hrs: float = 1) -> datetime:
    # resolve "now" at call time; a datetime.datetime.now() default argument
    # would be evaluated once, when the function is defined
    if start is None:
        start = datetime.datetime.now()
    stime = (start - datetime.timedelta(hours=hrs)).timestamp()  # epoch seconds
    etime = start.timestamp()
    rtime = stime + random.random() * (etime - stime)
    return datetime.datetime.fromtimestamp(rtime)
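A quick usage sketch: a uniformly random naive local datetime from the last 24 hours:

import datetime

dt = rand_time(hrs=24)
print(dt.isoformat())  # some moment within the 24 hours before the call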