Example #1
 def get_runtime(self):
     """Returns runtime in seconds from output."""
     from datetime import datetime
     start = " ".join(self.grep_output('PROGRAM STARTED AT')[-1].split()[-2:])
     end = " ".join(self.grep_output('PROGRAM ENDED AT')[-1].split()[-2:])
     startTime = datetime.fromisoformat(start)
     endTime = datetime.fromisoformat(end)
     diff = endTime - startTime
     return diff.total_seconds()
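The helper above only parses correctly if each matched log line ends with an ISO-style date and time separated by a space. A minimal stand-alone sketch of that parsing step, using hypothetical log lines:

from datetime import datetime

start_line = "PROGRAM STARTED AT 2021-03-01 10:15:00"   # hypothetical log line
end_line = "PROGRAM ENDED AT 2021-03-01 10:20:30"       # hypothetical log line
start = " ".join(start_line.split()[-2:])                # "2021-03-01 10:15:00"
end = " ".join(end_line.split()[-2:])
runtime = (datetime.fromisoformat(end) - datetime.fromisoformat(start)).total_seconds()
print(runtime)  # 330.0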
Example #2
 async def _get_initial(self) \
         -> Tuple[int, Sequence[Message], Sequence[int]]:
     redis = self._redis
     prefix = self._prefix
     while True:
         await redis.watch(prefix + b':max-mod', self._abort_key)
         pipe = redis.pipeline()
         pipe.zrange(prefix + b':sequence')
         pipe.get(self._abort_key)
         uids, abort = await pipe.execute()
         MailboxAbort.assertFalse(abort)
         multi = redis.multi_exec()
         multi.get(prefix + b':max-mod')
         for uid in uids:
             msg_prefix = prefix + b':msg:' + uid
             multi.echo(uid)
             multi.smembers(msg_prefix + b':flags')
             multi.get(msg_prefix + b':time')
         try:
             results = await multi.execute()
         except MultiExecError:
             if await _check_errors(multi):
                 raise
         else:
             break
     mod_seq = int(results[0] or 0)
     updated: List[Message] = []
     for i in range(1, len(results), 3):
         msg_uid = int(results[i])
         msg_flags = {Flag(flag) for flag in results[i + 1]}
         msg_time = datetime.fromisoformat(results[i + 2].decode('ascii'))
         msg = Message(msg_uid, msg_flags, msg_time)
         updated.append(msg)
     return mod_seq, updated, []
Example #3
File: nps.py Project: merxbj/src
 def get(self, code, fromDateTime):
     if fromDateTime:
         result = [washcode for washcode in washcodes
                   if datetime.fromisoformat(washcode["CreatedTime"]) > fromDateTime]
     else:
         result = [washcode for washcode in washcodes if not code or washcode["Code"] == code]
     return result, 200
Example #4
    def _create_pr_from_api_data(self, pr_data):
        """Create a PullRequest instance based on data from GitHub API"""
        if pr_data is None:
            return None
        pr_number = pr_data['number']
        pr_author = pr_data['user']['login']
        pr_title = pr_data['title']
        pr_state = pr_data['state']
        pr_merge_hash = pr_data['merge_commit_sha']
        ### Python can't parse an ISO date that ends in 'Z', so I strip it.
        pr_merge_dttm = datetime.fromisoformat(pr_data['merged_at'].rstrip('Z')) if pr_data['merged_at'] else None
        pr_update_dttm = datetime.fromisoformat(pr_data['updated_at'].rstrip('Z')) if pr_data['updated_at'] else None
        ### PR description can be empty :S example: https://github.com/CleverRaven/Cataclysm-DDA/pull/24213
        pr_body = pr_data['body'] if pr_data['body'] else ''

        return self.pr_factory.create(pr_number, pr_title, pr_author, pr_state, pr_body,
                                      pr_merge_hash, pr_merge_dttm, pr_update_dttm)
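For context on the Z-stripping above: before Python 3.11, datetime.fromisoformat() rejects the trailing 'Z' that the GitHub API returns, so it has to be removed first. A small sketch; note that stripping the suffix yields a naive datetime, while replacing it with '+00:00' keeps the UTC offset:

from datetime import datetime, timezone

merged_at = "2018-05-26T17:07:07Z"                                 # shape of GitHub timestamps
naive = datetime.fromisoformat(merged_at.rstrip('Z'))              # naive, offset discarded
aware = datetime.fromisoformat(merged_at.replace('Z', '+00:00'))   # timezone-aware UTC
assert aware == naive.replace(tzinfo=timezone.utc)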
Example #5
File: nps.py Project: merxbj/src
 def get(self, id, fromDateTime, active, name):
     if fromDateTime:
         result = [washkey for washkey in washkeys
                   if datetime.fromisoformat(washkey["CreatedTime"]) > fromDateTime]
     elif active:
         result = [washkey for washkey in washkeys if washkey["Active"] == active]
     elif name:
         result = [washkey for washkey in washkeys if name.lower() in washkey["Name"].lower()]
     else:
         result = [washkey for washkey in washkeys if not id or washkey["Id"] == id]
     return result, 200
Example #6
def datetime_from_isoformat(value: str):
    """Return a datetime object from an isoformat string.

    Args:
        value (str): Datetime string in isoformat.

    """
    if sys.version_info >= (3, 7):
        return datetime.fromisoformat(value)

    return datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%f')
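A quick check of the pre-3.7 fallback above. The strptime format only accepts values that carry a fractional-seconds part and no UTC offset, which is an assumption about the inputs this helper sees:

from datetime import datetime

value = "2021-06-01T12:30:45.123456"
parsed = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%f')
assert parsed == datetime(2021, 6, 1, 12, 30, 45, 123456)
# "2021-06-01T12:30:45" (no fraction) would raise ValueError with this format.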
Example #7
def convert_to_ts(iso_string):
	'''
	Pads the fractional-seconds part of an ISO 8601 datetime string with trailing zeros, then creates a timestamp from the formatted string
	'''

	ts, fraction = iso_string.rsplit('.', maxsplit=1)
	len_fraction = len(fraction)

	if len_fraction < 6:
		trailing_zeroes = '0' * (6-len_fraction)
		iso_string = ts + '.' + fraction + trailing_zeroes

	dt = datetime.fromisoformat(iso_string)
	ts = datetime.timestamp(dt)

	return ts
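A usage sketch for convert_to_ts, assuming the input always has a fractional part and carries no UTC offset; the padding matters because fromisoformat on Python 3.7-3.10 only accepts 3- or 6-digit fractions:

iso = "2021-01-01T00:00:00.25"   # 2-digit fraction: rejected as-is by Python 3.7-3.10
ts = convert_to_ts(iso)          # padded to ".250000" before parsing
print(ts)                        # exact value depends on the local timezone (naive input)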
Example #8
def snapshot_folder():
    """
    Use the commit date in UTC as folder name
    """
    logger.info("Snapshot folder")
    try:
        stdout = subprocess.check_output(["git", "show", "-s", "--format=%cI", "HEAD"])
    except subprocess.CalledProcessError as e:
        logger.error("Error: {}".format(e.output.decode('ascii', 'ignore').strip()))
        sys.exit(2)
    except FileNotFoundError as e:
        logger.error("Error: {}".format(e))
        sys.exit(2)
    ds = stdout.decode('ascii', 'ignore').strip()
    dt = datetime.fromisoformat(ds)
    utc = dt - dt.utcoffset()
    return utc.strftime("%Y%m%d_%H%M%S")
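The `dt - dt.utcoffset()` step above shifts the wall-clock time to UTC while leaving the original tzinfo attached, which is enough for formatting the folder name. An equivalent, slightly more explicit sketch using astimezone():

from datetime import datetime, timezone

dt = datetime.fromisoformat("2021-05-01T12:00:00+02:00")  # what `git show --format=%cI` emits
shifted = dt - dt.utcoffset()                             # UTC wall time, original tzinfo kept
aware_utc = dt.astimezone(timezone.utc)                   # proper UTC-aware datetime
assert shifted.strftime("%Y%m%d_%H%M%S") == aware_utc.strftime("%Y%m%d_%H%M%S")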
Example #9
def parse_iso8601(val):
    # See http://bugs.python.org/issue15873
    if hasattr(datetime, 'fromisoformat'):
        return datetime.fromisoformat(val)
    try:
        from django.utils.dateparse import parse_datetime
    except ImportError:
        try:
            from iso8601 import parse_date as parse_datetime
        except ImportError:
            raise Exception('No suitable iso8601 parser found!')
    try:
        result = parse_datetime(val)
    except Exception:
        result = None
    if result is None:
        raise ValueError("Could not parse %s as iso8601 date!" % val)
    return result
Example #10
    def _create_commit_from_api_data(self, commit_data):
        """Create a Commit instance based on data from GitHub API"""
        if commit_data is None:
            return None
        commit_sha = commit_data['sha']
        ### some commits have null in author.login :S like:
        ### https://api.github.com/repos/CleverRaven/Cataclysm-DDA/commits/569bef1891a843ec71654530a64d51939aabb3e2
        ### I try to use author.login when possible, or fall back to "commit.author", which doesn't match
        ### the usernames in the pull requests API (I guess it comes from the distinction between "name" and "username").
        ### I'd rather have a name that doesn't match than leave it empty.
        ### Anyway, the GitHub API is surprisingly inconsistent and not well thought out or documented.
        if commit_data['author'] is not None:
            commit_author = commit_data['author']['login']
        else:
            commit_author = commit_data['commit']['author']['name']
        commit_message = commit_data['commit']['message'].splitlines()[0] if commit_data['commit']['message'] else ''
        commit_dttm = commit_data['commit']['committer']['date']
        commit_dttm = datetime.fromisoformat(commit_dttm.rstrip('Z')) if commit_dttm else None
        commit_parents = tuple(p['sha'] for p in commit_data['parents'])

        return self.commit_factory.create(commit_sha, commit_message, commit_dttm, commit_author, commit_parents)
Example #11
 async def get(self, uid: int, cached_msg: CachedMessage = None,
               requirement: FetchRequirement = FetchRequirement.METADATA) \
         -> Optional[Message]:
     redis = self._redis
     prefix = self._prefix
     msg_prefix = prefix + b':msg:%d' % uid
     multi = redis.multi_exec()
     multi.sismember(prefix + b':uids', uid)
     multi.smembers(msg_prefix + b':flags')
     multi.get(msg_prefix + b':time')
     multi.sismember(prefix + b':recent', uid)
     if requirement & FetchRequirement.BODY:
         multi.get(msg_prefix + b':header')
         multi.get(msg_prefix + b':body')
     elif requirement & FetchRequirement.HEADERS:
         multi.get(msg_prefix + b':header')
         multi.echo(b'')
     else:
         multi.echo(b'')
         multi.echo(b'')
     multi.get(self._abort_key)
     exists, flags, time, recent, header, body, abort = \
         await multi.execute()
     MailboxAbort.assertFalse(abort)
     if not exists:
         if cached_msg is None:
             return None
         else:
             return Message(cached_msg.uid, cached_msg.permanent_flags,
                            cached_msg.internal_date, expunged=True)
     msg_flags = {Flag(flag) for flag in flags}
     msg_time = datetime.fromisoformat(time.decode('ascii'))
     msg_recent = bool(recent)
     if header:
         msg_content = MessageContent.parse_split(header, body)
         return Message(uid, msg_flags, msg_time, recent=msg_recent,
                        content=msg_content)
     else:
         return Message(uid, msg_flags, msg_time, recent=msg_recent)
Example #12
def main():
    with open(sys.argv[1], "r") as f:
        conf = json.load(f)

    created = (
      datetime.now(tz=timezone.utc)
      if conf["created"] == "now"
      else datetime.fromisoformat(conf["created"])
    )
    mtime = int(created.timestamp())
    store_dir = conf["store_dir"]

    from_image = load_from_image(conf["from_image"])

    with tarfile.open(mode="w|", fileobj=sys.stdout.buffer) as tar:
        layers = []
        layers.extend(add_base_layers(tar, from_image))

        start = len(layers) + 1
        for num, store_layer in enumerate(conf["store_layers"], start=start):
            print("Creating layer", num, "from paths:", store_layer,
                  file=sys.stderr)
            info = add_layer_dir(tar, store_layer, store_dir, mtime=mtime)
            layers.append(info)

        print("Creating layer", len(layers) + 1, "with customisation...",
              file=sys.stderr)
        layers.append(
          add_customisation_layer(
            tar,
            conf["customisation_layer"],
            mtime=mtime
          )
        )

        print("Adding manifests...", file=sys.stderr)

        image_json = {
            "created": datetime.isoformat(created),
            "architecture": conf["architecture"],
            "os": "linux",
            "config": overlay_base_config(from_image, conf["config"]),
            "rootfs": {
                "diff_ids": [f"sha256:{layer.checksum}" for layer in layers],
                "type": "layers",
            },
            "history": [
                {
                  "created": datetime.isoformat(created),
                  "comment": f"store paths: {layer.paths}"
                }
                for layer in layers
            ],
        }

        image_json = json.dumps(image_json, indent=4).encode("utf-8")
        image_json_checksum = hashlib.sha256(image_json).hexdigest()
        image_json_path = f"{image_json_checksum}.json"
        add_bytes(tar, image_json_path, image_json, mtime=mtime)

        manifest_json = [
            {
                "Config": image_json_path,
                "RepoTags": [conf["repo_tag"]],
                "Layers": [layer.path for layer in layers],
            }
        ]
        manifest_json = json.dumps(manifest_json, indent=4).encode("utf-8")
        add_bytes(tar, "manifest.json", manifest_json, mtime=mtime)

        print("Done.", file=sys.stderr)
Example #13
def media_scraper(results,
                  api,
                  formatted_directories,
                  username,
                  api_type,
                  parent_type=""):
    media_set = {}
    directories = []
    session = api.sessions[0]
    if api_type == "Stories":
        if "stories" in results:
            items = results["stories"]
            for item in items:
                item["text"] = results["title"]
            results = results["stories"]
    if api_type == "Archived":
        pass
    if api_type == "Posts":
        pass
    if api_type == "Messages":
        pass
    if not results or "error" in results:
        return media_set
    if "result" in results:
        session = results["session"]
        results = results["result"]
        if "error" in results:
            return media_set
    download_path = formatted_directories["download_directory"]
    for location in formatted_directories["locations"]:
        sorted_directories = copy.copy(location["sorted_directories"])
        master_date = "01-01-0001 00:00:00"
        media_type = location["media_type"]
        alt_media_type = location["alt_media_type"]
        file_directory_format = json_settings["file_directory_format"]
        if api_type == "Archived":
            x = file_directory_format.split(os.sep)
            for y in x:
                substr = "{api_type}"
                if substr == y:
                    new_path = os.path.join(substr, parent_type)
                    file_directory_format = file_directory_format.replace(
                        substr, new_path)
                    break
        seperator = " | "
        print(
            f"Scraping [{seperator.join(alt_media_type)}]. Should take less than a minute."
        )
        media_set2 = {}
        media_set2["valid"] = []
        media_set2["invalid"] = []
        for media_api in results:
            if api_type == "Messages":
                media_api["rawText"] = media_api["text"]
            if api_type == "Mass Messages":
                media_user = media_api["fromUser"]
                media_username = media_user["username"]
                if media_username != username:
                    continue
            if not media_api["media"] and "rawText" in media_api:
                if media_type == "Texts":
                    new_dict = dict()
                    new_dict["post_id"] = media_api["id"]
                    new_dict["text"] = media_api["rawText"]
                    media_set2["valid"].append(new_dict)
            for media in media_api["media"]:
                date = "-001-11-30T00:00:00+00:00"
                size = 0
                link = ""
                if "source" in media:
                    source = media["source"]
                    link = source["source"]
                    size = media["info"]["preview"][
                        "size"] if "info" in media_api else 1
                    date = media_api[
                        "postedAt"] if "postedAt" in media_api else media_api[
                            "createdAt"]
                if "src" in media:
                    link = media["src"]
                    size = media["info"]["preview"][
                        "size"] if "info" in media_api else 1
                    date = media_api["createdAt"]
                if not link:
                    continue
                matches = ["us", "uk", "ca", "ca2", "de"]

                url = urlparse(link)
                subdomain = url.hostname.split('.')[0]
                preview_link = media["preview"]
                if any(subdomain in nm for nm in matches):
                    subdomain = url.hostname.split('.')[1]
                    if "upload" in subdomain:
                        continue
                    if "convert" in subdomain:
                        link = preview_link
                rules = [link == "", preview_link == ""]
                if all(rules):
                    continue
                new_dict = dict()
                new_dict["post_id"] = media_api["id"]
                new_dict["media_id"] = media["id"]
                new_dict["links"] = []
                for xlink in link, preview_link:
                    if xlink:
                        new_dict["links"].append(xlink)
                        break
                new_dict["price"] = media_api[
                    "price"] if "price" in media_api else None
                if date == "-001-11-30T00:00:00+00:00":
                    date_string = master_date
                    date_object = datetime.strptime(master_date,
                                                    "%d-%m-%Y %H:%M:%S")
                else:
                    date_object = datetime.fromisoformat(date)
                    date_string = date_object.replace(
                        tzinfo=None).strftime("%d-%m-%Y %H:%M:%S")
                    master_date = date_string

                if media["type"] not in alt_media_type:
                    continue
                if "rawText" not in media_api:
                    media_api["rawText"] = ""
                text = media_api["rawText"] if media_api["rawText"] else ""
                matches = [s for s in ignored_keywords if s in text]
                if matches:
                    print("Matches: ", matches)
                    continue
                new_dict["postedAt"] = date_string
                post_id = new_dict["post_id"]
                media_id = new_dict["media_id"]
                filename = link.rsplit('/', 1)[-1]
                filename, ext = os.path.splitext(filename)
                ext = ext.__str__().replace(".", "").split('?')[0]
                price = new_dict["price"]
                new_dict["text"] = text

                option = {}
                option = option | new_dict
                option["site_name"] = "OnlyFans"
                option["filename"] = filename
                option["api_type"] = api_type
                option["media_type"] = media_type
                option["ext"] = ext
                option["username"] = username
                option["date_format"] = date_format
                option["maximum_length"] = maximum_length
                option["directory"] = download_path

                prepared_format = prepare_reformat(option)
                file_directory = main_helper.reformat(prepared_format,
                                                      file_directory_format)
                prepared_format.directory = file_directory
                file_path = main_helper.reformat(prepared_format,
                                                 filename_format)
                new_dict["directory"] = os.path.join(file_directory)
                new_dict["filename"] = os.path.basename(file_path)
                new_dict["session"] = session
                if size == 0:
                    media_set2["invalid"].append(new_dict)
                    continue
                if file_directory not in directories:
                    directories.append(file_directory)
                media_set2["valid"].append(new_dict)
        if media_set2["valid"] or media_set2["invalid"]:
            media_set[media_type] = media_set2
    media_set["directories"] = directories
    return media_set
Example #14
 def mock_get_all(self) -> List[Ride]:
     return [
         Ride(0, datetime.fromisoformat('2020-01-01T23:00:00'), 192,
              timedelta(seconds=28800)),
     ]
Example #15
 def from_dict(cls, d: Dict[str, Any]) -> "Command":
     attrs = {}
     for k in cls.__keys:
         attrs[k] = d[k]
     attrs["date"] = datetime.fromisoformat(attrs["date"])
     return Command(**attrs)
Example #16
def _sensor_to_datetime(sensor):
    return datetime.fromisoformat(sensor.state)
Example #17
def get_collection_items(collection_id=None,
                         roles=[],
                         item_id=None,
                         bbox=None,
                         time=None,
                         ids=None,
                         collections=None,
                         cubes=None,
                         intersects=None,
                         page=1,
                         limit=10,
                         query=None,
                         **kwargs):
    """Retrieve a list of collection items based on filters.

    :param collection_id: Single Collection ID to include in the search for items.
                          Only Items in one of the provided Collection will be searched, defaults to None
    :type collection_id: str, optional
    :param item_id: item identifier, defaults to None
    :type item_id: str, optional
    :param bbox: bounding box for intersection [west, north, east, south], defaults to None
    :type bbox: list, optional
    :param time: Single date+time, or a range ('/' separator), formatted to RFC 3339, section 5.6, defaults to None
    :type time: str, optional
    :param ids: Array of Item ids to return. All other filter parameters that further restrict the
                number of search results are ignored, defaults to None
    :type ids: list, optional
    :param collections: Array of Collection IDs to include in the search for items.
                        Only Items in one of the provided Collections will be searched, defaults to None
    :type collections: list, optional
    :param cubes: Bool indicating if only cubes should be returned, defaults to None
    :type cubes: bool, optional
    :param intersects: Searches items by performing intersection between their geometry and provided GeoJSON geometry.
                       All GeoJSON geometry types must be supported, defaults to None
    :type intersects: dict, optional
    :param page: The page offset of results, defaults to 1
    :type page: int, optional
    :param limit: The maximum number of results to return (page size), defaults to 10
    :type limit: int, optional
    :return: list of collection items
    :rtype: list
    """
    columns = [
        Collection.name.label('collection'),
        Item.name.label('item'),
        Item.start_date.label('start'),
        Item.end_date.label('end'), Item.assets,
        func.ST_AsGeoJSON(Item.geom).label('geom'),
        func.Box2D(Item.geom).label('bbox'),
        Tile.name.label('tile')
    ]

    where = [
        Collection.id == Item.collection_id, Item.tile_id == Tile.id,
        or_(Collection.is_public.is_(True),
            Collection.id.in_([int(r.split(':')[0]) for r in roles]))
    ]

    if ids is not None:
        where += [Item.id.in_(ids.split(','))]
    elif item_id is not None:
        where += [Item.id.like(item_id)]
    else:
        if collections is not None:
            where += [Collection.name.in_(collections.split(','))]
        elif collection_id is not None:
            where += [Collection.name.like(collection_id)]

        if intersects is not None:
            where += [
                func.ST_Intersects(func.ST_GeomFromGeoJSON(str(intersects)),
                                   Item.geom)
            ]

        if query:
            filters = create_query_filter(query)
            if filters:
                where += filters

        if bbox is not None:
            try:
                split_bbox = [float(x) for x in bbox.split(',')]

                where += [
                    func.ST_Intersects(
                        func.ST_MakeEnvelope(split_bbox[0], split_bbox[1],
                                             split_bbox[2], split_bbox[3],
                                             func.ST_SRID(Item.geom)),
                        Item.geom)
                ]
            except:
                raise InvalidBoundingBoxError(f"'{bbox}' is not a valid bbox.")

        if time is not None:
            if "/" in time:
                time_start, time_end = time.split("/")
                time_end = datetime.fromisoformat(time_end)
                where += [
                    or_(Item.end_date <= time_end, Item.start_date <= time_end)
                ]
            else:
                time_start = datetime.fromisoformat(time)
            where += [
                or_(Item.start_date >= time_start, Item.end_date >= time_start)
            ]

    query = session.query(*columns).filter(*where).order_by(
        Item.start_date.desc())

    result = query.paginate(page=int(page),
                            per_page=int(limit),
                            error_out=False,
                            max_per_page=int(BDC_STAC_MAX_LIMIT))

    return result
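For reference, a small sketch of the two `time` forms the filter above accepts: a single instant, or a '/'-separated start/end range, both parsed with fromisoformat (the sample value is invented):

from datetime import datetime

time = "2020-01-01T00:00:00/2020-12-31T23:59:59"   # hypothetical range value
if "/" in time:
    time_start, time_end = (datetime.fromisoformat(t) for t in time.split("/"))
else:
    time_start = datetime.fromisoformat(time)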
Example #18
def top_contributors_questions(start=None,
                               end=None,
                               locale=None,
                               product=None,
                               count=10,
                               page=1):
    """Get the top Support Forum contributors."""

    search = AnswerDocument.search()

    search = (
        search.filter(
            # filter out answers by the question author
            "script",
            script=
            "doc['creator_id'].value != doc['question_creator_id'].value",
        ).filter(
            # filter answers created between `start` and `end`, or within the last 90 days
            "range",
            created={
                "gte": start or datetime.now() - timedelta(days=90),
                "lte": end
            },
        )
        # set the query size to 0 because we don't care about the results
        # we're just filtering for the aggregations defined below
        .extra(size=0))
    if locale:
        search = search.filter("term", locale=locale)
    if product:
        search = search.filter("term", question_product_id=product.id)

    # our filters above aren't perfect, and don't only return answers from contributors
    # so we need to collect more buckets than `count`, so we can hopefully find `count`
    # number of contributors within
    search.aggs.bucket(
        # create buckets for the `count * 10` most active users
        "contributions",
        A("terms", field="creator_id", size=count * 10),
    ).bucket(
        # within each of those, create a bucket for the most recent answer, and extract its date
        "latest",
        A(
            "top_hits",
            sort={"created": {
                "order": "desc"
            }},
            _source={"includes": "created"},
            size=1,
        ),
    )

    contribution_buckets = search.execute().aggregations.contributions.buckets

    if not contribution_buckets:
        return [], 0

    user_ids = [bucket.key for bucket in contribution_buckets]
    contributor_group_ids = list(
        Group.objects.filter(name__in=CONTRIBUTOR_GROUPS).values_list(
            "id", flat=True))

    # fetch all the users returned by the aggregation which are in the contributor groups
    user_hits = (ProfileDocument.search().query("terms", **{
        "_id": user_ids
    }).query("terms", group_ids=contributor_group_ids).extra(
        size=len(user_ids)).execute().hits)
    users = {hit.meta.id: hit for hit in user_hits}

    total_contributors = len(user_hits)
    top_contributors = []
    for bucket in contribution_buckets:
        if len(top_contributors) == page * count:
            # stop once we've collected enough contributors
            break
        user = users.get(bucket.key)
        if user is None:
            continue
        last_activity = datetime.fromisoformat(
            bucket.latest.hits.hits[0]._source.created)
        days_since_last_activity = (datetime.now(tz=timezone.utc) -
                                    last_activity).days
        top_contributors.append({
            "count": bucket.doc_count,
            "term": bucket.key,
            "user": {
                "id": user.meta.id,
                "username": user.username,
                "display_name": user.name,
                "avatar": getattr(getattr(user, "avatar", None), "url", None),
                "days_since_last_activity": days_since_last_activity,
            },
        })

    return top_contributors[count * (page - 1):], total_contributors
Example #19
def parse_datetime(datestring: str) -> datetime:
    datestring = datestring.replace("Z", "+00:00")
    return datetime.fromisoformat(datestring)
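A minimal check of the helper above, assuming the input uses the RFC 3339 'Z' suffix common in JSON APIs; replacing it with '+00:00' keeps the result timezone-aware:

from datetime import timezone

assert parse_datetime("2020-05-27T01:31:38Z").tzinfo == timezone.utc
assert parse_datetime("2020-05-27T01:31:38+02:00").utcoffset().total_seconds() == 7200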
Example #20
    def test_model_creation(self):
        # Create with all available attributes using keyword args
        beverage = Beverage(
            beverage_id="This Is My #3 BeverageId",
            producer="Westbrook",
            name="Gose",
            year=2013,
            size="12 oz",
            location="Home",
            batch=1,
            bottle_date="2013-06-24",
            qty=14,
            qty_cold=6,
            style="Sour",
            specific_style="Gose",
            untappd="https://untappd.com/b/westbrook-brewing-co-gose/155824",
            aging_potential=3,
            trade_value=3,
            for_trade=True,
            note="My go-to when mowing the lawn.",
            date_added="2020-04-10",
            last_modified="2020-04-11")

        assert beverage.beverage_id == "This Is My #3 BeverageId"
        assert beverage.producer == "Westbrook"
        assert beverage.name == "Gose"
        assert beverage.year == 2013
        assert beverage.size == "12 oz"
        assert beverage.location == "Home"
        assert beverage.batch == 1
        assert beverage.bottle_date == "2013-06-24"
        assert beverage.qty == 14
        assert beverage.qty_cold == 6
        assert beverage.style == "Sour"
        assert beverage.specific_style == "Gose"
        assert beverage.untappd == "https://untappd.com/b/westbrook-brewing-co-gose/155824"
        assert beverage.aging_potential == 3
        assert beverage.trade_value == 3
        assert beverage.for_trade is True
        assert beverage.note == "My go-to when mowing the lawn."
        assert beverage.date_added == datetime.fromisoformat("2020-04-10")
        assert (beverage.last_modified - datetime.utcnow()).total_seconds() < 1

        # Same test, but created by parsing a dictionary
        beverage = Beverage(**default_beverage)

        assert beverage.beverage_id == "This Is My #4 BeverageId"
        assert beverage.producer == "Westbrook"
        assert beverage.name == "Gose"
        assert beverage.year == 2013
        assert beverage.size == "12 oz"
        assert beverage.location == "Home"
        assert beverage.batch == 1
        assert beverage.bottle_date == "2013-06-24"
        assert beverage.qty == 14
        assert beverage.qty_cold == 9
        assert beverage.style == "Sour"
        assert beverage.specific_style == "Gose"
        assert beverage.untappd == "https://untappd.com/b/westbrook-brewing-co-gose/155824"
        assert beverage.aging_potential == 3
        assert beverage.trade_value == 3
        assert beverage.for_trade is True
        assert beverage.note == "My go-to when mowing the lawn."
        assert beverage.date_added == datetime.fromisoformat("2020-04-10")
        assert (beverage.last_modified - datetime.utcnow()).total_seconds() < 1
Example #21
                          value=CompletionTimes)
Excercise_table_df.insert(loc=Excercise_table_df.shape[1],
                          column="Excercise",
                          value=Excercise)
Excercise_table_df.insert(loc=Excercise_table_df.shape[1],
                          column="ExcerciseID",
                          value=ExcerciseID)

### A USER can complete multiple exercises! This is the 2nd SQL question
UserId = Excercise_table_df['id'].to_list()
UsersExcercise = []
CompletionTimePerExcercise = []
for u in range(0, len(UserId)):
    ex = random.randint(0, 4)  ### Random number of exercises a user completes
    for i in range(ex):  ### Fill the new columns
        date_time_obj = datetime.fromisoformat(CompletionTimes[u])
        date_time_obj = date_time_obj + timedelta(days=abs(
            int(random.gauss(
                3,
                7))))  #### Shift around the completion times by 3 +/- 7 days
        Excercise_table_df = Excercise_table_df.append(
            {
                'id': UserId[u],
                'Excercise_Completion': date_time_obj.isoformat(),
                'Excercise': Assignments[i],
                'ExcerciseID': i
            },
            ignore_index=True)

Excercise_table_df.to_csv("ExcerciseTable.csv")
Example #22
    "name": "Gose",
    "year": 2013,
    "size": "12 oz",
    "location": "Home",
    "batch": 1,
    "bottle_date": "2013-06-24",
    "qty": 14,
    "qty_cold": 9,
    "style": "Sour",
    "specific_style": "Gose",
    "untappd": "https://untappd.com/b/westbrook-brewing-co-gose/155824",
    "aging_potential": 3,
    "trade_value": 3,
    "for_trade": True,
    "note": "My go-to when mowing the lawn.",
    "date_added": datetime.fromisoformat("2020-04-10"),
    "last_modified": datetime.fromisoformat("2020-04-11")
}


class TestBeverageModel:
    def test_required_attributes(self):
        # Must provide values for producer, beverage name, year, size, and location
        with pytest.raises(KeyError):
            beverage = Beverage()

        # Missing producer
        with pytest.raises(KeyError):
            beverage = Beverage(name="Gose",
                                year=2013,
                                size="12 oz",
Example #23
def _date_format(value):
    if not value:
        return ''

    date = datetime.fromisoformat(value)
    return date_format(date, 'SHORT_DATE_FORMAT')
Example #24
def customStringDateFormat(string):
    date_from_string = datetime.fromisoformat(
        string[:-1]).strftime("%d/%m/%Y %H:%M:%S")
    return date_from_string
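A usage sketch for the formatter above, assuming the input always ends with a literal 'Z' (the [:-1] slice blindly drops the last character, so a string without the suffix would lose a digit):

print(customStringDateFormat("2020-05-27T01:31:38Z"))  # 27/05/2020 01:31:38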
Example #25
 def setUp(self):
     self.factory = YFinSignalFactory("MSFT")
     self.start_date = datetime.fromisoformat("2019-01-01")
     self.end_date = self.start_date + timedelta(days=100)
Example #26
def rfc3339_to_datetime(rfc3339_string):
    return datetime.fromisoformat(rfc3339_string)
Example #27
def timeuntilstart(event):
    start = datetime.fromisoformat(
        event["start"].get("dateTime", event["start"].get("date"))
    )
    return start - datetime.now(timezone.utc)
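A usage sketch with a hypothetical Calendar-API-style event. The subtraction above only works when the parsed start is timezone-aware; an all-day event that only has a "date" field would parse as naive and raise TypeError:

from datetime import datetime, timezone

event = {"start": {"dateTime": "2030-01-01T09:00:00+00:00"}}  # hypothetical event payload
print(timeuntilstart(event))  # timedelta until the event starts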
Example #28
    async def on_ready(self):
        """Bot startup."""
        # commands.Bot.remove_command(self, name="help")
        # Wait until config cache is populated with stuff from db and on_connect ran
        await self.wait_for_connected()

        if self.guild is None:
            logger.error(
                "Spegnimento per via della configurazione `GUILD_ID` non valida."
            )
            return await self.logout()

        logger.line()
        logger.debug("Client pronto.")
        logger.info("Loggato come: %s", self.user)
        logger.info("ID Bot: %s", self.user.id)
        owners = ", ".join(
            getattr(self.get_user(owner_id), "name", str(owner_id))
            for owner_id in self.owner_ids)
        logger.info("Proprietari: %s", owners)
        logger.info("Prefix: %s", self.prefix)
        logger.info("Nome server: %s", self.guild.name)
        logger.info("ID Server: %s", self.guild.id)
        if self.using_multiple_server_setup:
            logger.info("Ricezione ID Server: %s", self.modmail_guild.id)
        logger.line()

        await self.threads.populate_cache()

        # closures
        closures = self.config["closures"]
        logger.info("Ci sono %d stanze che stanno per essere chiuse.",
                    len(closures))
        logger.line()

        for recipient_id, items in tuple(closures.items()):
            after = (datetime.fromisoformat(items["time"]) -
                     datetime.utcnow()).total_seconds()
            if after <= 0:
                logger.debug("Sto chiudendo la stanza per l'utente %s.",
                             recipient_id)
                after = 0
            else:
                logger.debug(
                    "La stanza dell'utente %s verrà chiusa tra %s secondi.",
                    recipient_id, after)

            thread = await self.threads.find(recipient_id=int(recipient_id))

            if not thread:
                # If the channel is deleted
                logger.debug(
                    "Non è stato possibile chiudere la stanza per l'utente %s.",
                    recipient_id)
                self.config["closures"].pop(recipient_id)
                await self.config.update()
                continue

            await thread.close(
                closer=self.get_user(items["closer_id"]),
                after=after,
                silent=items["silent"],
                delete_channel=items["delete_channel"],
                message=items["message"],
                auto_close=items.get("auto_close", False),
            )

        for log in await self.api.get_open_logs():
            if self.get_channel(int(log["channel_id"])) is None:
                logger.debug("Unable to resolve thread with channel %s.",
                             log["channel_id"])
                log_data = await self.api.post_log(
                    log["channel_id"],
                    {
                        "open": False,
                        "closed_at": str(datetime.utcnow()),
                        "close_message":
                        "Channel has been deleted, no closer found.",
                        "closer": {
                            "id": str(self.user.id),
                            "name": self.user.name,
                            "discriminator": self.user.discriminator,
                            "avatar_url": str(self.user.avatar_url),
                            "mod": True,
                        },
                    },
                )
                if log_data:
                    logger.debug(
                        "La stanza con canale %s è stata chiusa con successo.",
                        log["channel_id"])
                else:
                    logger.debug(
                        "Non è stato possibile chiudere la stanza con canale %s, salto.",
                        log["channel_id"],
                    )

        self.metadata_loop = tasks.Loop(
            self.post_metadata,
            seconds=0,
            minutes=0,
            hours=1,
            count=None,
            reconnect=True,
            loop=None,
        )
        self.metadata_loop.before_loop(self.before_post_metadata)
        self.metadata_loop.start()
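For reference, a hedged sketch of a single `closures` entry as the loop above consumes it; the field names come from the lookups in the code, the values are invented:

from datetime import datetime

items = {"time": "2030-01-01T00:00:00", "closer_id": 1234, "silent": False,
         "delete_channel": True, "message": None, "auto_close": False}
after = (datetime.fromisoformat(items["time"]) - datetime.utcnow()).total_seconds()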
Example #29
def convert_time_to_ts(_time):
    _t = datetime.fromisoformat(' '.join(_time))

    return floor(_t.timestamp())
Example #30
def from_iso(value: str):
    return datetime.fromisoformat(value)
Example #31
def time_series_plot(c: Config, n: float):
    """Plot 24min time series of cracking, for multiple cracked bridges.

    For each bridge (hard-coded), a time series of strain fem is plotted.
    For each bridge it is initially in healthy condition, and the crack occurs
    halfway through.

    Args:
        n: float, meters in front of the crack zone where to place sensor.

    """

    # First construct one day (24 minutes) of traffic.
    total_mins = 24
    total_seconds = total_mins * 60
    traffic_scenario = normal_traffic(c=c, lam=5, min_d=2)
    traffic_sequence, traffic, traffic_array = load_traffic(
        c=c,
        traffic_scenario=traffic_scenario,
        max_time=total_seconds,
    )
    traffic_array.shape

    # Temperatures for one day.
    temps_day = temperature.from_to_mins(
        temperature.load("holly-springs"),
        datetime.fromisoformat(f"2019-07-03T00:00"),
        datetime.fromisoformat(f"2019-07-03T23:59"),
    )
    print(f"len temps = {len(temps_day['solar'])}")
    print(f"len temps = {len(temps_day['temp'])}")

    # Then generate some cracking time series.
    damages = [
        HealthyDamage(),
        transverse_crack(),
        transverse_crack(length=14.0, at_x=48.0),
    ]
    sensors = [
        Point(x=52, z=-8.4),  # Sensor in middle of lane.
        Point(x=damages[1].crack_area(c.bridge)[0] - n,
              z=-8.4),  # Sensor in front of crack zone.
        Point(x=damages[2].crack_area(c.bridge)[0] - n,
              z=-8.4),  # Sensor in front of crack zone.
    ]
    [print(f"Sensor {i} = {sensors[i]}") for i in range(len(sensors))]
    time_series = [
        crack_time_series(
            c=c,
            traffic_array=traffic_array,
            traffic_array_mins=total_mins,
            sensor=sensor,
            crack_frac=0.5,
            damage=damage,
            temps=temps_day["temp"],
            solar=temps_day["solar"],
        ) for damage, sensor in zip(damages, sensors)
    ]
    plt.portrait()
    for i, (y_trans, strain) in enumerate(time_series):
        x = np.arange(len(strain)) * c.sensor_hz / 60
        x_m = sensors[i].x
        damage_str = "Healthy Bridge"
        if i == 1:
            damage_str = "0.5 m crack zone"
        if i == 2:
            damage_str = "14 m crack zone"
        plt.subplot(len(time_series), 2, i * 2 + 1)
        plt.plot(x, y_trans * 1000, color="tab:blue")
        if i < len(time_series) - 1:
            plt.tick_params(axis="x", bottom=False, labelbottom=False)
        else:
            plt.xlabel("Hours")
        plt.title(f"At x = {x_m} m\n{damage_str}")
        plt.ylabel("Y trans. (mm)")

        plt.subplot(len(time_series), 2, i * 2 + 2)
        plt.plot(x, strain * 1e6, color="tab:orange")
        if i < len(time_series) - 1:
            plt.tick_params(axis="x", bottom=False, labelbottom=False)
        else:
            plt.xlabel("Hours")
        plt.title(f"At x = {x_m} m,\n{damage_str}")
        plt.ylabel("Microstrain XXB")
    plt.tight_layout()
    plt.savefig(c.get_image_path("crack", "time-series-q5.pdf"))
    plt.close()
Example #32
from datetime import datetime

URL = 'https://hgy780tcj2.execute-api.eu-central-1.amazonaws.com/dev/data'

DUMMY_TRANSACTION = {
    'charging_end': datetime.fromisoformat("2018-05-26T17:07:07"),
    'charging_start': datetime.fromisoformat("2018-05-26T00:05:04"),
    'country_code': "US",
    'evseid': "GI*BRA*H732*06",
    'meter_value_end': 4374.142,
    'meter_value_start': 4132.568,
    'metering_signature': "",
    'partner_product_id': "",
    'provider_id': "HF-MXK",
    'session_id': "d7ef6cb2-e4a8-4159-862e-73a7fc2f3a92",
    'session_end': datetime.fromisoformat("2018-05-26T17:07:07"),
    'session_start': datetime.fromisoformat("2018-05-25T05:52:49"),
    'uid': "e881a4da78f95356e786d90b8da68787",
}

RAW_TRANSACTION = {
    'Charging end': "2018-05-26T17:07:07",
    'Charging start': "2018-05-26T00:05:04",
    'CountryCode': "US",
    'EVSEID': "GI*BRA*H732*06",
    'Meter value end': "4374.142",
    'Meter value start': "4132.568",
    'Metering signature': "",
    "Partner product ID": "false",
    "Proveider ID": "HF-MXK",
    "Session ID": "d7ef6cb2-e4a8-4159-862e-73a7fc2f3a92",
Example #33
async def test_evaluate_jsonvalue_date(page):
    date = datetime.fromisoformat("2020-05-27T01:31:38.506")
    result = await page.evaluate(
        '() => ({ date: new Date("2020-05-27T01:31:38.506Z") })')
    assert result == {"date": date}
Example #34
def make_action(request):
    if request.method == 'GET':
        data = request.GET.dict()
        if ('timestamp' not in data) or ('query' not in data) or ('run_id'
                                                                  not in data):
            raise SystemError("Required parameters missing")

        data['timestamp'] = datetime.fromisoformat(data['timestamp'])
        run = Run.objects.get(id=data['run_id'])
        if run.duration is not None:
            raise SystemError("This run is finished!")

        query = eval(data['query'])

        #  if timezone.make_naive(Snapshot.objects.filter(run=run).last().timestamp) > data['timestamp']:
        #    raise SystemError("You are trying to change the past!")

        for key in query:
            if key not in StupidEmulator.pairs:
                raise SystemError(f"Invalid pair {key}")

        last = Snapshot.objects.filter(run=run).last()

        try:
            now_record = get_closest(Record, data['timestamp'])
            if now_record.timestamp < Snapshot.objects.filter(
                    run=run).last().timestamp:
                raise SystemError("You are trying to change the past!")
        except ValueError:
            raise SystemError("Bad times...")

        for id in range(last.record.id, now_record.id):
            record = Record.objects.get(id=id)
            rates = {'btcusdt': record.price}
            snapshot = Snapshot.objects.create(
                run=run,
                balances=eval(last.balances),
                usd_balance=StupidEmulator.count_usdt(eval(last.balances),
                                                      rates),
                timestamp=record.timestamp,
                record=record)

        rates = {'btcusdt': now_record.price}
        delta, new_balance = StupidEmulator.make_order(eval(last.balances),
                                                       query, rates)
        snapshot = Snapshot.objects.create(
            run=run,
            balances=new_balance,
            usd_balance=StupidEmulator.count_usdt(new_balance, rates),
            timestamp=now_record.timestamp,
            record=now_record)
        action = Action.objects.create(snapshot=snapshot,
                                       query=str(query),
                                       delta=str(delta))
        result = model_to_dict(action)
        result['snapshot'] = model_to_dict(action.snapshot)
        result['snapshot']['record'] = model_to_dict(action.snapshot.record)
        return HttpResponse(json.dumps(result, cls=DjangoJSONEncoder))

    else:
        raise SystemError('Invalid method')
Example #35
async def test_evaluate_roundtrip_date(page):
    date = datetime.fromisoformat("2020-05-27T01:31:38.506")
    result = await page.evaluate("date => date", date)
    assert result == date
Example #36
def add_cores(cpi: str, end: str, ini: Optional[str] = None) -> None:
    """Add the computed CPI cores to the database."""
    cpi = cpi.upper()
    dvi = fetch_group(group="ITEM", ticker=True,
                      indicator=cpi)  # all items for all dates
    dpi = fetch_group(group="ITEM", kind="PESO", ticker=True,
                      indicator=cpi)  # all item for all dates
    dg = fetch_group(group="GERAL", indicator=cpi)  #

    import pendulum
    end = pendulum.parse(end)
    if ini is None:
        ini = end
    else:
        if pendulum.parse(ini) >= pendulum.datetime(2015, 1, 1):
            ini = pendulum.parse(ini)
        else:
            ini = pendulum.datetime(2015, 1, 1)
    period = pendulum.period(ini, end)
    months = [p.format("YYYY-MM-DD") for p in period.range('months')]
    for month in months:
        dvs = fetch_all(
            kind="VARIACAO", indicator=cpi, date_ini=month,
            date_end=month)  # needs to fetch all indices for addhoc
        dps = fetch_all(
            kind="PESO", indicator=cpi, date_ini=month,
            date_end=month)  # needs to fetch all indices for addhoc
        dvsub = fetch_group(group="SUBITEM",
                            kind="VARIACAO",
                            date_ini=month,
                            date_end=month,
                            indicator=cpi,
                            ticker=True)  # needs to fetch all indices
        dpsub = fetch_group(group="SUBITEM",
                            kind="PESO",
                            date_ini=month,
                            date_end=month,
                            indicator=cpi,
                            ticker=True)  # needs to fetch all indices

        #cores
        aggr_cores = [
            p55(month, dvsub, dpsub),
            difusao(month, dvsub),
            core_ma(month, dvi, dpi),
            core_smooth(month, dvi, dpi),  #history dependent
            core_dp(month, dvi, dpi, dg),  #history dependent
            core_adhoc("EXO", dvs, dps, month),
            core_adhoc("EX1", dvs, dps, month),
            core_adhoc("EX2", dvs, dps, month),
            core_adhoc("EX3", dvs, dps, month),
            core_adhoc("MONITORADOS", dvs, dps, month),
            core_adhoc("LIVRES", dvs, dps, month),
            core_adhoc("TRADABLE", dvs, dps, month),
            core_adhoc("DURAVEIS", dvs, dps, month),
            core_adhoc("SERVICOS", dvs, dps, month),
            core_adhoc("SERVICOS_CORE", dvs, dps, month),
            core_adhoc("INDUSTRIAIS", dvs, dps, month)
        ]

        cores_tickers = [
            f"{cpi}.core_p55", f"{cpi}.core_difusao", f"{cpi}.core_aparadas",
            f"{cpi}.core_aparadas_suavizadas", f"{cpi}.core_dp",
            f"{cpi}.core_EXO", f"{cpi}.core_EX1", f"{cpi}.core_EX2",
            f"{cpi}.core_EX3", f"{cpi}.core_monitorados", f"{cpi}.core_livres",
            f"{cpi}.core_tradables", f"{cpi}.core_duraveis", f"{cpi}.servicos",
            f"{cpi}.core_servicos", f"{cpi}.core_industriais"
        ]

        df = pd.DataFrame(data=[aggr_cores],
                          columns=[c.upper() for c in cores_tickers],
                          index=[month]).dropna(axis=1)
        for col in df.columns:
            add_obs(col, dt.fromisoformat(df.index[0]), df.loc[:,
                                                               col].values[0])

    print(f"Cores for {cpi} and {month} added to the database")
Example #37
def tz_constructor(loader: yaml.loader.SafeLoader, node: yaml.nodes.ScalarNode) -> datetime:
    dt_str, timezone = loader.construct_scalar(node).split(';')
    return datetime.fromisoformat(dt_str).replace(tzinfo=pytz.timezone(timezone))
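A registration/usage sketch for the constructor above, assuming the values are tagged `!tz` in the YAML source (the tag name is an assumption). Note that attaching a pytz zone via replace() uses the zone's base offset rather than a DST-aware one; pytz recommends localize() for that:

import yaml

yaml.SafeLoader.add_constructor("!tz", tz_constructor)
doc = yaml.safe_load('when: !tz "2021-06-01T12:00:00;Europe/Rome"')
print(doc["when"])  # datetime carrying pytz's base offset for Europe/Rome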
Example #38
def parse_iso_8601_string_to_datetime(date_string: str) -> datetime:
    return datetime.fromisoformat(date_string).astimezone(timezone.utc)
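A caveat sketch for the converter above: when the string carries no offset, astimezone() treats the naive result as local time before converting, so the output depends on the machine's timezone; with an explicit offset the conversion is unambiguous:

print(parse_iso_8601_string_to_datetime("2020-05-27T01:31:38+02:00"))
# 2020-05-26 23:31:38+00:00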
Example #39
def main(args):
    configure_logging(level=getattr(logging, args.log.upper()))
    logger.debug("Reading CPS file into memory...")

    num_read = 0
    intervals = []  # Each entry is a (datetime, int) tuple
    earliest = datetime.max
    latest = datetime.min

    with args.cps_file as cps_file:
        cps_lines = csv.DictReader(cps_file, fieldnames=['dt', 'cps'])
        for cps_line in cps_lines:
            num_read += 1
            dt = datetime.fromisoformat(cps_line['dt'])

            # Filter records outside of the chosen period.
            if args.start and dt < args.start: continue
            if args.end and dt >= args.end: continue

            # We can't guarantee that the records are in order, so note the earliest and latest.
            if dt < earliest: earliest = dt
            if dt > latest: latest = dt

            cps = int(cps_line['cps'])
            intervals.append((dt, cps))

    if not intervals:
        sys.exit("No records found in the specified time period")

    logger.debug("%s entries read, %s kept, earliest: %s, latest: %s",
                 num_read, len(intervals), earliest, latest)

    # Adjust start and end date/times if they were not explicitly set.
    start = args.start if args.start else earliest
    end = args.end if args.end else latest + ONE_SECOND

    # Calculate how long our CPS and queue_time arrays need to be.
    # Each entry represents a one second duration.
    num_entries = num_seconds(end - start)
    logger.debug("CPS array contains %s entries", num_entries)

    # Create the arrays and load the CPS array from the intervals read.
    dt_array = np.arange(start, end, dtype='datetime64[s]')
    cps_array = np.zeros(num_entries, dtype=np.int32)
    queue_time = np.zeros(num_entries, dtype=np.single)

    for interval in intervals:
        index = num_seconds(interval[0] -
                            start)  # interval[0] contains a datetime
        cps_array[index] = interval[
            1]  # interval[1] is the corresponding count

    # If a CPS was specified, calculate the daily maxima.
    if args.cps:
        calculate_queue_time(cps_array, queue_time, args.cps)
        maxima = get_daily_maxima(start, queue_time)
        print_maxima(maxima, args.cps)
        plot_results(dt_array, queue_time, args.cps, start, end)

    # Otherwise prompt for CPS interactively.
    else:
        while True:
            response = input("Enter CPS value, or Q to quit: ").strip()
            if not response: continue
            if response[0].upper() == 'Q': break

            try:
                cps = int(response)
                calculate_queue_time(cps_array, queue_time, cps)
                maxima = get_daily_maxima(start, queue_time)
                print_maxima(maxima, cps)
                plot_results(dt_array, queue_time, cps, start, end)
            except ValueError:
                continue
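A hedged sketch of the CSV rows the reader above expects: two columns, an ISO timestamp and an integer count, with no header row (the sample values are invented):

import csv
import io
from datetime import datetime

sample = "2021-06-01T00:00:00,42\n2021-06-01T00:00:01,57\n"
for row in csv.DictReader(io.StringIO(sample), fieldnames=['dt', 'cps']):
    print(datetime.fromisoformat(row['dt']), int(row['cps']))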
Example #40
def __convert_value_to_datetime(value):
    if type(value) == datetime:
        return value
    else:
        return datetime.fromisoformat(value)
Example #41
so user should do that.
"""
import hmac
import json
import sys
import secrets
from datetime import datetime
import re

HMAC_KEY = secrets.token_bytes(32)

for l in sys.stdin:
    if re.search(r'302 GET /hub/user-redirect/(git-sync|git-pull|interact)\?',
                 l):
        if '/hub/login' not in l:
            parts = l.split(' ')
            timestamp = datetime.fromisoformat(
                f'{parts[1]}T{parts[2]}').isoformat()
            url = parts[7]
            user = parts[10][1:-1].split('@')[0]
            hashed_user = hmac.new(key=HMAC_KEY,
                                   msg=user.encode(),
                                   digestmod='sha256').hexdigest()

            print(
                json.dumps({
                    'timestamp': timestamp,
                    'url': url,
                    'username': hashed_user
                }))
Example #42
 def _set_definition(self, full_defn: dict):
     self.definition = max(
         full_defn[self.history_key],
         key=lambda x: datetime.fromisoformat(x["dates"]["last"]))
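A minimal illustration of the selection above with a hypothetical payload (the "history" key and the entry shapes are assumptions); the entry whose dates.last parses to the latest datetime wins:

from datetime import datetime

full_defn = {
    "history": [                                                   # hypothetical history_key
        {"name": "v1", "dates": {"last": "2020-01-01T00:00:00"}},
        {"name": "v2", "dates": {"last": "2021-06-15T08:30:00"}},
    ],
}
latest = max(full_defn["history"],
             key=lambda x: datetime.fromisoformat(x["dates"]["last"]))
print(latest["name"])  # v2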