Example #1
 def mandate_period_end(e):
     start = mandate_period_start(e)
     length = e['mandate_period'].get('duration')
     if length is None:
         return None
     duration = aniso8601.parse_duration(length)
     return start + duration
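
A minimal sketch of the pattern above, with a hypothetical event dict; note that aniso8601's default duration parsing approximates a year as 365 days, so the result is a plain datetime.timedelta rather than a calendar-aware offset:

import aniso8601

event = {'mandate_period': {'start_date': '2020-01-01', 'duration': 'P4Y'}}
start = aniso8601.parse_date(event['mandate_period']['start_date'])
end = start + aniso8601.parse_duration(event['mandate_period']['duration'])
print(end)  # 2023-12-31 (4 * 365 days, not 4 calendar years)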
Example #2
def parse_iec_xml(file_path):
    """Parses iec xml to Pandas DataFrame, meta on the same row wit valu and start/end time
    columns = ["position", "timestamp_start_utc", "timestamp_end_utc", "value", "business_type", "from_domain", "to_domain", "line"]"""

    tree = etree.parse(file_path)
    periods = tree.findall('.//{*}Period')

    data_list = []

    for period in periods:

        business_type = get_text(period, '../{*}businessType')
        from_domain = get_text(period, '../{*}in_Domain.mRID')
        to_domain = get_text(period, '../{*}out_Domain.mRID')
        line = get_text(period, '../{*}connectingLine_RegisteredResource.mRID')

        curve_type = get_text(period, '../{*}curveType')
        resolution = aniso8601.parse_duration(
            period.find('.//{*}resolution').text, relative=True)
        start_time = aniso8601.parse_datetime(period.find('.//{*}start').text)
        end_time = aniso8601.parse_datetime(period.find('.//{*}end').text)

        points = period.findall('.//{*}Point')

        for n, point in enumerate(points):
            # Direct conversion; eval() on untrusted XML text is unsafe
            position = int(point.find("{*}position").text)
            value = float(point.find("{*}quantity").text)
            timestamp_start = (start_time + resolution *
                               (position - 1)).replace(tzinfo=None)

            if curve_type == "A03":
                # This curvetype expect values to be valid until next change or until the end of period
                if n + 2 <= len(points):
                    next_position = int(
                        eval(points[n + 1].find("{*}position").text))
                    timestamp_end = (start_time + resolution *
                                     (next_position - 1)).replace(tzinfo=None)
                else:

                    timestamp_end = end_time.replace(tzinfo=None)

            else:
                # Otherwise the value is only valid for the specified resolution
                timestamp_end = timestamp_start + resolution

            data_list.append((position, timestamp_start, timestamp_end, value,
                              business_type, from_domain, to_domain, line))


    data_frame = pandas.DataFrame(data_list,
                                  columns=[
                                      "position", "timestamp_start_utc",
                                      "timestamp_end_utc", "value",
                                      "business_type", "from_domain",
                                      "to_domain", "line"
                                  ])

    return data_frame
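
Why the example passes relative=True: with python-dateutil installed, older aniso8601 releases return a dateutil relativedelta, so calendar units like P1M mean a true month instead of a fixed 30 days; newer releases moved this behind a separate RelativeTimeBuilder. A small sketch:

import datetime

import aniso8601

start = datetime.datetime(2020, 1, 31)
print(start + aniso8601.parse_duration('P1M'))  # 2020-03-01, P1M ~ 30 days
try:
    # Works on older aniso8601 with python-dateutil installed
    print(start + aniso8601.parse_duration('P1M', relative=True))  # 2020-02-29
except TypeError:
    pass  # aniso8601 >= 9 dropped the relative kwarg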
Example #3
 def fetch_video_details(self, video_id):
     video_result = self._retrieve_video_details(video_id,
                                                 'snippet,contentDetails')
     # LATER: add debug logging for youtube response
     if video_result == False:
         return Result(False, meta_info=None, message=video_result.message)
     elif len(video_result) == 0:
         return Result(
             False,
             meta_info=None,
             message=_('Invalid YouTube URL. This video does not exist or is private and can not be embedded.'))
     video_details = video_result[0]
     iso8601_duration = video_details['contentDetails']['duration']
     duration = aniso8601.parse_duration(iso8601_duration)
     snippet = video_details['snippet']
     best_thumbnail = self._find_biggest_thumbnail(snippet['thumbnails'])
     meta_info = {
         'unique_id': video_details['id'],
         'duration': timedelta_to_seconds(duration),
         'display_name': snippet['title'],
         'description': snippet['description'],
         'thumbnail': {
             'width': best_thumbnail['width'],
             'height': best_thumbnail['height'],
             'url': best_thumbnail['url'],
         },
         'type': VIDEO,
     }
     return Result(True, meta_info=meta_info, message=None)
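
The timedelta_to_seconds helper above is project-specific; a common equivalent, sketched here, is timedelta.total_seconds():

import aniso8601

duration = aniso8601.parse_duration('PT1H13M7S')
print(int(duration.total_seconds()))  # 4387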
Example #5
 def __init__(self, item):
     self.id = item['id']
     self.title = item['snippet']['title']
     self.channel = item['snippet']['channelTitle']
     self.duration = parse_duration(item['contentDetails']['duration'])
     self.dimension = item['contentDetails']['dimension']
     self.views = int(item['statistics']['viewCount'])
     self.comments = int(item['statistics']['commentCount'])
     self.likes = int(item['statistics']['likeCount'])
     self.dislikes = int(item['statistics']['dislikeCount'])
Example #6
def uri_youtube(bot, response, matches):
    """ Extract YouTube video information. """
    video = json.loads(response.text)
    info = video['items'][0]['snippet']
    details = video['items'][0]['contentDetails']

    duration = int(aniso8601.parse_duration(details['duration']).total_seconds())
    meta = '{}:{:02}'.format(*divmod(duration, 60))

    return 'YouTube: {}'.format(info['channelTitle']), info['title'], meta
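
A quick sketch of the divmod trick above, which splits a total second count into a M:SS display:

import aniso8601

seconds = int(aniso8601.parse_duration('PT3M5S').total_seconds())
print('{}:{:02}'.format(*divmod(seconds, 60)))  # 3:05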
Example #7
def func_time_span(xule_context, *args):
    arg = args[0]

    if arg.type != 'string':
        raise XuleProcessingError(_("Function 'time-span' expects a string, found '%s'." % arg.type), xule_context)

    try:
        return xv.XuleValue(xule_context, parse_duration(arg.value.upper()), 'time-period')
    except Exception:
        raise XuleProcessingError(_("Could not convert '%s' into a time-period." % arg.value), xule_context)
Example #8
    def check_token(self):
        """Check if token is due to expire and renew if needed"""
        utc_now = datetime.now(timezone.utc)

        if utc_now > self.auth_valid_until - aniso8601.parse_duration("PT5S"):
            self.token, self.auth_valid_until = self.request_security_token()
            logger.debug("Requesting new Auth Token")

        elif self.debug:
            logger.debug(f"Auth Token still valid for {self.auth_valid_until - utc_now}")
Example #9
def make_timedelta(value):
    """Tries to convert the given value to a :class:`datetime.timedelta`.

    Strings will be parsed as ISO 8601 durations.

    If a number is provided, it will be interpreted as the number of
    seconds.

    If a `dict` is provided, does `datetime.timedelta(**value)`.

    :param value: something to convert
    :type value: str | float | int | datetime.timedelta | dict
    :return: the value after conversion
    :rtype: datetime.timedelta

    """

    if isinstance(value, str):
        try:
            return aniso8601.parse_duration(value)
        except Exception as e:
            msg = (
                "Conversion to datetime.timedelta failed. Could not "
                "parse the given string as an ISO 8601 duration: "
                "%s\n\n"
                "%s" %
                (
                    repr(value),
                    e,
                )
            )
            raise ValueError(msg)

    try:
        if isinstance(value, datetime.timedelta):
            return value
        elif isinstance(value, dict):
            return datetime.timedelta(**value)
        elif isinstance(value, (float, int)):
            return datetime.timedelta(seconds=value)
        else:
            return datetime.timedelta(value)
    except Exception as e:
        msg = (
            "Could not convert the given value of type '%s' to a "
            "datetime.timedelta: %s\n\n"
            "%s" %
            (
                value.__class__.__name__,
                repr(value),
                e,
            )
        )
        raise TypeError(msg) if isinstance(e, TypeError) else ValueError(msg)
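
Hypothetical calls showing the three accepted input shapes, all yielding the same timedelta:

print(make_timedelta('PT1M30S'))                      # 0:01:30
print(make_timedelta(90))                             # 0:01:30
print(make_timedelta({'minutes': 1, 'seconds': 30}))  # 0:01:30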
Example #10
def test_parse_duration(sutime, input_duration, tomorrow, two_pm):
    result = sutime.parse(input_duration)

    assert len(result) == 3

    assert result[0][u'type'] == u'DATE'
    assert parser.parse(result[0][u'value']).date() == tomorrow

    assert result[1][u'type'] == u'TIME'
    assert parser.parse(result[1][u'value']).time() == two_pm

    assert result[2][u'type'] == u'DURATION'
    assert aniso8601.parse_duration(result[2][u'value']) == timedelta(hours=2)
Example #11
def get_single_duration(video_id):

    request_url = ("https://www.googleapis.com/youtube/v3/videos?"
                   + "part=contentDetails"
                   + "&maxResults=50"
                   + "&id=" + video_id
                   + "&key=" + youtube_key)
    durations_request = requests.get(request_url)
    durations_data = json.loads(durations_request.text)
    # total_seconds() avoids silent truncation for durations of a day or more
    duration = int(aniso8601.parse_duration(
        durations_data['items'][0]['contentDetails']['duration']).total_seconds())

    return duration
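
Worth noting: timedelta.seconds is only the sub-day remainder, so durations of a day or more get silently truncated; YouTube videos stay under 24 hours, which is why several examples on this page get away with it. A sketch:

import aniso8601

d = aniso8601.parse_duration('P1DT1M')
print(d.seconds)               # 60 -- the whole day is dropped
print(int(d.total_seconds()))  # 86460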
Example #12
 def get_timedelta(self, iso8601_time_duration_string):
     """ A facade method for the iso8601.parse_duration method that reads a string,
     containing an iso8601 time duration value, and returns a datetime.timedelta object.
     :param iso8601_time_duration_string: a string containing an iso8601 time duration.
     :return: datetime.timedelta
     """
     time_delta = None
     try:
         time_delta = parse_duration(iso8601_time_duration_string)
     except Exception as ex:
         self.logger.error("Time Duration Unparseable: {td}".format(td=iso8601_time_duration_string))
         self.logger.error(ex)
     finally:
         return time_delta or timedelta(seconds=0)
Example #13
def func_time_span(xule_context, *args):
    arg = args[0]

    if arg.type != 'string':
        raise XuleProcessingError(
            _("Function 'time-span' expects a string, found '%s'." % arg.type),
            xule_context)

    try:
        return xv.XuleValue(xule_context, parse_duration(arg.value.upper()),
                            'time-period')
    except Exception:
        raise XuleProcessingError(
            _("Could not convert '%s' into a time-period." % arg.value),
            xule_context)
Example #14
    def parseDetails(self, entry):
        item = {}
        try:
            item['id'] = entry['id']
            item['video'] = \
                self.mythxml.getInternetContentUrl("nv_python_libs/configs/HTML/youtube.html", \
                                                   item['id'])
            item['link'] = item['video']
            snippet = entry['snippet']
            item['title'] = snippet['title']
            item['media_description'] = snippet['description']
            item['thumbnail'] = snippet['thumbnails']['high']['url']
            item['author'] = snippet['channelTitle']
            item['published_parsed'] = snippet['publishedAt']

            try:
                duration = aniso8601.parse_duration(
                    entry['contentDetails']['duration'])
                item['duration'] = duration.days * 24 * 3600 + duration.seconds
            except Exception:
                pass

            for key in item.keys():
                # Make sure there are no item elements that are None
                if item[key] is None:
                    item[key] = u''
                elif key == 'published_parsed':  # 2010-01-23T08:38:39.000Z
                    if item[key]:
                        pub_time = time.strptime(item[key].strip(),
                                                 "%Y-%m-%dT%H:%M:%S.%fZ")
                        item[key] = time.strftime('%a, %d %b %Y %H:%M:%S GMT',
                                                  pub_time)
                elif key == 'media_description' or key == 'title':
                    # Strip the HTML tags
                    if item[key]:
                        item[key] = self.massageDescription(item[key].strip())
                        item[key] = item[key].replace(u'|', u'-')
                elif type(item[key]) == type(u''):
                    if item[key]:
                        item[key] = self.common.ampReplace(item[key].replace(
                            '"\n', ' ').strip())
        except KeyError:
            pass

        return item
Example #15
    def test_import(self):
        #Just some tests repeated from other places to make sure the
        #imports work
        time = aniso8601.parse_time('01:23:45')
        self.assertEqual(time.hour, 1)
        self.assertEqual(time.minute, 23)
        self.assertEqual(time.second, 45)

        resultdatetime = aniso8601.parse_datetime(
            '1981-04-05T23:21:28.512400Z')
        self.assertEqual(resultdatetime.year, 1981)
        self.assertEqual(resultdatetime.month, 4)
        self.assertEqual(resultdatetime.day, 5)
        self.assertEqual(resultdatetime.hour, 23)
        self.assertEqual(resultdatetime.minute, 21)
        self.assertEqual(resultdatetime.second, 28)
        self.assertEqual(resultdatetime.microsecond, 512400)
        tzinfoobject = resultdatetime.tzinfo
        self.assertEqual(tzinfoobject.utcoffset(None),
                         datetime.timedelta(hours=0))
        self.assertEqual(tzinfoobject.tzname(None), 'UTC')

        date = aniso8601.parse_date('19810405')
        self.assertEqual(date.year, 1981)
        self.assertEqual(date.month, 4)
        self.assertEqual(date.day, 5)

        resultduration = aniso8601.parse_duration('P1Y2M3DT4H54M6S')
        self.assertEqual(resultduration.days, 428)
        self.assertEqual(resultduration.seconds, 17646)

        resultinterval = aniso8601.parse_interval(
            '1980-03-05T01:01:00/1981-04-05T01:01:00')
        self.assertEqual(
            resultinterval[0],
            datetime.datetime(year=1980, month=3, day=5, hour=1, minute=1))
        self.assertEqual(
            resultinterval[1],
            datetime.datetime(year=1981, month=4, day=5, hour=1, minute=1))

        results = list(aniso8601.parse_repeating_interval('R3/1981-04-05/P1D'))
        self.assertEqual(results[0], datetime.date(year=1981, month=4, day=5))
        self.assertEqual(results[1], datetime.date(year=1981, month=4, day=6))
        self.assertEqual(results[2], datetime.date(year=1981, month=4, day=7))
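
The asserted numbers follow from aniso8601's default calendar approximation (one year as 365 days, one month as 30 days), as this sketch of the arithmetic shows:

days = 1 * 365 + 2 * 30 + 3       # 428
seconds = 4 * 3600 + 54 * 60 + 6  # 17646
print(days, seconds)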
Example #16
def get_duration_dict(id_list):

    id_string = ','.join(id_list)

    request_url = ("https://www.googleapis.com/youtube/v3/videos?"
                   + "part=contentDetails"
                   + "&maxResults=50"
                   + "&id=" + id_string
                   + "&key=" + youtube_key)
    durations_request = requests.get(request_url)
    durations_data = json.loads(durations_request.text)
    durations_dict = {}
    for item in durations_data['items']:
        # total_seconds() avoids silent truncation for durations of a day or more
        duration = int(aniso8601.parse_duration(
            item['contentDetails']['duration']).total_seconds())
        video_id = item['id']
        durations_dict[video_id] = duration

    return durations_dict
Example #17
    def parseDetails(self, entry):
        item = {}
        try:
            item['id'] = entry['id']
            item['video'] = \
                self.mythxml.getInternetContentUrl("nv_python_libs/configs/HTML/youtube.html", \
                                                   item['id'])
            item['link'] = item['video']
            snippet = entry['snippet']
            item['title'] = snippet['title']
            item['media_description'] = snippet['description']
            item['thumbnail'] = snippet['thumbnails']['high']['url']
            item['author'] = snippet['channelTitle']
            item['published_parsed'] = snippet['publishedAt']

            try:
                duration = aniso8601.parse_duration(entry['contentDetails']['duration'])
                item['duration'] = duration.days * 24 * 3600 + duration.seconds
            except Exception:
                pass

            for key in item.keys():
                # Make sure there are no item elements that are None
                if item[key] is None:
                    item[key] = u''
                elif key == 'published_parsed': # 2010-01-23T08:38:39.000Z
                    if item[key]:
                        pub_time = time.strptime(item[key].strip(), "%Y-%m-%dT%H:%M:%S.%fZ")
                        item[key] = time.strftime('%a, %d %b %Y %H:%M:%S GMT', pub_time)
                elif key == 'media_description' or key == 'title':
                    # Strip the HTML tags
                    if item[key]:
                        item[key] = self.massageDescription(item[key].strip())
                        item[key] = item[key].replace(u'|', u'-')
                elif type(item[key]) == type(u''):
                    if item[key]:
                        item[key] = self.common.ampReplace(item[key].replace('"\n',' ').strip())
        except KeyError:
            pass

        return item
Example #18
def getVideoDetails(httpProxy,developer_key,video_ids):
	# https://developers.google.com/youtube/v3/docs/videos/list
	youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION, developerKey=developer_key, http=httpProxy)

	response = youtube.videos().list(
		id=video_ids,
		part="contentDetails,status",
		maxResults=MAXRESULTS
	).execute()
	
	videos = dict()
	
	for item in response.get("items", []):
		video = dict()
		video["duration"] = parse_duration( item["contentDetails"]["duration"] ).seconds
		video["license"] = item["status"]["license"]
		video["embed"] = item["status"]["embeddable"]
		video["uploadStatus"] = item["status"]["uploadStatus"]
		video["privacyStatus"] = item["status"]["privacyStatus"]
		videos[item["id"]] = video

	return videos
Example #19
    async def search_videos(self, text, limit=5):
        logger.debug("utils/youtube/search_videos: Searching", text=text, limit=limit)
        results = await self.api.search(
            key=SUPER_YOUTUBE_API_KEY, text=text, max_results=limit, order="relevance"
        )

        logger.debug("utils/youtube/search_videos: Fetching metadatas")
        metadata = await self.api.videos(
            key=SUPER_YOUTUBE_API_KEY,
            video_ids=[
                video["id"]["videoId"]
                for video in results["items"]
                if video["snippet"]["liveBroadcastContent"] == "none"
            ],
            part=["snippet", "contentDetails"],
        )

        return [
            video
            for video in metadata["items"]
            if aniso8601.parse_duration(video["contentDetails"]["duration"]).seconds
            < SUPER_MAX_YOUTUBE_LENGTH
        ][:limit]
Example #20
    def _get_entries(self):
        playlist = self.__api.playlistItems().list(
            playlistId=self.__uploads_id,
            part="contentDetails",
            maxResults=20
        ).execute()

        videos = self.__api.videos().list(
            id=','.join(item['contentDetails']['videoId'] for item in playlist['items']),
            part='snippet,contentDetails'
        ).execute()

        ret = []
        for item in videos['items']:
            snip = item['snippet']
            duration = self.__parse_duration(aniso8601.parse_duration(item['contentDetails']['duration']).seconds)
            title = '%s [%s]' % (snip['title'], duration)

            e = FeedEntry()
            e.load_extension('dc')
            e.dc.dc_creator('none')

            e.title(title)
            e.link(href=self.__VIDEO_URL % item['id'], rel='alternate')
            e.description(title)
            e.pubdate(aniso8601.parse_datetime(snip['publishedAt']))

            content_args = {
                'image': snip['thumbnails']['high']['url'],
                'content': self.__parse_content(snip)
                # TODO: some comments i think?
                # 'comments':
            }
            e.content(self.__CONTENT % content_args, type='CDATA')
            ret.append(e)

        return ret
Example #22
def main(argv):
    SCOPE = "https://www.googleapis.com/auth/youtube.readonly"
    os.environ["OAUTHLIB_INSECURE_TRANSPORT"] = "1"
    api_service_name = "youtube"
    api_version = "v3"

    parser = argparse.ArgumentParser(
        description="yt-rss args",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        parents=[tools.argparser])
    flags = parser.parse_args(argv[1:])

    storage = file.Storage('creds.dat')
    credentials = storage.get()

    yt_api_url = "https://www.googleapis.com/youtube/v3/videos?id=%s&part=contentDetails&key=%s"
    config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                               "config.json")
    with open(config_file, "r") as fp:
        config = json.load(fp)
    with open(config["datastore_file"], "r") as fp:
        datastore = json.load(fp)

    if credentials is None or credentials.invalid:
        flow = oauth2client.client.OAuth2WebServerFlow(
            client_id=config.get('client_id'),
            client_secret=config.get('client_secret'),
            scope=SCOPE,
            user_agent="yt-rss",
            oauth_displayname="yt-rss",
        )
        credentials = tools.run_flow(flow, storage, flags)
    http = httplib2.Http()
    http = credentials.authorize(http)

    youtube = discovery.build(api_service_name,
                              api_version,
                              credentials=credentials)

    items = []
    request = youtube.subscriptions().list(
        part="snippet",
        maxResults=50,
        mine=True,
    )
    while request is not None:
        response = request.execute()
        items.extend(response.get("items"))
        request = youtube.subscriptions().list_next(
            previous_request=request,
            previous_response=response,
        )

    august_13th = datetime.fromisoformat("2020-08-13T00:00:00+00:00")
    messages = []
    found = False

    # Loop through subscribed channels
    for item in items:
        channel_id = item.get("snippet").get("resourceId").get("channelId")
        url = f"https://www.youtube.com/feeds/videos.xml?channel_id={channel_id}"
        entries = feedparser.parse(url).entries
        # Loop through videos in this channel's feed
        for entry in entries:
            livestream = entry.media_statistics["views"] == "0"
            if "published" in entry.keys():
                published_date = datetime.fromisoformat(entry.published)
            elif "updated" in entry.keys():
                published_date = datetime.fromisoformat(entry.updated)
            else:
                # Couldn't find a published/updated date, skip it and hope it's got
                # this data next time
                continue
            # Skip videos we already know about or are older than 8/13/2020
            if entry.link not in datastore and published_date > august_13th:
                try:
                    duration_data = json.loads(
                        requests.get(
                            yt_api_url %
                            (entry.yt_videoid, config["api_key"])).text)
                    if "items" in duration_data.keys():
                        duration = str(
                            aniso8601.parse_duration(
                                duration_data['items'][0]['contentDetails']
                                ['duration']))
                    else:
                        duration = "Unknown Duration"
                except Exception:
                    duration = "Unknown Duration"
                datastore[entry.link] = {
                    "title": entry.title,
                    "date": datetime.isoformat(published_date)
                }
                print(
                    f"Found new video for channel {entry.author}: {entry.title}"
                )
                image_html = ""
                for thumbnail in entry.media_thumbnail:
                    image_html += f"""<p><img src="{thumbnail['url']}"
                    width="{thumbnail['width']} height="{thumbnail['height']}"
                    /></p>"""
                    if not image_html:
                        image_html = "NO THUMBNAIL"
                message = MIMEMultipart("alternative")
                if livestream:
                    message["Subject"] = f"{item['snippet']['title']} just announced a LIVE STREAM"
                else:
                    message["Subject"] = f"{item['snippet']['title']} just uploaded a video"
                message["From"] = f'YouTube <{config["email"]}>'
                message["To"] = config["email"]
                text = f"""\
                    {entry.title}
                    {entry.link} ({duration})"""
                html = f"""\
                    <html>
                    <body>
                        <a href="{entry.link}">{image_html}</a>
                        <p><a href="{entry.link}">{entry.title}</a>
                        ({duration})</p>
                    </body>
                   </html>
                   """
                message.attach(MIMEText(text, "plain"))
                message.attach(MIMEText(html, "html"))
                messages.append(message)
                found = True

    if found:
        with smtplib.SMTP(config["smtp_server"],
                          config["smtp_port"]) as server:
            server.ehlo()
            for message in messages:
                server.sendmail(config["email"], config["email"],
                                message.as_string())

        with open(config["datastore_file"], "w") as fp:
            json.dump(datastore, fp, indent=2)
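
The duration lookup from the loop above, reduced to a standalone sketch; the video id and API key here are placeholders:

import json

import aniso8601
import requests

yt_api_url = "https://www.googleapis.com/youtube/v3/videos?id=%s&part=contentDetails&key=%s"
data = json.loads(requests.get(yt_api_url % ("VIDEO_ID", "API_KEY")).text)
if data.get("items"):
    print(aniso8601.parse_duration(data["items"][0]["contentDetails"]["duration"]))
else:
    print("Unknown Duration")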
Example #23
 def _set_mpd_duration(self, duration):
     """Set the duration of playback defined in the MPD."""
     self.mpd_duration = aniso8601.parse_duration(duration).seconds
Example #24
    if type(query_result['sm:QueryResult']['sm:part']) == str:

        print("No data")

        return

    query_result['sm:QueryResult']['sm:part'].pop(0)

    query_dataframe = pandas.DataFrame(
        query_result['sm:QueryResult']['sm:part'])

    return query_dataframe


delivery_date = get_day_start() + aniso8601.parse_duration("P0D")
base_path = "/RSC/CGM/OUT/{}/".format(delivery_date.strftime("%Y%m%d"))

sys.path.append(r"C:\GIT")
import baltic_ftp
files_list = baltic_ftp.get_file_list(base_path)

TSOs = ['ELERING', 'LITGRID', 'AST']
meta_list = []

for n, file_name in enumerate(files_list):

    print(file_name)

    if ".zip" in file_name:
Example #25
            'id': d,
            'key': api_key,
        }
        page = requests.request(method="get",
                                url=url_vid,
                                params=parameters_duration)
        d_results = json.loads(page.text)
        Duration.append(d_results)
    # Put together title of podcast and stats of podcasts
    myList = []
    Data = []
    for l in range(len(title_video)):
        title = title_video[l]
        viewCount = Statistics[l]['items'][0]['statistics']['viewCount']
        commentCount = Statistics[l]['items'][0]['statistics']['commentCount']
        dislikeCount = Statistics[l]['items'][0]['statistics']['dislikeCount']
        favCount = Statistics[l]['items'][0]['statistics']['favoriteCount']
        Date = Date_video[l]
        Video_length = Duration[l]['items'][0]['contentDetails']['duration']
        parsed_Video_length = aniso8601.parse_duration(Video_length)

        cur.execute(
            '''CREATE TABLE IF NOT EXISTS EveryDayStruggle (Date_published DATE,title VARCHAR,viewCount Number,commentCount Number,dislikeCount Number, favCount Number,Video_length Number)'''
        )
        cur.execute(
            '''INSERT INTO EveryDayStruggle (Date_published,title,viewCount,commentCount,dislikeCount,favCount,Video_length) VALUES ( ?, ?,?,?,?,?,? )''',
            (Date, title, viewCount, commentCount, dislikeCount, favCount,
             Video_length))
        conn.commit()
Example #26
logging_data = []
logging_columns = [
    "query_id", "scenario_time", "query", "query_start", "query_end",
    "query_duration", "query_status"
]

logging_dataframe = pandas.DataFrame()

for config in report_configs:

    metadata_dict = {
        'pmd:timeHorizon': config["time_horizon"],
        'pmd:cgmesProfile': "SV"
    }

    period_start_time = config["reference_time"] + aniso8601.parse_duration(
        config["delta_start_time"], relative=True)

    period_end_time = config["reference_time"] + aniso8601.parse_duration(
        config["delta_end_time"], relative=True)

    print(period_start_time.isoformat(), period_end_time.isoformat())

    start_time = period_start_time

    while start_time <= period_end_time:

        metadata_dict['pmd:scenarioDate'] = start_time.isoformat()

        print(metadata_dict['pmd:scenarioDate'])

        response_start = datetime.datetime.now()
Example #27
0
def to_timedelta(amazon_duration):
    return aniso8601.parse_duration(amazon_duration)
Example #28
 def __len__(self):
     return aniso8601.parse_duration(
         self.metadata["contentDetails"]["duration"]
     ).seconds
Example #29
def parse_literal(ast_node, _variables=None):
    if isinstance(ast_node, ast.StringValueNode):
        return aniso8601.parse_duration(ast_node.value)
    return graphql.INVALID
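
A hedged sketch of how a parse_literal like this typically plugs into a graphql-core custom scalar; the scalar name and serialize lambda here are illustrative, not from the source project:

import aniso8601
from graphql import GraphQLScalarType

DurationScalar = GraphQLScalarType(
    name='Duration',
    serialize=lambda td: 'PT{}S'.format(int(td.total_seconds())),
    parse_value=aniso8601.parse_duration,
    parse_literal=parse_literal,
)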
Example #30
def parse_duration(amzduration):
    return aniso8601.parse_duration(amzduration)
Example #31
    session = ftplib.FTP(server_ip, username, password)
    session.cwd(destination_path)

    with open(file_path, 'rb') as file_object:
        session.storbinary('STOR {}'.format(os.path.basename(file_path)),
                           file_object)

    session.quit()


# Let's query for all profiles and store their metadata to a dataframe

report_dataframe = pandas.DataFrame()

start_time = get_day_start(datetime.datetime.now(CET)).astimezone(
    UTC) + aniso8601.parse_duration("P1DT30M")
end_time = start_time + aniso8601.parse_duration("P1D")

print(start_time.isoformat(), end_time.isoformat())

metadata_dict = {"TSO": "AST", "timeHorizon": "1D"}

while start_time <= end_time:

    metadata_dict["scenarioDate"] = start_time.isoformat()

    print(metadata_dict)

    response_start = datetime.datetime.now()
    response = OPDM_SOAP_client.query_profile(metadata_dict)
    response_end = datetime.datetime.now()
Example #32
 def _set_mpd_duration(self, duration):
     """Set the duration of playback defined in the MPD."""
     self.mpd_duration = aniso8601.parse_duration(duration).seconds
Example #33
    vehicle.close()

    # Shut down simulator if it was started.
    if connection_to != "solo":
        sitl.stop()


### INITIATE LOGGING ###

# Callback to log telemetry
log_list = []
telemetry_dict = {}
telemetry_dict["timestamp"] = datetime.datetime.utcnow()
flight_UUID = str(uuid4())
telemetry_dict["flight_UUID"] = flight_UUID
update_interval = aniso8601.parse_duration("PT1S")


def telemetry_callback(self, key, value):

    telemetry_dict[key] = value

    # get current time
    time_now_utc = datetime.datetime.utcnow()

    # get previous timestamp
    previous_timestamp = telemetry_dict["timestamp"]

    # Only record whole set of data when interval has passed
    if time_now_utc - previous_timestamp >= update_interval:
Example #34
def duration(param):
    try:
        return aniso8601.parse_duration(param)
    except ValueError as e:
        raise ValueError("Unable to parse duration {}, {}".format(param, e))
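
Hypothetical usage: a converter like this slots straight into argparse (or flask-restful's reqparse) as a type:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--timeout', type=duration, default='PT30S')
args = parser.parse_args(['--timeout', 'PT2M'])
print(args.timeout)  # 0:02:00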
Example #35
def coerce(value: str):
    return aniso8601.parse_duration(value)