def pomo_Gen_Execute_Plan(types_To_Apply_Pomo, time_Table, type_Table, Other_Params, pomo_Modes):
    """

    :param time_Table: 2d List of Strings. %H:%M
                        Must start from 0:00, end at End_Of_Day_Symbol
    :param type_Table: 1d List of Strings.
    :param types_To_Apply_Pomo: 1d List of Strings. Types pomo algo apply on.

    :param Other_Params:
    :param pomo_Modes:
    :return:
    pomo_Execute_Plan: [[start, end, type]...]
    """
    pomo_Params = Other_Params['Pomo_Params']
    pomo_Execute_Plan = list()

    for [start, end], t in zip(time_Table, type_Table):
        if t in types_To_Apply_Pomo:
            start = parseTime(start)
            end = parseTime(end)

            execute_Plan = pomo_Algo(start, end, *pomo_Modes, **pomo_Params)
            # Loop: Reformat Execute Plan to [Start, End, Mode] format.
            lenPlan = len(execute_Plan)
            for i in range(lenPlan - 1):
                pomo_Execute_Plan.append([str(execute_Plan[i][0].time()),  # Start time
                                          str(execute_Plan[i + 1][0].time()),  # End time
                                          str(execute_Plan[i][1])])  # Mode
        else:
            pomo_Execute_Plan.append([start, end, t])

    return pomo_Execute_Plan
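A minimal driving sketch (not part of the original snippet, and using a trimmed two-row time_Table for brevity): pomo_Algo below is a hypothetical stand-in that alternates work/break blocks, and parseTime is assumed to be dateutil.parser.parse, as the createReleasesShields example further down imports it.

from datetime import timedelta
from dateutil.parser import parse as parseTime  # assumed implementation of parseTime

def pomo_Algo(start, end, *modes, work=25, rest=5):
    # Hypothetical stand-in: emit [timestamp, mode] pairs alternating work/break,
    # plus a final sentinel at `end`, so consecutive pairs form the intervals
    # that pomo_Gen_Execute_Plan reformats above.
    plan, cur, working = [], start, True
    while cur < end:
        plan.append([cur, 'work' if working else 'break'])
        cur = min(cur + timedelta(minutes=work if working else rest), end)
        working = not working
    plan.append([end, 'end'])
    return plan

time_Table = [['09:00', '10:00'], ['10:00', '10:30']]
type_Table = ['Study', 'Lunch']
plan = pomo_Gen_Execute_Plan(['Study'], time_Table, type_Table,
                             {'Pomo_Params': {'work': 25, 'rest': 5}}, [])
# plan -> [['09:00:00', '09:25:00', 'work'], ['09:25:00', '09:30:00', 'break'],
#          ['09:30:00', '09:55:00', 'work'], ['09:55:00', '10:00:00', 'break'],
#          ['10:00', '10:30', 'Lunch']]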
Example #2
    def _parse_get_bucket(self, xml_bytes):
        root = XML(xml_bytes)
        name = root.findtext("Name")
        prefix = root.findtext("Prefix")
        marker = root.findtext("Marker")
        max_keys = root.findtext("MaxKeys")
        is_truncated = root.findtext("IsTruncated")
        contents = []

        for content_data in root.findall("Contents"):
            key = content_data.findtext("Key")
            date_text = content_data.findtext("LastModified")
            modification_date = parseTime(date_text)
            etag = content_data.findtext("ETag")
            size = content_data.findtext("Size")
            storage_class = content_data.findtext("StorageClass")
            owner_id = content_data.findtext("Owner/ID")
            owner_display_name = content_data.findtext("Owner/DisplayName")
            owner = ItemOwner(owner_id, owner_display_name)
            content_item = BucketItem(key, modification_date, etag, size,
                                      storage_class, owner)
            contents.append(content_item)

        common_prefixes = []
        for prefix_data in root.findall("CommonPrefixes"):
            common_prefixes.append(prefix_data.text)

        return BucketListing(name, prefix, marker, max_keys, is_truncated,
                             contents, common_prefixes)
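As a side note, here is a small self-contained illustration of the findtext() calls this parser relies on, assuming XML behaves like xml.etree.ElementTree.XML (or a thin wrapper around it): nested paths such as "Owner/ID" are supported, and a missing element simply yields None.

from xml.etree.ElementTree import XML

content = XML(b"<Contents><Key>photos/cat.jpg</Key>"
              b"<Owner><ID>42</ID><DisplayName>alice</DisplayName></Owner></Contents>")
print(content.findtext("Key"))           # photos/cat.jpg
print(content.findtext("Owner/ID"))      # 42
print(content.findtext("StorageClass"))  # None, because the element is absent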
Example #3
    def _parse_get_bucket(self, response):
        status, xml_bytes = response
        root = XML(xml_bytes)
        name = root.findtext("Name")
        prefix = root.findtext("Prefix")
        marker = root.findtext("Marker")
        max_keys = root.findtext("MaxKeys")
        is_truncated = root.findtext("IsTruncated")
        contents = []

        for content_data in root.findall("Contents"):
            key = content_data.findtext("Key")
            date_text = content_data.findtext("LastModified")
            modification_date = parseTime(date_text)
            etag = content_data.findtext("ETag")
            size = content_data.findtext("Size")
            storage_class = content_data.findtext("StorageClass")
            owner_id = content_data.findtext("Owner/ID")
            owner_display_name = content_data.findtext("Owner/DisplayName")
            owner = ItemOwner(owner_id, owner_display_name)
            content_item = BucketItem(key, modification_date, etag, size,
                                      storage_class, owner)
            contents.append(content_item)

        common_prefixes = []
        for prefix_data in root.findall("CommonPrefixes"):
            common_prefixes.append(prefix_data.text)

        return BucketListing(name, prefix, marker, max_keys, is_truncated,
                             contents, common_prefixes)
Example #4
import re


def parse(text):
    t = 0  # running count of sessions seen in the current sample (never returned)

    ret = {}
    cTime = None
    cData = None
    cHost = None
    cUsers = None
    for line in text.strip().split('\n'):
        if 'Nov' in line:
            # A date line starts a new sample; flush the previous sample,
            # including its last host, before resetting.
            if cTime is not None:
                if cHost is not None:
                    cData[cHost] = cUsers
                ret[cTime] = cData
                t = 0

            cTime = int(parseTime(line).timestamp())
            cData = {}
            cHost = None
            cUsers = None
            continue

        if line.startswith('cslab'):
            # A host line: store the previous host's sessions, start a new list.
            if cHost is not None:
                cData[cHost] = cUsers
                t += len(cUsers)
            cHost = line
            cUsers = []
            continue

        # `who`-style session line: user, terminal, login time, optional source.
        l = re.findall(r'([a-z0-9]+)\s+(pts/\d+|tty\d+)\s+(\d{4}-\d{2}-\d{2} \d{2}:\d{2}) ?(.+)?', line)
        if len(l) != 1:
            continue

        user, term, time, src = l[0]
        time = int(parseTime(time).timestamp())

        cUsers.append({
            'user': user,
            'term': term,
            'time': time,
            'src': src,
        })

    # Flush the trailing sample, which is never followed by another date line.
    if cTime is not None:
        if cHost is not None:
            cData[cHost] = cUsers
        ret[cTime] = cData
    return ret
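A hedged usage sketch for the parser above: the input is assumed to be a concatenation of `date` + `who`-style dumps, where each sample starts with a date line (detected literally by the substring 'Nov'), host lines start with "cslab", and the remaining lines are session entries. parseTime is again assumed to be dateutil.parser.parse.

from dateutil.parser import parse as parseTime  # assumed, as in the releases example below

sample = """
Mon Nov  4 10:00:01 2019
cslab01
alice    pts/0    2019-11-04 09:55 (10.0.0.1)
bob      tty1     2019-11-04 08:12
Mon Nov  4 10:05:01 2019
cslab02
carol    pts/1    2019-11-04 10:01 (10.0.0.2)
"""
result = parse(sample)
# -> {<epoch of first date line>:  {'cslab01': [{'user': 'alice', ...}, {'user': 'bob', ...}]},
#     <epoch of second date line>: {'cslab02': [{'user': 'carol', ...}]}}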
Example #5
 def _parse_list_buckets(self, xml_bytes):
     """
     Parse XML bucket list response.
     """
     root = XML(xml_bytes)
     buckets = []
     for bucket_data in root.find("Buckets"):
         name = bucket_data.findtext("Name")
         date_text = bucket_data.findtext("CreationDate")
         date_time = parseTime(date_text)
         bucket = Bucket(name, date_time)
         buckets.append(bucket)
     return buckets
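A hedged sketch of the response shape this method walks, again assuming XML behaves like xml.etree.ElementTree.XML: find("Buckets") returns the container element, and iterating it yields the individual <Bucket> children.

from xml.etree.ElementTree import XML

root = XML(b"""<ListAllMyBucketsResult>
  <Buckets>
    <Bucket><Name>logs</Name><CreationDate>2020-01-01T00:00:00.000Z</CreationDate></Bucket>
    <Bucket><Name>media</Name><CreationDate>2021-06-15T08:30:00.000Z</CreationDate></Bucket>
  </Buckets>
</ListAllMyBucketsResult>""")

for bucket_data in root.find("Buckets"):
    print(bucket_data.findtext("Name"), bucket_data.findtext("CreationDate"))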
Example #6
def tt2str(
        tt,
        zone=None):  # TODO: instead convert to datetime and use .isoformat()?
    '''
    Formats a given struct_time and display zone as an ISO-8601 string.
    The zone may be specified as an integer representing seconds *before* UTC, 
    or as a string to be appended at the end of the time.
    If 'zone' is None, it will be omitted from the string. 
    No timezone conversions will be performed by this method.
    '''
    formatted = time.strftime(timestamp_format_string(), tt)
    if zone is None:
        tz = ''
    elif isinstance(zone, int):
        tz = '{0:+06.2f}'.format(-float(zone) / 3600).replace('.', ':')
    else:
        tz = zone
    result = formatted + tz
    try:
        parseTime(result)
    except:
        print(result)
    return result
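A hedged usage sketch, assuming timestamp_format_string() returns something like '%Y-%m-%dT%H:%M:%S' (the real project may use a different format) and parseTime is dateutil.parser.parse, which tt2str only calls as a round-trip sanity check.

import time

tt = time.strptime('2021-03-14 09:26:53', '%Y-%m-%d %H:%M:%S')
print(tt2str(tt))              # 2021-03-14T09:26:53
print(tt2str(tt, zone='Z'))    # 2021-03-14T09:26:53Z
print(tt2str(tt, zone=-3600))  # 2021-03-14T09:26:53+01:00  (zone counts seconds *before* UTC)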
Example #7
def createReleasesShields(tag='latest'):
   d = getJSON()
   from dateutil.parser import parse as parseTime
   releases = [['Date', 'Downloads']]
   if tag == 'latest':
      t = d[0]
   for x in d:
      name = x['tag_name']
      if tag == name:
         t = x
      date = parseTime( x['published_at'] ).strftime("%Y-%m-%d")
      releases.append([date, '|'+name+'/total|'])
      i = name
      for l in [False, True]:
         printShieldSrc(i+'/total', i+' Total',
            'https://img.shields.io/github/downloads/ghdl/ghdl/' + i + '/total.svg?longCache=true&style=flat-square&logo=github&label=%7F',
            'https://github.com/ghdl/ghdl/releases/' + i, l)

   out = {'releases': releases, 'assets': createTagShields(t)}
   import json
   json.dump(out, open('data.json', 'w'), indent=4)
   return out
Example #8
        d.addCallback(self._parse_get_bucket)
        return d

    def _parse_get_bucket(self, (response, xml_bytes)):  # Python 2 tuple-parameter syntax
        root = XML(xml_bytes)
        name = root.findtext("Name")
        prefix = root.findtext("Prefix")
        marker = root.findtext("Marker")
        max_keys = root.findtext("MaxKeys")
        is_truncated = root.findtext("IsTruncated")
        contents = []

        for content_data in root.findall("Contents"):
            key = content_data.findtext("Key")
            date_text = content_data.findtext("LastModified")
            modification_date = parseTime(date_text)
            etag = content_data.findtext("ETag")
            size = content_data.findtext("Size")
            storage_class = content_data.findtext("StorageClass")
            owner_id = content_data.findtext("Owner/ID")
            owner_display_name = content_data.findtext("Owner/DisplayName")
            owner = ItemOwner(owner_id, owner_display_name)
            content_item = BucketItem(key, modification_date, etag, size,
                                      storage_class, owner)
            contents.append(content_item)

        common_prefixes = []
        for prefix_data in root.findall("CommonPrefixes"):
            common_prefixes.append(prefix_data.text)

        return BucketListing(name, prefix, marker, max_keys, is_truncated,
                             contents, common_prefixes)
Example #9
 def time(self):
     time = self.get_meta_data("time")
     return parseTime(time)
Example #10
 def transformTimeIntoLocal(timeStrFromJSON: str) -> str:
     t = parseTime(timeStrFromJSON).astimezone(tz)
     tStr = t.strftime("%a, %x %X").encode("CP1252").decode(
         "CP1251")  # WTF?
     return tStr
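A hedged usage sketch: tz is assumed to be a module-level tzinfo (a dateutil zone stands in here), parseTime to be dateutil.parser.parse, and the CP1252/CP1251 round-trip presumably repairs strftime output that was mojibaked under a Russian locale.

from dateutil import tz as dateutil_tz
from dateutil.parser import parse as parseTime

tz = dateutil_tz.gettz("Europe/Moscow")  # stand-in for the module-level tz
print(transformTimeIntoLocal("2021-03-14T06:26:53+00:00"))
# e.g. 'Sun, 03/14/21 09:26:53' under the C locale; %x and %X follow the active locale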