Example #1
def parse_legacy_time(ts, return_date=False):
    """
    The new timestrings are of the format YYYY-MM-DDThh:mm+oooo.
    They contain the timezone offset!
    
    Old legacy time strings are of the format DD/MM/YY hh:mm without a
    time zone offset.

    This function parses the string and returns the newly formatted time
    string including the timezone offset.
    :param ts: the time string to parse
    :param return_date: if set to True, a datetime object is returned
        instead of a string
    :return: the time string in the new format, or a datetime object
    """
    from privacyidea.lib.tokenclass import DATE_FORMAT
    d = parse_date_string(ts)
    if not d.tzinfo:
        # we need to reparse the string
        # only use dayfirst when the string starts with a day like "01/" or "01."
        d = parse_date_string(ts,
                              dayfirst=bool(re.match(r"^\d\d[/\.]", ts))).replace(
                                  tzinfo=tzlocal())
    if return_date:
        return d
    else:
        return d.strftime(DATE_FORMAT)
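A quick usage sketch (assuming parse_date_string is dateutil.parser.parse; privacyidea's DATE_FORMAT is roughly "%Y-%m-%dT%H:%M%z", so outputs look like the new format described above):

import re
from dateutil.parser import parse as parse_date_string
from dateutil.tz import tzlocal

# A legacy day-first string gets the local timezone offset attached:
print(parse_legacy_time("23/12/16 14:30"))
# e.g. "2016-12-23T14:30+0100", depending on the local timezone

# A new-style string already carries its offset and is returned as parsed:
print(parse_legacy_time("2017-04-27T20:00+0200", return_date=True))
# datetime.datetime(2017, 4, 27, 20, 0, tzinfo=tzoffset(None, 7200))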
Example #2
def _get_date(arg):
    if isinstance(arg, datetime):
        return arg

    if arg is None:
        return None

    return parse_date_string(arg).date()
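A minimal demonstration (assuming parse_date_string is dateutil.parser.parse): the helper passes datetime objects through untouched, maps None to None, and reduces strings to a date:

from datetime import datetime
from dateutil.parser import parse as parse_date_string

print(_get_date(None))                        # None
print(_get_date(datetime(2020, 1, 2, 3, 4)))  # 2020-01-02 03:04:00 (unchanged)
print(_get_date("2020-01-02 03:04"))          # 2020-01-02

Note the asymmetry: a datetime argument comes back as a full datetime, while a string is truncated to a date.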
Example #3
def variables(self, *args):
    vars = super().variables(*args)
    since_time = None
    if len(vars["url"]) >= 2:
        since_time = vars["url"][1]
        if isinstance(since_time, str):
            since_time = parse_date_string(since_time)
    vars["since"] = make_aware(since_time)
    return vars
Example #4
def parse_legacy_time(ts):
    """
    The new timestrings are of the format YYYY-MM-DDThh:mm+oooo.
    They contain the timezone offset!
    
    Old legacy time strings are of the format DD/MM/YY hh:mm without a
    time zone offset.

    This function parses the string and returns the newly formatted time
    string including the timezone offset.
    :param ts: the time string to parse
    :return: the time string in the new format
    """
    from privacyidea.lib.tokenclass import DATE_FORMAT
    d = parse_date_string(ts)
    if not d.tzinfo:
        # the legacy format carries no timezone information, so reparse
        # day-first and attach the local timezone explicitly
        d = parse_date_string(ts, dayfirst=True).replace(tzinfo=tzlocal())
    return d.strftime(DATE_FORMAT)
Example #5
def parse_date(date_string):
    """
    Parses a string like

      +30d
      +12h
      +10m

    and returns a datetime object that is 30 days, 12 hours or 10 minutes
    in the future.

    It can also parse fixed date_strings like
    
      23.12.2016 23:30
      23.12.2016
      2016/12/23 11:30pm
      2016/12/23
      2017-04-27T20:00+0200

    :param date_string: a string containing a date or an offset
    :return: datetime object
    """
    date_string = date_string.strip()
    if date_string == "":
        return datetime.now(tzlocal())
    if date_string.startswith("+"):
        # We are using an offset
        delta_specifier = date_string[-1].lower()
        if delta_specifier not in 'mhd':
            # unknown specifier: fall back to "now"
            return datetime.now(tzlocal())
        delta_amount = int(date_string[1:-1])
        if delta_specifier == "m":
            td = timedelta(minutes=delta_amount)
        elif delta_specifier == "h":
            td = timedelta(hours=delta_amount)
        else:
            # delta_specifier must be "d"
            td = timedelta(days=delta_amount)
        return datetime.now(tzlocal()) + td

    # Check fixed dates like 2016/12/23 or 23.12.2016, optionally with
    # hours and minutes.
    d = None
    try:
        # We only use dayfirst if the date string really starts with a day
        # like "01/". If it starts with a year like "2017/..." we do NOT
        # use dayfirst. See https://github.com/dateutil/dateutil/issues/457
        d = parse_date_string(date_string,
                              dayfirst=bool(re.match(r"^\d\d[/\.]",
                                                     date_string)))
    except ValueError:
        log.debug("Dateformat {0!s} could not be parsed".format(date_string))

    return d
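A few illustrative calls (assuming parse_date_string is dateutil.parser.parse and log is a standard logging logger):

import logging
import re
from datetime import datetime, timedelta
from dateutil.parser import parse as parse_date_string
from dateutil.tz import tzlocal

log = logging.getLogger(__name__)

parse_date("+30d")                # now + 30 days, local timezone
parse_date("+10m")                # now + 10 minutes
parse_date("")                    # now
parse_date("23.12.2016 23:30")    # day-first -> 2016-12-23 23:30
parse_date("2016/12/23 11:30pm")  # year-first -> 2016-12-23 23:30
parse_date("not a date")          # logs a debug message, returns None

Note that the offset results are timezone-aware while the fixed-date results are naive unless the string itself carries an offset.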
Example #6
def parse_date(date_string):
    """
    Parses a string like

      +30d
      +12h
      +10m

    and returns a datetime object that is 30 days, 12 hours or 10 minutes
    in the future.

    It can also parse fixed date_strings like
    
      23.12.2016 23:30
      23.12.2016
      2016/12/23 11:30pm
      2016/12/23
      2017-04-27T20:00+0200

    :param date_string: a string containing a date or an offset
    :return: datetime object
    """
    date_string = date_string.strip()
    if date_string == "":
        return datetime.now(tzlocal())
    if date_string.startswith("+"):
        # We are using an offset
        delta_specifier = date_string[-1].lower()
        delta_amount = int(date_string[1:-1])
        if delta_specifier == "m":
            td = timedelta(minutes=delta_amount)
        elif delta_specifier == "h":
            td = timedelta(hours=delta_amount)
        elif delta_specifier == "d":
            td = timedelta(days=delta_amount)
        else:
            td = timedelta()
        return datetime.now(tzlocal()) + td

    # Check fixed dates like 2016/12/23 or 23.12.2016, optionally with
    # hours and minutes.
    d = None
    try:
        d = parse_date_string(date_string, dayfirst=True)
    except ValueError:
        log.debug("Dateformat {0!s} could not be parsed".format(date_string))

    return d
Example #7
def get_newest_log_entry(local_git_path):
    log_file_content = _get_all_logfile_contents(local_git_path)

    log_table_lines = filter(lambda s: s.startswith("|"), log_file_content)
    log_table_lines = list(
        map(lambda s: s.strip("|").split("|"),
            list(log_table_lines)[2:]))

    log_entries = []
    for row in log_table_lines:
        registered = parse_date_string(row[0].strip())
        checksum = row[1].strip()
        commit_hash = row[2].strip()
        log_entries.append(UpdateLogEntry(registered, checksum, commit_hash))

    return max(log_entries,
               key=lambda e: e.registered) if log_entries else None
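The parsing above implies a markdown-style table in the log file: only lines starting with "|" are kept, the first two rows (header and separator) are skipped, and each remaining row carries a registration date, a checksum and a commit hash. A hypothetical file content the function would accept:

| Registered          | Checksum | Commit  |
|---------------------|----------|---------|
| 2021-03-01 12:00:00 | abc123   | deadbee |
| 2021-04-15 09:30:00 | def456   | cafef00 |

UpdateLogEntry and _get_all_logfile_contents are defined elsewhere in the same module.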
Example #8
    def __call__(self, value):
        if self.is_empty(value):
            return self.empty_value

        if self.format:
            dt = datetime.strptime(value, self.format)
            if self.timezone:
                dt = dt.replace(tzinfo=self.timezone)
        else:
            dt = parse_date_string(value,
                                   default=datetime(1, 1, 1),
                                   tzinfos=tzinfos)

        if self.truncate_timezone:
            dt = dt.replace(tzinfo=None)

        if self.truncate_time:
            return dt.date()

        return dt
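The default=datetime(1, 1, 1) argument is what makes partial inputs deterministic: dateutil fills every component missing from the string out of the default instead of out of the current date. A small sketch of that behavior:

from datetime import datetime
from dateutil.parser import parse as parse_date_string

# Without a default, "March 5" would inherit the current year.
print(parse_date_string("March 5", default=datetime(1, 1, 1)))
# 0001-03-05 00:00:00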
Example #9
def main(birthday: str,
         books_per_month: int = 0,
         coffee_per_month: int = 0,
         male: bool = False):
    birthday = parse_date_string(birthday)
    age_delta = relativedelta.relativedelta(datetime.now(), birthday)
    age_months = (age_delta.years * 12) + age_delta.months

    render_to_year = 90

    # Each entry in p_die_in_one_year is the probability of dying within one
    # year at a certain age, given that you've just turned that age.
    # So, p_die_in_one_year[0] is the probability you'll die during your
    # first year of life, given that you've just been born.
    # Data comes from this table: https://www.ssa.gov/oact/STATS/table4c6.html
    p_die_in_one_year = []
    death_prob_per_year_file = './death-prob-men.tsv' if male else './death-prob-women.tsv'
    with open(death_prob_per_year_file, 'r') as death_probability_raw:
        for death_prob_per_year in death_probability_raw:
            p_die_in_one_year.append(float(death_prob_per_year))

    render(p_die_in_one_year, render_to_year, age_months, books_per_month,
           coffee_per_month)
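The per-year death probabilities q(x) from the SSA table compose multiplicatively: the chance of reaching age N is the product of surviving each year before it. A minimal sketch of the arithmetic the render step presumably builds on (render itself is defined elsewhere):

def p_alive_at(age, p_die_in_one_year):
    # probability of reaching `age`, starting from birth
    p = 1.0
    for q in p_die_in_one_year[:age]:
        p *= 1.0 - q  # survive one more year
    return p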
Example #10
def parse_non_naive_dates(datetimes: typing.Sequence[str], *args,
                          **kwargs) -> typing.Sequence[datetime]:
    """
    A datetime parser for pandas that ensures that all parsed dates and times have a time zone

    The timezone will be UTC if none is given

    Args:
        datetimes: A sequence of strings to be parsed as dates

    Returns:
        A sequence of non-naive datetimes
    """
    data = list()

    for date_string in datetimes:
        date_and_time = parse_date_string(str(date_string))

        if date_and_time.tzinfo is None:
            date_and_time = date_and_time.replace(tzinfo=timezone.utc)

        data.append(date_and_time)

    return data
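A quick check of the guarantee (assuming parse_date_string is dateutil.parser.parse): naive inputs come back pinned to UTC, while inputs that already carry an offset keep it:

from datetime import datetime, timezone
from dateutil.parser import parse as parse_date_string

dates = parse_non_naive_dates(["2021-01-01 12:00", "2021-01-01T12:00+0200"])
print(dates[0].tzinfo)  # UTC                  (attached, the string was naive)
print(dates[1].tzinfo)  # tzoffset(None, 7200) (kept from the string)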
Example #11
def statistics():
    """
    get the statistics values from the audit log

    :jsonparam days: The number of days to run the stats
    :jsonparam start: The start time to run the stats
    :jsonparam end: The end time to run the stats

    If start or end is missing, the ``days`` are used.

    The time is to be passed in the format
        yyyy-MM-ddTHH:mmZ

    **Example request**:

    .. sourcecode:: http

       GET /audit/statistics HTTP/1.1
       Host: example.com
       Accept: application/json

    **Example response**:

    .. sourcecode:: http

       HTTP/1.1 200 OK
       Content-Type: application/json

        {
          "id": 1,
          "jsonrpc": "2.0",
          "result": {
            "status": true,
            "value": [
              {
                 "serial_plot": "...image data...",
              }
            ]
          },
          "version": "privacyIDEA unknown"
        }
    """
    days = int(getParam(request.all_data, "days", default=7))
    start = getParam(request.all_data, "start")
    if start:
        start = parse_date_string(start)

    end = getParam(request.all_data, "end")
    if end:
        end = parse_date_string(end)

    if not end and not start:
        end = datetime.datetime.now(tzlocal())
        start = end - datetime.timedelta(days=days)

    else:
        if not end:
            end = start + datetime.timedelta(days=days)
        elif not start:
            start = end - datetime.timedelta(days=days)

    stats = get_statistics(g.audit_object,
                           start_time=start, end_time=end)
    stats["time_start"] = start
    stats["time_end"] = end
    g.audit_object.log({'success': True})
    return send_result(stats)
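The start/end/days interplay distills to a small helper. This is a hypothetical restatement of the logic above, not a privacyidea API: no bounds means the last days days ending now, a lone start extends forward, a lone end extends backward.

import datetime
from dateutil.tz import tzlocal

def resolve_window(start, end, days):
    if not start and not end:
        end = datetime.datetime.now(tzlocal())
        start = end - datetime.timedelta(days=days)
    elif not end:
        end = start + datetime.timedelta(days=days)
    elif not start:
        start = end - datetime.timedelta(days=days)
    return start, end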
Example #12
def get_episode_list(series_soup, series):
    episode_list = []
    season = 0
    from_wikipedia = WIKIPEDIA in app.config['SHOW_DICT_WITH_NAMES'][series]['root']

    if not from_wikipedia:
        tables = series_soup.find_all('table')
    else:
        tables = series_soup.find_all('table', class_='wikiepisodetable')

    for table in tables:
        table_name = table.getText().lower()
        if 'series overview' in table_name:
            continue

        if 'season' not in table_name:
            if series.upper() not in [CONSTANTINE, FREEDOM_FIGHTERS]:
                continue

        season += 1

        if not from_wikipedia:
            table = [
                row.strip().split('\n')
                for row in table.getText().split('\n\n') if row.strip()
            ]
        else:
            table_heading = table.find(name='tr', class_=None)
            table_headings = [
                heading.getText().replace(' ', '').lower()
                for heading in table_heading.children
            ]
            episode_num_index = table_headings.index('no.inseason')
            title_index = table_headings.index('title')
            air_date_index = table_headings.index('originalairdate')

            wikipedia_row_unpacker = itemgetter(episode_num_index, title_index, air_date_index)

            table = [
                [episode_row_col.getText() for episode_row_col in wikipedia_row_unpacker(episode_row.contents)]
                for episode_row in table.find_all(class_='vevent')
            ]

        for row in table:
            # TODO: Make more robust - protects against rows that don't have enough data
            if len(row) < 3:
                continue

            if from_wikipedia:
                row[-1] = row[-1].split('(')[0].replace('\xa0', ' ').strip()

            episode_name = row[-2].replace('"', '')
            if '[' in episode_name:
                episode_name = episode_name.split('[')[0]

            episode_num = row[-3]
            try:
                date = row[-1]
                reference = re.search(r'\[\d+\]$', row[-1])
                date = date[:reference.start()] if reference else date
                row[-1] = air_date = parse_date_string(date).date()
            except ValueError:
                continue

            if air_date and 'TBA' not in row:
                episode_id = f'S{season:>02}E{episode_num:>02}'
                episode_data = {
                    'series': series,
                    'episode_id': episode_id,
                    'episode_name': episode_name,
                    'air_date': air_date,
                }
                episode_list.append(episode_data)

    return episode_list
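The date cleanup before parsing is the fiddly part: Wikipedia cells use non-breaking spaces and often end in a footnote reference. A sketch of the scrubbing steps on a hypothetical cell value:

import re
from dateutil.parser import parse as parse_date_string

cell = 'December\xa04,\xa02016[12]'
cell = cell.split('(')[0].replace('\xa0', ' ').strip()  # drop "(...)" part, fix spacing
reference = re.search(r'\[\d+\]$', cell)                # trailing "[12]", if any
cell = cell[:reference.start()] if reference else cell
print(parse_date_string(cell).date())                   # 2016-12-04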
Example #13
def update_case_meta_data(project_id):
    """
    View for handling changes to datapool objects (assignments etc.)
    """

    image_object = request.json
    segmentation_object = image_object["manual_segmentation"]

    # Find the image and segmentation object (the image id is assumed to
    # arrive in the request payload)
    case_id = image_object["id"]
    image = db.session.query(Image).filter(Image.id == case_id).first()
    manual_segmentation = image.manual_segmentation

    # Update values for image
    for column in Image.__table__.columns:
        column_name = column.name
        if column_name in image_object:
            value = image_object[column_name]
            if value is None:
                continue
            if isinstance(column.type, (DateTime, Date)):
                value = datetime.strptime(image_object[column_name],
                                          '%a, %d %b %Y %H:%M:%S %Z')
            setattr(image, column_name, value)

    # split type, Contrast type and modality
    modality_name = image_object["modality"]
    modality = db.session.query(Modality).filter(
        Modality.name == modality_name).filter(
            Modality.project_id == image.project_id).first()
    image.modality = modality

    contrast_type_name = image_object["contrast_type"]
    contrast_type = db.session.query(ContrastType).filter(
        ContrastType.name == contrast_type_name).filter(
            ContrastType.project_id == image.project_id).first()

    image.contrast_type = contrast_type

    split_type_name = image_object["split_type"]
    split_type = db.session.query(SplitType).filter(
        SplitType.name == split_type_name).filter(
            SplitType.project_id == image.project_id).first()

    image.split_type = split_type

    # Update values for segmentation
    for column in ManualSegmentation.__table__.columns:
        column_name = column.name
        value = segmentation_object[column_name]
        if value is not None and isinstance(column.type, (DateTime, Date)):
            value = parse_date_string(value)

        setattr(manual_segmentation, column_name, value)

    # Append messages
    if "new_message" in segmentation_object:
        message = segmentation_object["new_message"]
        message = Message(user=current_user,
                          date=datetime.now(),
                          message=message,
                          image=image,
                          image_id=image.id)
        image.messages.append(message)

    db.session.commit()

    return json.dumps({'success': True}), 200, {
        'ContentType': 'application/json'
    }
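Both column loops follow the same pattern: walk the model's SQLAlchemy columns, look the name up in the incoming dict, coerce values bound for date-typed columns, then setattr. A hedged generic version (the helper name is hypothetical):

from sqlalchemy import Date, DateTime
from dateutil.parser import parse as parse_date_string

def apply_payload(model_instance, payload):
    # copy matching keys from a dict onto a SQLAlchemy model, parsing
    # values destined for Date/DateTime columns
    for column in type(model_instance).__table__.columns:
        if column.name not in payload:
            continue
        value = payload[column.name]
        if value is not None and isinstance(column.type, (DateTime, Date)):
            value = parse_date_string(value)
        setattr(model_instance, column.name, value)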
Example #14
def get_episode_list(series_soup, series):
    episode_list = []
    season = 0
    from_wikipedia = WIKIPEDIA in app.config['SHOW_DICT_WITH_NAMES'][series]['root']

    if not from_wikipedia:
        tables = series_soup.find_all('table')
    else:
        tables = series_soup.find_all('table', class_='wikiepisodetable')

    for table in tables:
        table_name = table.getText().lower()
        if 'series overview' in table_name:
            continue

        if 'season' not in table_name:
            if series.upper() not in [CONSTANTINE, FREEDOM_FIGHTERS, BATWOMAN]:
                continue

        season += 1

        if not from_wikipedia:
            table = [
                row.strip().split('\n')
                for row in table.getText().split('\n\n') if row.strip()
            ]
        else:
            table_heading = table.find(name='tr', class_=None)
            table_headings = [
                heading.getText().replace(' ', '').lower()
                for heading in table_heading.children
            ]
            episode_num_index = table_headings.index('no.inseason')
            title_index = table_headings.index('title')
            air_date_index = table_headings.index('originalairdate')

            wikipedia_row_unpacker = itemgetter(episode_num_index, title_index, air_date_index)

            table = [
                [
                    episode_row_col.getText()
                    for episode_row_col in wikipedia_row_unpacker(episode_row.contents)
                ]
                for episode_row in table.find_all(class_='vevent')
            ]

        for row in table:
            # TODO: Make more robust - protects against rows that don't have enough data
            if len(row) < 3:
                continue

            if from_wikipedia:
                row[-1] = row[-1].split('(')[0].replace('\xa0', ' ').strip()

            episode_name = row[-2].replace('"', '')
            if '[' in episode_name:
                episode_name = episode_name.split('[')[0]

            episode_num = row[-3]
            try:
                date = row[-1]
                reference = re.search(r'\[\d+\]$', row[-1])
                date = date[:reference.start()] if reference else date
                row[-1] = air_date = parse_date_string(date).date()
            except ValueError:
                continue

            if air_date and 'TBA' not in row:
                episode_id = f'S{season:>02}E{episode_num:>02}'
                episode_data = {
                    'series': series,
                    'episode_id': episode_id,
                    'episode_name': episode_name,
                    'air_date': air_date,
                }
                episode_list.append(episode_data)

    return episode_list
Example #15
def get_datetime_created_at(obj):
    try:
        return parse_date_string(obj.get('occurred_at'))
    except ValueError:
        raise ValidationError('Not a valid datetime string.')
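One detail worth knowing when wrapping the call like this: recent dateutil versions raise dateutil.parser.ParserError rather than a bare ValueError, but ParserError subclasses ValueError, so the except clause above still catches it. Note, however, that obj.get('occurred_at') returning None makes the parser raise TypeError, which this handler would not catch. A quick check:

from dateutil.parser import parse as parse_date_string

try:
    parse_date_string("not a timestamp")
except ValueError as err:  # also catches dateutil's ParserError subclass
    print(type(err).__name__)  # ParserError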
Example #16
def get_episode_list(series_soup, series):
    episode_list = []
    season = 0
    from_wikipedia = WIKIPEDIA in app.config['SHOW_DICT_WITH_NAMES'][series][
        'root']

    if not from_wikipedia:
        tables = series_soup.find_all('table')
    else:
        tables = series_soup.find_all('table', class_='wikiepisodetable')

    for table in tables:
        table_name = table.getText().lower()
        if 'series overview' in table_name:
            continue

        if 'season' not in table_name:
            if series.upper() not in [CONSTANTINE, FREEDOM_FIGHTERS, BATWOMAN]:
                continue

        season += 1

        if not from_wikipedia:
            table_text = table.getText()
            # TODO: Kill this HOTFIX
            if 'Crisis on Infinite Earths: Part Five' in table_text:
                table_text = table_text.replace(
                    '68\n\n"Crisis on Infinite Earths: Part Five"',
                    '68\n0\n"Crisis on Infinite Earths: Part Five"',
                )

            table = [
                row.strip().split('\n\n')
                for row in table_text.split('\n\n\n') if row.strip()
            ]
        else:
            table_heading = table.find(name='tr', class_=None)
            # NOTE: The split here is a fix for a reference in the Stargirl air date header
            table_headings = [
                heading.getText().replace(' ', '').lower().split('\u200a',
                                                                 1)[0]
                for heading in table_heading.children
            ]

            episode_num_index = None
            title_index = None
            air_date_index = None
            for index, heading in enumerate(table_headings):
                if 'no.inseason' in heading:
                    episode_num_index = index
                elif 'title' in heading:
                    title_index = index
                elif 'originalairdate' in heading:
                    air_date_index = index
                elif 'originalreleasedate' in heading:
                    air_date_index = index

            wikipedia_row_unpacker = itemgetter(episode_num_index, title_index,
                                                air_date_index)

            table = [[
                episode_row_col.getText() for episode_row_col in
                wikipedia_row_unpacker(episode_row.contents)
            ] for episode_row in table.find_all(class_='vevent')]

        for row in table:
            # TODO: Make more robust - protects against rows that don't have enough data
            if len(row) <= 2:
                continue

            if from_wikipedia:
                row[-1] = row[-1].split('(')[0].replace('\xa0', ' ').strip()

            episode_name = row[-2].replace('"', '')
            if '[' in episode_name:
                episode_name = episode_name.split('[')[0]

            episode_num = row[-3]
            try:
                date = row[-1]
                reference = re.search(r'\[\d+\]$', row[-1])
                date = date[:reference.start()] if reference else date
                row[-1] = air_date = parse_date_string(date).date()
            except ValueError:
                continue

            if air_date and 'TBA' not in row:
                episode_id = f'S{season:>02}E{episode_num:>02}'
                episode_data = {
                    'series': series,
                    'episode_id': episode_id,
                    'episode_name': episode_name,
                    'air_date': f'{air_date:%Y-%m-%d}',
                }
                episode_list.append(episode_data)

    return episode_list
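A final note on the f-strings that build the identifiers: {season:>02} zero-pads to two digits (also for the string episode numbers pulled from the table), and {air_date:%Y-%m-%d} delegates to the datetime format protocol, so the dict ends up with values like these:

from datetime import date

season, episode_num, air_date = 1, '5', date(2016, 12, 4)
print(f'S{season:>02}E{episode_num:>02}')  # S01E05
print(f'{air_date:%Y-%m-%d}')              # 2016-12-04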