Example #1
    def get_allowed_time_periods(self):
        time_periods = []

        if self.allowed_times:
            for p in self.allowed_times.split('\n'):
                if p:
                    start, end = p.split(' > ')
                    try:
                        time_periods.append(
                            period(
                                parse_date(start.strip()),
                                parse_date(end.strip()),
                            )
                        )
                    # If someone has entered garbage, dump the lot
                    except ValueError as e:
                        time_periods = []
                        break

        # If we've not overridden it, use the user-specified periods
        if not time_periods and self.available_times:
            for p in self.available_times.split(','):
                if p:
                    time_periods.append(timeslot_to_period(p.strip(), type=self.type))

        time_periods = self.fix_hard_time_limits(time_periods)
        return make_periods_contiguous(time_periods)
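For reference, a minimal sketch of the "start > end" line format this method expects in allowed_times, assuming parse_date is dateutil's parse (the timestamps and the helper-free loop are hypothetical):

from dateutil.parser import parse as parse_date

allowed_times = "2016-09-02 10:00 > 2016-09-02 18:00\n2016-09-03 10:00 > 2016-09-03 18:00"
for line in allowed_times.split('\n'):
    start, end = line.split(' > ')
    print(parse_date(start.strip()), parse_date(end.strip()))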
Example #2
def valid_date(key, config_date):
    """ traildb bucket folders are not zero-padded so this validation
        checks that the keys returned by the paginator are
        *really after* the config date
    """
    key_date = "/".join(key.split("/")[4:7])
    return parse_date(key_date) > parse_date(config_date)
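A hedged usage sketch: the bucket prefix and the year/month/day layout in the fifth to seventh path segments (matching the [4:7] slice) are assumptions, not taken from the original.

key = "logs/traildb/v1/events/2016/3/7/part-0000.tdb"   # hypothetical, non-zero-padded key
print(valid_date(key, "2016/3/1"))   # True: 2016/3/7 is strictly after the config date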
Example #3
def get_json_events(url):
    f = urllib.urlopen(url)
    raw_json = f.read()
    f.close()

    events_json = json.loads(raw_json)

    events = events_json.get('events', [])
    events = [event for event in events if upcoming(event)]
    items = []
    if len(events) > 0:
        events.sort(key = lambda e: parse_date(e.get('date')))
        for event in events:
            title = event.get('title', 'Untitled event')
            start_date = parse_date(event.get('date'))
            event_date_str = make_date_range(start_date, start_date) # only handle single day events for now
            event_tags = event.get('tags', [])
            event_url = event.get('url', None)
            entry = make_link(title, event_url) + "<br>"
            entry += event_date_str
            if len(event_tags) > 0:
                entry += "<br>"
                entry += " ".join([make_tag(tag) for tag in event_tags])
            items.append(entry)
        return make_ul(items)
    else:
        return "<p>No upcoming events</p>"
Example #4
def get_category():
    category = request.args.get('category')
    date_from = request.args.get('date_from')
    date_to = request.args.get('date_to')
    currency = request.args.get('currency') or app.config['DEFAULT_CURRENCY']

    if not (category and date_from and date_to):
        return abort(400)

    date_from = parse_date(date_from)
    date_to = parse_date(date_to)

    payments = Payment.query.filter_by(category=category).\
                  filter(Payment.date.between(date_from, date_to)).\
                  filter(Payment.amount > 0).\
                  order_by(Payment.date.desc()).all()

    response = []
    for payment in payments:
        item = {
        'date': payment.date.strftime('%Y-%m-%d'),
        'amount': '%.2f' % payment.convert_to(currency),
        'description': payment.description
        }
        response.append(item)

    return json.dumps(response)
Example #5
	def add_arguments(self, parser):
		parser.add_argument('dataset', help='The id of the dataset')
		parser.add_argument('start_date', type = lambda s: parse_date(s), help='The start date')
		parser.add_argument('end_date', nargs = '?', type = lambda s: parse_date(s), default = datetime.utcnow(), help='The end date')
		parser.add_argument('--update', default = False, action='store_true', help='Update metadata even if already present in DB')
		parser.add_argument('--tags', default = [], nargs='*', help='A list of tag names to set to the metadata')
		parser.add_argument('--debug', default = False, action='store_true', help='Show debugging info')
Example #6
def process_commit(c, r, metrics, stats, since, now):
    # Use the most recent of the commit's authored/committed/pushed dates
    # TODO find commit dates
    commit_date = parse_date(max(filter(
        None, (
            c['authoredDate'],
            c['committedDate'],
            c['pushedDate']))))

    if commit_date < since:
        return

    found = False
    for status_ctx in c['status']['contexts']:
        if status_ctx['context'] == metrics.dims['Hook']:
            status_time = parse_date(status_ctx['createdAt'])
            found = True
    if found:
        tdelta = (status_time - commit_date).total_seconds()
        metrics.put_metric('RepoHookLatency', tdelta, 'Seconds')
        stats['runtime'] += tdelta
        stats['count'] += 1
    else:
        stats['missing'] += 1
        stats['missing_time'] += (now - commit_date).total_seconds()
Example #7
    def _make_issue(self, project_id, issue_number, json):
        d = {}
        d['number'] = issue_number
        d['title'] = json['title']
        d['body'] = json['body'].strip()
        d['creation'] = parse_date(json['created_at'])
        d['updated'] = parse_date(json['updated_at'])
        d['author'] = json['user']['login']
        d['status'] = json['state']
        d['url'] = 'https://github.com/%s/issues/%s' % (project_id, issue_number)

        if json['assignee']:
            d['assignee'] = json['assignee']['login']
        else:
            d['assignee'] = None
        if json['milestone']:
            d['version'] = json['milestone']
        else:
            d['version'] = None
        d['has_comments'] = (json['comments'] > 0)
        d['attachments'] = list(self._extract_attachments(d['body']))
        d['labels'] = json['labels']

        # TODO fetch other updates?
        return d
Example #8
    def create_calendar(self, data, **kwargs):
        for row in data:
            try:
                id = int(row['service_id'])
            except ValueError:
                print 'WARNING: Could not process service id of %s' % row
                continue

            begin, end = None, None

            if row['start_date']:
                begin = parse_date(row['start_date'])

            if row['end_date']:
                end = parse_date(row['end_date'])

            defaults = {
                'begin': begin,
                'end': end
            }

            for dow in ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday']:
                defaults[dow] = str(row[dow]) == '1'

            try:
                Schedule(id=id, **defaults).save()
            except:
                print 'Schedule exists'
                continue
Example #9
    def setUp(self):
        super(VolumeTests, self).setUp()
        self.test_vol_1_box = BoxVolume(self.test_project_id, self.test_user_id, {
                'title': 'Test volume 1',
                'type': 'box',
                'comment': 'Comment on test volume 1',
                'min_x': -1,
                'min_y': -1,
                'min_z': -1,
                'max_x': 1,
                'max_y': 1,
                'max_z': 1
            })
        self.test_vol_1_id = self.test_vol_1_box.save()

        cursor = connection.cursor()
        cursor.execute("""
            SELECT row_to_json(v) FROM (
                SELECT id, project_id, name, comment, user_id, editor_id,
                    creation_time, edition_time, Box3D(geometry) as bbox,
                    ST_Asx3D(geometry) as geometry
                FROM catmaid_volume v
            ) v
        """)
        self.test_vol_1_data = cursor.fetchall()[0][0]
        self.test_vol_1_data['creation_time'] = parse_date(self.test_vol_1_data['creation_time'])
        self.test_vol_1_data['edition_time'] = parse_date(self.test_vol_1_data['edition_time'])
Example #10
def _transaction_from_tree(tree, accountdict, commoditydict):
    trn = '{http://www.gnucash.org/XML/trn}'
    cmdty = '{http://www.gnucash.org/XML/cmdty}'
    ts = '{http://www.gnucash.org/XML/ts}'
    split = '{http://www.gnucash.org/XML/split}'

    guid = tree.find(trn + "id").text
    currency_space = tree.find(trn + "currency/" +
                               cmdty + "space").text
    currency_name = tree.find(trn + "currency/" +
                               cmdty + "id").text
    currency = commoditydict[(currency_space, currency_name)]
    date = parse_date(tree.find(trn + "date-posted/" +
                                       ts + "date").text)
    date_entered = parse_date(tree.find(trn + "date-entered/" +
                                        ts + "date").text)
    description = tree.find(trn + "description").text
    slots = _slots_from_tree(tree.find(trn + "slots"))
    transaction = Transaction(guid=guid,
                              currency=currency,
                              date=date,
                              date_entered=date_entered,
                              description=description,
                              slots=slots)

    for subtree in tree.findall(trn + "splits/" + trn + "split"):
        split = _split_from_tree(subtree, accountdict, transaction)
        transaction.splits.append(split)

    return transaction
Example #11
def _received_in_time(record, extra_data):
    """Check if publication is not older than 24h """
    api_url = current_app.config.get('CROSSREF_API_URL')

    api_response = requests.get(api_url % get_first_doi(record))
    if api_response.status_code != 200:
        return True, ('Article is not on crossref.', ), 'Api response: %s' % api_response.text

    details_message = ""
    api_message = api_response.json()['message']

    if 'publication_info' in record and \
            record['publication_info'][0]['journal_title'] == 'Progress of Theoretical and Experimental Physics':
        parts = api_message['published-online']['date-parts'][0]
        # if we don't have month or day substitute it with 1
        if len(parts) < 3:
            parts.extend([1] * (3 - len(parts)))
            details_message += 'Month and/or day is missing, substitute it with "1".'
        # only contains day of publication, check for end of day
        api_time = datetime(*parts, hour=23, minute=59, second=59)
        time_source = '"published online" field'
    else:
        api_time = parse_date(api_message['created']['date-time'], ignoretz=True)
        time_source = 'crossref'
    received_time = parse_date(record['record_creation_date'])
    delta = received_time - api_time

    check_accepted = delta <= timedelta(hours=24)
    details_message += 'Arrived %d hours later than the creation date on crossref.org.' % (delta.total_seconds() / 3600)
    debug = 'Time from %s: %s, Received time: %s' % (time_source, api_time, received_time)

    return check_accepted, (details_message, ), debug
Example #12
def parse_date_safely(x):
    x = str(x)
    try:
        return parse_date(x)
    except ValueError:
        # http://stackoverflow.com/questions/14595401
        return parse_date(x + '-01-01')
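A brief usage note: which inputs hit the fallback depends on how strict the underlying parse_date is; with a strict ISO parser a bare year needs the '-01-01' suffix, e.g.:

print(parse_date_safely(2014))          # with a strict parser, falls back to parsing '2014-01-01'
print(parse_date_safely('2014-06-15'))  # parses directly, no fallback needed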
Example #13
def _slots_from_tree(tree):
    if tree is None:
        return {}
    slot = "{http://www.gnucash.org/XML/slot}"
    ts = "{http://www.gnucash.org/XML/ts}"
    slots = {}
    for elt in tree.findall("slot"):
        key = elt.find(slot + "key").text
        value = elt.find(slot + "value")
        type_ = value.get('type', 'string')
        if type_ in ('integer', 'double'):
            slots[key] = int(value.text)
        elif type_ == 'numeric':
            slots[key] = _parse_number(value.text)
        elif type_ in ('string', 'guid'):
            slots[key] = value.text
        elif type_ == 'gdate':
            slots[key] = parse_date(value.find("gdate").text)
        elif type_ == 'timespec':
            slots[key] = parse_date(value.find(ts + "date").text)
        elif type_ == 'frame':
            slots[key] = _slots_from_tree(value)
        else:
            raise RuntimeError("Unknown slot type {}".format(type_))
    return slots
Example #14
    def ValidTravelPlanned(self, userInfo, request):
        passFlag = True
        if len(userInfo['destination']) < 1:
            messages.warning(request, 'Destination field left blank.')
            passFlag = False
        if len(userInfo['description']) < 1:
            messages.warning(request, 'Description field left blank.')
            passFlag = False

        unicode_text_from = userInfo['travel_date_from']
        from_date = parse_date(unicode_text_from)
        if from_date.date() < date.today():
            messages.warning(request, 'Travel from date cannot be in the past.')
            passFlag = False
        unicode_text_to = userInfo['travel_date_to']
        to_date = parse_date(unicode_text_to)
        if to_date.date() < date.today():
            messages.warning(request, 'Travel to date cannot be in the past.')
            passFlag = False
        if to_date < from_date:
            messages.warning(request, 'Travel to date cannot be before travel from date.')
            passFlag = False

        if passFlag:
            logged_in = request.session['logged_in']
            travelplanner = User.objects.get(id=logged_in)
            travelmaker = User.objects.get(id=logged_in)
            travel = self.create(destination = userInfo['destination'], description = userInfo['description'],
                                 travel_date_from = userInfo['travel_date_from'], travel_date_to = userInfo['travel_date_to'],
                                 travelplanner_id = travelplanner)
        return passFlag
Example #15
    def __query_loads__(self, request_query):
        """ """
        # Some fields might express a date
        # We try to convert those strings to datetime
        indexes = self.getParentDatabase().getIndex().Indexes
        request_query = json.loads(request_query)
        for key, value in request_query.iteritems():
            if key in indexes:
                index = indexes[key]
                # This is lame: we should check if it quacks, not
                # if it's a duck!
                # XXX Use a more robust method to tell apart
                # date indexes from non-dates

                if isinstance(index, DateIndex):
                    # convert value(s) to date(s)
                    if isinstance(value, basestring):
                        request_query[key] = parse_date(value)
                    elif 'query' not in value:
                        # it means value is a list of date values to be used
                        # with the implicit default operator query OR
                        request_query[key] = map(parse_date, value)
                    else:
                        # it means value is a dictionary
                        if isinstance(value['query'], basestring):
                            # query got a single comparison value
                            request_query[key]['query'] = parse_date(
                                value['query'])
                        else:
                            # query got a list of comparison values
                            request_query[key]['query'] = map(
                                parse_date, value['query']
                            )

        return request_query
Example #16
    def test_project_creation(
            self, app, user, public_project, private_project,
            public_url, private_url):

        #   test_project_created
        res = app.get(public_url)
        assert res.status_code == 200
        assert len(res.json['data']) == public_project.logs.count()
        assert public_project.logs.first().action == 'project_created'
        assert public_project.logs.first(
        ).action == res.json['data'][API_LATEST]['attributes']['action']

    #   test_log_create_on_public_project
        res = app.get(public_url)
        assert res.status_code == 200
        assert len(res.json['data']) == public_project.logs.count()
        assert_datetime_equal(
            parse_date(
                res.json['data'][API_FIRST]['attributes']['date']),
            public_project.logs.first().date)
        assert res.json['data'][API_FIRST]['attributes']['action'] == public_project.logs.first(
        ).action

    #   test_log_create_on_private_project
        res = app.get(private_url, auth=user.auth)
        assert res.status_code == 200
        assert len(res.json['data']) == public_project.logs.count()
        assert_datetime_equal(
            parse_date(
                res.json['data'][API_FIRST]['attributes']['date']),
            private_project.logs.first().date)
        assert res.json['data'][API_FIRST]['attributes']['action'] == private_project.logs.first(
        ).action
Example #17
def search(request):
	if 'q' in request.GET and request.GET['q']:
		TAG = request.GET['q']
	if 'start' in request.GET and request.GET['start']:
		START = parse_date(request.GET['start'])
		END = parse_date(request.GET['end'])
	store_data(TAG, START, END)
	return render(request, 'webapp/base.html')
Example #18
    def parse(self, filename):
        tree = etree.parse(filename)
        root = tree.getroot()
        for entry in root.findall("default:entry", CveXmlParser.NS):
            cve = {}
            cve["cveid"] = entry.find("vuln:cve-id", CveXmlParser.NS).text
            cve["summary"] = entry.find("vuln:summary", CveXmlParser.NS).text

            try:
                cve["cwe"] = entry.find("vuln:cwe", CveXmlParser.NS).get("id")
            except AttributeError:
                cve["cwe"] = ""
                pass

            cve["published_date"] = parse_date(entry.find("vuln:published-datetime", CveXmlParser.NS).text)
            cve["modified_date"] = parse_date(entry.find("vuln:last-modified-datetime", CveXmlParser.NS).text)

            if self.ignore_before_date and cve["modified_date"] < self.ignore_before_date:
                continue

            try:
                cvss_base = entry.find("vuln:cvss", CveXmlParser.NS).find("cvss:base_metrics", CveXmlParser.NS)
                cve["cvss_score"] = cvss_base.find("cvss:score", CveXmlParser.NS).text

                cve["cvss"] = "/".join("%s:%s" % (k,cvss_base.find("cvss:%s" % k, CveXmlParser.NS).text)
                                       for k in ["access-vector", "access-complexity",
                                                 "authentication", "confidentiality-impact",
                                                 "integrity-impact", "availability-impact"])
            except AttributeError:
                cve["cvss_score"] = "-1"
                cve["cvss"] = ""

            try:
                cpes = []
                for cpe in entry.find("vuln:vulnerable-software-list", CveXmlParser.NS).findall("vuln:product", CveXmlParser.NS):
                    cpes.append(cpe.text)
                cve["cpes"] = cpes
            except AttributeError:
                cve["cpes"] = ""



            try:
                refs = []
                for ref in entry.findall("vuln:references", CveXmlParser.NS):
                    ref_obj = {}
                    ref_obj["type"] = ref.get("reference_type")
                    ref_obj["source"] = ref.find("vuln:source", CveXmlParser.NS).text
                    ref_obj["href"] = ref.find("vuln:reference", CveXmlParser.NS).get("href")
                    ref_obj["value"] = ref.find("vuln:reference", CveXmlParser.NS).text
                    refs.append(ref_obj)
                cve["references"] = refs
            except AttributeError as e:
                cve["references"] = []

            self.cve_list.append(cve)

        return self.cve_list
Example #19
    def iter_threads(self):
        for thread in self.browser.get_threads():
            if not "person" in thread:
                # The account has been removed, probably because it was a
                # spammer.
                continue

            t = Thread(thread["_id"])
            t.flags = Thread.IS_DISCUSSION
            t.title = u"Discussion with %s" % thread["person"]["name"]
            contact = self.storage.get("contacts", t.id, default={"lastmsg": 0})

            birthday = parse_date(thread["person"]["birth_date"]).date()
            signature = u"Age: %d (%s)" % ((datetime.date.today() - birthday).days / 365.25, birthday)
            signature += u"\nLast ping: %s" % parse_date(thread["person"]["ping_time"]).strftime("%Y-%m-%d %H:%M:%S")
            signature += u"\nPhotos:\n\t%s" % "\n\t".join([photo["url"] for photo in thread["person"]["photos"]])
            signature += u"\n\n%s" % thread["person"]["bio"]

            t.root = Message(
                thread=t,
                id=1,
                title=t.title,
                sender=unicode(thread["person"]["name"]),
                receivers=[self.browser.my_name],
                date=parse_date(thread["created_date"]),
                content=u"Match!",
                children=[],
                signature=signature,
                flags=Message.IS_UNREAD if int(contact["lastmsg"]) < 1 else 0,
            )
            parent = t.root

            for msg in thread["messages"]:
                flags = 0
                if int(contact["lastmsg"]) < msg["timestamp"]:
                    flags = Message.IS_UNREAD

                msg = Message(
                    thread=t,
                    id=msg["timestamp"],
                    title=t.title,
                    sender=unicode(
                        self.browser.my_name if msg["from"] == self.browser.my_id else thread["person"]["name"]
                    ),
                    receivers=[
                        unicode(self.browser.my_name if msg["to"] == self.browser.my_id else thread["person"]["name"])
                    ],
                    date=parse_date(msg["sent_date"]),
                    content=unicode(msg["message"]),
                    children=[],
                    parent=parent,
                    signature=signature if msg["to"] == self.browser.my_id else u"",
                    flags=flags,
                )
                parent.children.append(msg)
                parent = msg

            yield t
Example #20
  def next_build_in(self):
    # returns the number of seconds until the next build or INFINITY
    # if the project will never be built (errors, paused, etc)

    if hasattr(self, 'next_build_at') and self.next_build_at is not None:
      interval = parse_date(self.next_build_at) - parse_date(self.current_date_time)
      return interval.total_seconds()
    else:
      return INFINITY
Example #21
 def sortbyctime(a, b):
     da = parse_date(a)
     db = parse_date(b)
     if da > db:
         return 1
     elif da == db:
         return 0
     else:
         return -1
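Note that sortbyctime is an old-style cmp comparator; on Python 3 it would need functools.cmp_to_key. A hypothetical sketch, assuming parse_date is dateutil's parse:

import functools

timestamps = ['2017-01-02T10:00:00', '2016-12-31T23:59:59']
timestamps.sort(key=functools.cmp_to_key(sortbyctime))   # oldest first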
Example #22
 def process(self, row):
     _, author_names, editor_names, published, publisher, title, subtitle, url, resource_type, \
         keyword_names, abstract, review, fulltext_url, category, discussion, journal, volume, \
         number, startpage, endpage, series, edition, sourcetype = row[:23]
     if Resource.objects.filter(url=url).exists():
         logger.info('Skipping existing entry %r', title)
         return
     authors = [Person.objects.get_or_create(name=author_name.strip())[0]
                for author_name in author_names.split(',')
                if author_name.strip()]
     editors = [Person.objects.get_or_create(name=editor_name.strip())[0]
                for editor_name in editor_names.split(',')
                if editor_name.strip()]
     keywords = [Keyword.objects.get_or_create(name=keyword_name.strip())[0]
                 for keyword_name in keyword_names.split(',')
                 if keyword_name.strip()]
     categories = Category.objects.get_or_create(name=category.strip())[:1]
     if discussion.strip():
         review += '\n\nDiscussion: {}'.format(discussion)
     published = parse_date(published)
     accessed = parse_date('2015-11-03')
     resource_type = {
         'Academic Paper': models_choices.STUDY,
         'Academic Paper (Unpublished)': models_choices.STUDY,
         'Blog Post': models_choices.BLOG_ARTICLE,
         'Book': models_choices.BOOK,
         'Historical Document': models_choices.HISTORICAL_DOCUMENT,
         'Industry Publication': models_choices.RESEARCH_SUMMARY,
         'Newspaper opinion piece': models_choices.OPINION_PIECE,
         'Research Summary': models_choices.RESEARCH_SUMMARY,
         'Wikipedia Entry': models_choices.ENCYCLOPEDIA_ARTICLE,
         '': models_choices.OTHER}[resource_type]
     resource = Resource(
         published=published,
         accessed=accessed,
         publisher=publisher.strip(),
         title=title.strip(),
         subtitle=subtitle.strip(),
         url=url.strip(),
         fulltext_url=fulltext_url.strip(),
         resource_type=resource_type,
         abstract=abstract.strip(),
         review=review.strip(),
         journal=journal.strip(),
         volume=int(volume.strip()) if volume.strip() else None,
         number=int(number.strip()) if number.strip() else None,
         startpage=int(startpage.strip()) if startpage.strip() else None,
         endpage=int(endpage.strip()) if endpage.strip() else None,
         series=series.strip(),
         edition=edition.strip(),
         sourcetype=sourcetype.strip())
     resource.save()
     resource.authors = authors
     resource.editors = editors
     resource.keywords = keywords
     resource.categories = categories
     resource.save()
Example #23
def parse_prometheus(alert, external_url):

    status = alert.get('status', 'firing')

    labels = copy(alert['labels'])
    annotations = copy(alert['annotations'])

    starts_at = parse_date(alert['startsAt'])
    if alert['endsAt'] == '0001-01-01T00:00:00Z':
        ends_at = None
    else:
        ends_at = parse_date(alert['endsAt'])

    if status == 'firing':
        severity = labels.pop('severity', 'warning')
        create_time = starts_at
    elif status == 'resolved':
        severity = 'normal'
        create_time = ends_at
    else:
        severity = 'unknown'
        create_time = ends_at or starts_at

    summary = annotations.pop('summary', None)
    description = annotations.pop('description', None)
    text = description or summary or '%s: %s on %s' % (labels['job'], labels['alertname'], labels['instance'])

    try:
        timeout = int(labels.pop('timeout', 0)) or None
    except ValueError:
        timeout = None

    if external_url:
        annotations['externalUrl'] = external_url
    if 'generatorURL' in alert:
        annotations['moreInfo'] = '<a href="%s" target="_blank">Prometheus Graph</a>' % alert['generatorURL']

    return Alert(
        resource=labels.pop('exported_instance', None) or labels.pop('instance', 'n/a'),
        event=labels.pop('alertname'),
        environment=labels.pop('environment', 'Production'),
        severity=severity,
        correlate=labels.pop('correlate').split(',') if 'correlate' in labels else None,
        service=labels.pop('service', '').split(','),
        group=labels.pop('job', 'Prometheus'),
        value=labels.pop('value', None),
        text=text,
        attributes=annotations,
        origin='prometheus/' + labels.pop('monitor', '-'),
        event_type='prometheusAlert',
        create_time=create_time.astimezone(tz=pytz.UTC).replace(tzinfo=None),
        timeout=timeout,
        raw_data=alert,
        tags=["%s=%s" % t for t in labels.items()]  # any labels left are used for tags
    )
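For reference, a minimal sketch of the alert payload shape this function consumes (field names as read above; the values are hypothetical):

alert = {
    'status': 'firing',
    'labels': {'alertname': 'HighLatency', 'severity': 'critical',
               'instance': 'web01', 'job': 'node', 'monitor': 'prod'},
    'annotations': {'summary': 'p99 latency above threshold'},
    'startsAt': '2019-05-01T12:00:00Z',
    'endsAt': '0001-01-01T00:00:00Z',   # Prometheus sentinel meaning "still firing"
    'generatorURL': 'http://prometheus.local/graph',
}
# parse_prometheus(alert, external_url=None) would yield an Alert with severity
# 'critical' and ends_at None, because endsAt holds the sentinel value.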
Example #24
def fetch_ticket(ticket_id, overwrite=False):
    ticket = zendesk.ticket_show(id=ticket_id)['ticket']

    assignee_id = ticket['assignee_id']

    summary = ticket['subject']
    description = ticket['description']
    url = urllib.parse.urljoin(app.config['ZENDESK_URL'],
                               'tickets/%d/' % ticket_id)

    field_ids = app.config['ZENDESK_FIELD_IDS']
    custom_fields = fields_to_dict(ticket['custom_fields'])

    start_date = parse_date(custom_fields[field_ids['start_date']]).date()
    start_time = parse_date(custom_fields[field_ids['start_time']]).time()
    end_date = parse_date(custom_fields[field_ids['end_date']]).date()
    end_time = parse_date(custom_fields[field_ids['end_time']]).time()

    start = datetime.datetime.combine(start_date, start_time)
    end = datetime.datetime.combine(end_date, end_time)

    assignee = zendesk.user_show(id=assignee_id)['user']
    timezone = friendly_to_tz(assignee.get('time_zone'))

    event = {
        'summary': summary,
        'description': description,
        'start': {
            'dateTime': start.isoformat(),
            'timeZone': timezone
        },
        'end': {
            'dateTime': end.isoformat(),
            'timeZone': timezone
        },
        'source': {
            'title': ticket_id,
            'url': url
        }
    }

    if not overwrite:
        event_id = insert_event(assignee_id, event)
    else:
        event_id = insert_event(assignee_id, event, ticket_id)

    redis.hmset('ticket:%s' % ticket_id, {
        'event_id': event_id.encode(),
        'profile_id': str(assignee_id).encode(),
    })
    redis.set('event:%s' % event_id, str(ticket_id).encode())

    return event
Example #25
    def _object_changelog(self, uid, mailbox, msguid, limit=None):
        """
            Query storage for changelog events related to the given UID
        """
        # this requires a user context
        if not self.env.has_key('REQUEST_USER') or not self.env['REQUEST_USER']:
            return None

        # fetch event log from storage
        eventlog = self.storage.get_events(uid, self._resolve_mailbox_uri(mailbox), msguid, limit)

        # convert logstash entries into a sane changelog
        event_op_map = {
            'MessageNew': 'APPEND',
            'MessageAppend': 'APPEND',
            'MessageTrash': 'DELETE',
            'MessageMove': 'MOVE',
        }
        last_append_uid = 0
        result = []

        if eventlog is not None:
            for _log in eventlog:
                # filter MessageTrash following a MessageAppend event (which is an update operation)
                if _log['event'] == 'MessageTrash' and last_append_uid > int(_log['uidset']):
                    continue

                # remember last appended message uid
                if _log['event'] == 'MessageAppend' and _log.has_key('uidset'):
                    last_append_uid = int(_log['uidset'])

                # compose log entry to return
                logentry = {
                    'rev': _log.get('revision', None),
                    'op': event_op_map.get(_log['event'], 'UNKNOWN'),
                    'mailbox': self._convert_mailbox_uri(_log.get('mailbox', None))
                }
                try:
                    timestamp = parse_date(_log['timestamp_utc'])
                    logentry['date'] = timestamp.strftime("%Y-%m-%dT%H:%M:%SZ")
                except Exception, e:
                    try:
                        timestamp = parse_date(_log['timestamp'])
                        logentry['date'] = timestamp.astimezone(pytz.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
                    except Exception, e:
                        log.warning("Failed to parse timestamp %r: %r", _log['timestamp'], str(e))
                        logentry['date'] = _log['timestamp']

                logentry['user'] = self._get_user_info(_log)

                result.append(logentry)
Example #26
    def get_thread(self, thread):
        if not isinstance(thread, Thread):
            thread = Thread(thread)
            thread.flags = Thread.IS_DISCUSSION

        info = self.browser.get_thread(thread.id)
        for user in info['participants']:
            if user['user']['fb_id'] is not None:
                user['user']['fb'] = self.browser.get_facebook(user['user']['fb_id'])
            if user['user']['id'] == self.browser.my_id:
                me = HappnContact(user['user'])
            else:
                other = HappnContact(user['user'])

        thread.title = u'Discussion with %s' % other.name

        contact = self.storage.get('contacts', thread.id, default={'lastmsg_date': '1970-01-01T01:01:01+00:00'})

        child = None

        for msg in info['messages']:
            flags = 0
            if parse_date(contact['lastmsg_date']) < parse_date(msg['creation_date']):
                flags = Message.IS_UNREAD

            if msg['sender']['id'] == me.id:
                sender = me
                receiver = other
            else:
                sender = other
                receiver = me

            msg = Message(thread=thread,
                          id=msg['id'],
                          title=thread.title,
                          sender=sender.name,
                          receivers=[receiver.name],
                          date=parse_date(msg['creation_date']),
                          content=msg['message'],
                          children=[],
                          parent=None,
                          signature=sender.get_text(),
                          flags=flags)

            if child:
                msg.children.append(child)
                child.parent = msg
            child = msg
        thread.root = child

        return thread
Example #27
    def _get_dates(self):
        start_date = request.params.get('start_date')
        end_date = request.params.get('end_date')
        
        if start_date:
            start_date = parse_date(start_date).date()

        if end_date:
            end_date = parse_date(end_date).date()

        if start_date and end_date:
            if end_date < start_date:
                (start_date, end_date) = (end_date, start_date)
        return (start_date, end_date)
Example #28
 def get_date(txt):
     # dates in the statement don't have year on them
     # use st_date to fill in this gap, but be careful
     # when dates cross new year
     # ex: st_date = 31 Jan 2011
     # dates: 24 Dec & 24 Jan
     # naive parsing would extract them as Jan & Dec 2011
     # instead of Dec 2010 & Jan 2011
     date = parse_date(txt, default = st_date)
     if date > st_date:
         date = parse_date(
                 txt,
                 default = datetime.date(st_date.year-1, st_date.month, 1))
     return date
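A hypothetical standalone restatement of the same year-crossing guard, assuming parse_date is dateutil's parse and the statement date from the comment above:

import datetime
from dateutil.parser import parse as parse_date

st_date = datetime.datetime(2011, 1, 31)   # hypothetical statement date used as the parse default

def fill_year(txt):
    # mirrors get_date above, written standalone for illustration
    date = parse_date(txt, default=st_date)
    if date > st_date:
        date = parse_date(txt, default=datetime.datetime(st_date.year - 1, st_date.month, 1))
    return date

print(fill_year('24 Jan'))   # 2011-01-24 00:00:00
print(fill_year('24 Dec'))   # 2010-12-24 00:00:00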
Example #29
    def parse_item(self, item):
        guid = self.get_text(item, 'guid')
        if len(guid) > 25:
            # date can be extracted from last 25 chars
            name = guid[:-25]
            created_at = guid[-25:]
        else:
            name = guid
            created_at = None
        name = name.replace('_', ' ')
        description = self.get_text(item, 'title')
        address = self.get_text(item, 'description')
        updated_at = self.get_text(item, 'updated')
        # ensure created_at and updated_at are dates
        if created_at:
            try:
                parse_date(created_at)
            except ValueError:
                created_at = None
        if updated_at:
            try:
                parse_date(updated_at)
            except ValueError:
                updated_at = None
        try:
            lat, lng = self.get_text(item, 'georss:point').split(' ')
        except IndexError:
            # detail view
            lat = self.get_text(item, 'georss:lat')
            lng = self.get_text(item, 'georss:long')

        # point object
        geometry = Point(float(lng), float(lat))

        result = {
            "name": name,
            "status": None,
            "address": address,
            "is_published": True,
            "user": None,
            "geometry": geometry,
            "elev": None,
            "description": description,
            "notes": guid,
            "added": created_at,
            "updated": updated_at,
            "data": {}
        }

        return result
Example #30
def is_endTime_after_startTime(startDateTime, endDateTime):
    if (startDateTime and endDateTime):
        startTime = startDateTime
        endTime = endDateTime
        
        if type(startDateTime) is types.UnicodeType:
           startTime = parse_date(startDateTime)
        if type(endDateTime) is types.UnicodeType:
           endTime = parse_date(endDateTime)
           
        startTime = startTime.replace(tzinfo=None)
        endTime = endTime.replace(tzinfo=None)
        return endTime >= startTime
    return True
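A brief usage sketch (Python 2, matching the types.UnicodeType check above); note that tzinfo is stripped before comparing, so aware and naive inputs can be mixed:

print(is_endTime_after_startTime(u'2015-03-01T09:00:00+00:00', u'2015-03-01T10:00:00+02:00'))   # True
print(is_endTime_after_startTime(u'2015-03-01T12:00:00', u'2015-03-01T09:00:00'))               # False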
Example #31
    def workload_status_since(self):
        """Get the time when the `workload_status` was last updated.

        """
        return parse_date(self.safe_data['workload-status']['since'])
Example #32
    def agent_status_since(self):
        """Get the time when the `agent_status` was last updated.

        """
        return parse_date(self.safe_data['agent-status']['since'])
Example #33
def change_date_format(t):
    return parse_date(t).strftime('%Y-%m-%d %H:%M:%S')
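For example, assuming parse_date is dateutil's parse:

print(change_date_format('2018-07-04T08:30:00Z'))   # '2018-07-04 08:30:00'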
Example #34
 def test_bid_has_custom_values(self, time, date):
     bid_id = client.create_bid(76137, time=time, date=date)
     created_bid = client.get_bid(bid_id)
     expected = parse_date(time + " " + date,
                           dayfirst=True).astimezone(datetime.timezone.utc)
     self.assertEqual(expected, created_bid.start_time)
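Note the dayfirst=True: with dateutil it makes an ambiguous numeric date read day-first, e.g. with hypothetical bid values:

from dateutil.parser import parse as parse_date

print(parse_date('18:00 01/02/2021', dayfirst=True))   # 2021-02-01 18:00:00, i.e. 1 February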
Example #35
def datapusher_hook(context, data_dict):
    ''' Update datapusher task. This action is typically called by the
    datapusher whenever the status of a job changes.

    :param metadata: metadata produced by the datapusher service; must have a
       resource_id property.
    :type metadata: dict
    :param status: status of the job from the datapusher service
    :type status: string
    '''

    metadata, status = _get_or_bust(data_dict, ['metadata', 'status'])

    res_id = _get_or_bust(metadata, 'resource_id')

    # Pass metadata, not data_dict, as it contains the resource id needed
    # on the auth checks
    p.toolkit.check_access('datapusher_submit', context, metadata)

    task = p.toolkit.get_action('task_status_show')(context, {
        'entity_id': res_id,
        'task_type': 'datapusher',
        'key': 'datapusher'
    })

    task['state'] = status
    task['last_updated'] = str(datetime.datetime.utcnow())

    resubmit = False

    if status == 'complete':
        # Create default views for resource if necessary (only the ones that
        # require data to be in the DataStore)
        resource_dict = p.toolkit.get_action('resource_show')(
            context, {'id': res_id})

        dataset_dict = p.toolkit.get_action('package_show')(
            context, {'id': resource_dict['package_id']})

        for plugin in p.PluginImplementations(interfaces.IDataPusher):
            plugin.after_upload(context, resource_dict, dataset_dict)

        logic.get_action('resource_create_default_resource_views')(
            context,
            {
                'resource': resource_dict,
                'package': dataset_dict,
                'create_datastore_views': True,
            })

        # Check if the uploaded file has been modified in the meantime
        if (resource_dict.get('last_modified') and
                metadata.get('task_created')):
            try:
                last_modified_datetime = parse_date(
                    resource_dict['last_modified'])
                task_created_datetime = parse_date(metadata['task_created'])
                if last_modified_datetime > task_created_datetime:
                    log.debug('Uploaded file more recent: {0} > {1}'.format(
                        last_modified_datetime, task_created_datetime))
                    resubmit = True
            except ValueError:
                pass
        # Check if the URL of the file has been modified in the meantime
        elif (resource_dict.get('url') and
                metadata.get('original_url') and
                resource_dict['url'] != metadata['original_url']):
            log.debug('URLs are different: {0} != {1}'.format(
                resource_dict['url'], metadata['original_url']))
            resubmit = True

    context['ignore_auth'] = True
    p.toolkit.get_action('task_status_update')(context, task)

    if resubmit:
        log.debug('Resource {0} has been modified, '
                  'resubmitting to DataPusher'.format(res_id))
        p.toolkit.get_action('datapusher_submit')(
            context, {'resource_id': res_id})
Example #36
    def create_registration_report(self,
                                   before=None,
                                   since=None,
                                   report_type=None):
        """
        Create a new registration report.

        `Args:`
            before: str
                Limit to registrations that were started before this date, in
                ISO format (e.g. 2020-01-01)
            since: str
                Limit to registrations that were started since this date, in
                ISO format (e.g. 2020-01-01)
            report_type: str
                The type of report to create. If left as None, it creates the default report. The
                ``extended`` report includes additional fields. Currently only accepts ``extended``.
        `Returns:`
            int
                The ID of the created report.
        """
        report_url = 'registrant_reports.json'
        # Create the report for the new data
        report_parameters = {
            'partner_id': self.partner_id,
            'partner_API_key': self.partner_api_key,
        }

        # Declare these here so the logging doesn't error out
        since_date = before_date = None

        if report_type:
            if report_type not in VALID_REPORT_TYPES:
                raise RTVFailure(
                    f"Invalid report type. Must be one of {VALID_REPORT_TYPES}"
                )
            report_parameters["report_type"] = report_type
        if since:
            since_date = parse_date(since).strftime(DATETIME_FORMAT)
            report_parameters['since'] = since_date
        if before:
            before_date = parse_date(before).strftime(DATETIME_FORMAT)
            report_parameters['before'] = before_date

        # The report parameters get passed into the request as JSON in the body
        # of the request.
        report_str = f"{report_type} report" if report_type else "report"
        logger.info(f"Creating {report_str} for {self.partner_id} "
                    f"for dates: {since_date} to {before_date}...")
        response = self.client.request(report_url,
                                       'post',
                                       json=report_parameters)
        if response.status_code != requests.codes.ok:
            raise RTVFailure("Couldn't create RTV registrations report")

        response_json = response.json()
        # The RTV API says the response should include the report_id, but I have not found
        # that to be the case
        report_id = response_json.get('report_id')
        if report_id:
            logger.info(f"Created report with id {report_id}.")
            return report_id

        # If the response didn't include the report_id, then we will parse it out of the URL.
        status_url = response_json.get('status_url')
        url_match = STATUS_URL_PARSE_REGEX.search(status_url)
        if url_match:
            report_id = url_match.group(1)

        logger.info(f"Created report with id {report_id}.")
        return report_id
Example #37
def xloader_hook(context, data_dict):
    ''' Update xloader task. This action is typically called by ckanext-xloader
    whenever the status of a job changes.

    :param metadata: metadata provided when submitting job. key-value pairs.
                     Must have resource_id property.
    :type metadata: dict
    :param status: status of the job from the xloader service. Allowed values:
                   pending, running, running_but_viewable, complete, error
                   (which must all be valid values for task_status too)
    :type status: string
    :param error: Error raised during job execution
    :type error: string

    NB here are other params which are in the equivalent object in
    ckan-service-provider (from job_status):
        :param sent_data: Input data for job
        :type sent_data: json encodable data
        :param job_id: An identifier for the job
        :type job_id: string
        :param result_url: Callback url
        :type result_url: url string
        :param data: Results from job.
        :type data: json encodable data
        :param requested_timestamp: Time the job started
        :type requested_timestamp: timestamp
        :param finished_timestamp: Time the job finished
        :type finished_timestamp: timestamp

    '''

    metadata, status = _get_or_bust(data_dict, ['metadata', 'status'])

    res_id = _get_or_bust(metadata, 'resource_id')

    # Pass metadata, not data_dict, as it contains the resource id needed
    # on the auth checks
    p.toolkit.check_access('xloader_submit', context, metadata)

    task = p.toolkit.get_action('task_status_show')(context, {
        'entity_id': res_id,
        'task_type': 'xloader',
        'key': 'xloader'
    })

    task['state'] = status
    task['last_updated'] = str(datetime.datetime.utcnow())
    task['error'] = data_dict.get('error')

    resubmit = False

    if status in ('complete', 'running_but_viewable'):
        # Create default views for resource if necessary (only the ones that
        # require data to be in the DataStore)
        resource_dict = p.toolkit.get_action('resource_show')(context, {
            'id': res_id
        })

        dataset_dict = p.toolkit.get_action('package_show')(
            context, {
                'id': resource_dict['package_id']
            })

        for plugin in p.PluginImplementations(xloader_interfaces.IXloader):
            plugin.after_upload(context, resource_dict, dataset_dict)

        logic.get_action('resource_create_default_resource_views')(
            context, {
                'resource': resource_dict,
                'package': dataset_dict,
                'create_datastore_views': True,
            })

        # Check if the uploaded file has been modified in the meantime
        if (resource_dict.get('last_modified')
                and metadata.get('task_created')):
            try:
                last_modified_datetime = parse_date(
                    resource_dict['last_modified'])
                task_created_datetime = parse_date(metadata['task_created'])
                if last_modified_datetime > task_created_datetime:
                    log.debug('Uploaded file more recent: {0} > {1}'.format(
                        last_modified_datetime, task_created_datetime))
                    resubmit = True
            except ValueError:
                pass
        # Check if the URL of the file has been modified in the meantime
        elif (resource_dict.get('url') and metadata.get('original_url')
              and resource_dict['url'] != metadata['original_url']):
            log.debug('URLs are different: {0} != {1}'.format(
                resource_dict['url'], metadata['original_url']))
            resubmit = True

    context['ignore_auth'] = True
    p.toolkit.get_action('task_status_update')(context, task)

    if resubmit:
        log.debug('Resource {0} has been modified, '
                  'resubmitting to xloader'.format(res_id))
        p.toolkit.get_action('xloader_submit')(context, {
            'resource_id': res_id
        })
Example #38
# idle cutoff for user inactivity
IDLE_CUTOFF_DAYS = int(os.environ.get("IDLE_CUTOFF_DAYS") or 30)
IDLE_CUTOFF = datetime.now(timezone.utc) - timedelta(days=IDLE_CUTOFF_DAYS)
# limit the number of deletions in a given run
IDLE_DELETE_LIMIT = int(os.environ.get("IDLE_DELETE_LIMIT") or 0)

# batch variables for deletions
DELETE_BATCH_SIZE = int(os.environ.get("DELETE_BATCH_SIZE") or 1)
DELETE_BATCH_SECONDS = int(os.environ.get("DELETE_BATCH_SECONDS") or 30)

# the date before which we assume sql data doesn't need to be deleted again
# because re-running SQL delete is so slow
# this should not normally be set, but it is now while we are re-importing data from the lake to the db
SQL_CUTOFF_DATE = os.environ.get("SQL_CUTOFF_DATE")
if SQL_CUTOFF_DATE:
    SQL_CUTOFF_DATE = parse_date(SQL_CUTOFF_DATE)

# backlog date is a date cutoff so we can limit processing to
# only old data when there's been a backlog buildup
BACKLOG_DATE = os.environ.get("BACKLOG_DATE")

PERSISTENT_CHECK_DB = os.environ.get("PERSISTENT_CHECK_DB", "") == "1"

to_delete = graph.extension_attr_name("toDelete")
to_delete_date = graph.extension_attr_name("toDeleteDate")
iot_deleted_date = graph.extension_attr_name("iotDeletedDate")
sql_deleted_date = graph.extension_attr_name("sqlDeletedDate")
lake_deleted_date = graph.extension_attr_name("lakeDeletedDate")
consent_revoked = graph.extension_attr_name("consentRevoked")

Example #39
def step_impl(context, timestamp):
    context.cel['activation']["Now"] = celpy.celtypes.TimestampType(parse_date(timestamp))
Example #40
def parse_sce_csv_file(path: str, service_id: str) -> List[IntervalReading]:
    """Extract interval data readings from a CSV file downloaded from the SCE website

    This file is a little unusual; there is an extended header portion of several lines containing various
    metadata about the interval data that follows. We skip over that header and extract the interval
    readings. They are returned as a list of tuples of the form (datetime.datetime, float). We expect
    these readings to be 15-minute readings. The readings can either be demand or usage values, depending
    on the manner in which the CSV file is downloaded. This function makes no assumption about the units
    of these readings.

    A given file can contain readings for multiple service ids. This is implemented by having one column per
    service ID. This function takes an argument specifying which service id column to fetch data for. If the
    service ID cannot be found, an IntervalDataParseException is thrown. Similarly, any errors that occur
    while parsing will be raised as IntervalDataParseException instances.

    Arguments:
        path: The path to the CSV file on the file system
        service_id: The service id of interest

    Returns:
        A list of interval data readings, formatted as 2-tuples, of the form (datetime.datetime, float). The first
        tuple element stores the time when the reading occurred, the second the interval data reading, as a float.

    Raises:
        IntervalDataParseException: If the desired service ID can't be found, or an error occurs while parsing.
    """

    # Read lines until we find the interval data header line (starts with the string "Date"),
    # then store the raw data into data_lines
    data_started = False
    data_lines = []
    with open(path) as f:
        for line in f:
            stripped_line = line.strip()
            if not data_started:
                if stripped_line.startswith("Date"):
                    data_started = True
                    data_lines.append(stripped_line)
            else:
                data_lines.append(stripped_line)

    # Parse each reading row
    csv_reader = csv.reader(data_lines)
    first = True
    data_column = None
    results = []
    for row in csv_reader:
        if first:
            first = False
            headers = [th.strip() for th in row]
            for idx, th in enumerate(headers):
                if th == service_id:
                    data_column = idx
            if data_column is None:
                raise IntervalDataParseException(
                    "Could not find data for SAID={}".format(service_id))
        else:
            try:
                reading_date = parse_date(row[0].strip()).date()
                reading_time = parse_date(row[1].strip()).time()
                reading_datetime = datetime.combine(reading_date, reading_time)
                reading_value = _to_float(row[data_column].strip())
                results.append(
                    IntervalReading(dt=reading_datetime, value=reading_value))
            except Exception as e:
                msg = "An error occured while trying to parse interval data from the SCE website."
                raise IntervalDataParseException(msg) from e
    return results
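A hedged usage sketch; the path and service id below are hypothetical, and IntervalReading comes from the surrounding module:

readings = parse_sce_csv_file('/tmp/sce_usage.csv', service_id='3-049-1234')
for reading in readings[:3]:
    print(reading.dt, reading.value)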
Example #41
def datetime_to_date_string(datetime_obj):
    if isinstance(datetime_obj, str):
        datetime_obj = parse_date(datetime_obj)
    return "%s/%s/%s" % (datetime_obj.month, datetime_obj.day,
                         datetime_obj.year)
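For example, assuming parse_date is dateutil's parse:

print(datetime_to_date_string('2019-12-05'))   # '12/5/2019'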
Example #42
    def compute_features(self, threads, stemmed_vocabulary, distrib_matrix):
        self.log(
            'Computing features. Please, wait. This will take some serious time...',
            logging.INFO)
        for thread in threads:
            self.log(
                'Computing features for thread id {0}'.format(
                    thread['question_uid']), logging.INFO)
            try:
                base_date = parse_date(thread['date_time'])
            except ValueError:
                base_date = parse_date('1970-01-01')
            except AttributeError:
                base_date = thread['date_time']
            answers = thread['answers']
            try:
                tag_list = thread['tags'].split('.')
            except AttributeError:
                tag_list = thread[
                    'tags']  # there is no '.' used as tag separator
            if '' in tag_list:
                tag_list.remove('')
            for answer in answers:
                # compute thread tags
                answer_tags = answer['tags'].split()
                if 'null' in answer_tags:
                    answer_tags.remove('null')
                tag_list.extend(answer_tags)
                thread['tags'] = sorted(set(tag_list))

                # compute len in chars and words
                alen = len(answer['text'])
                answer['len'] = alen
                wordcount = Discretizer._count_words(answer['text'])
                answer['wordcount'] = wordcount
                if wordcount == 0:
                    answer['avg_chars_per_word'] = 0
                else:
                    answer['avg_chars_per_word'] = "{0:.2f}".format(
                        alen / float(wordcount))  # float with 2 decimals
                try:
                    sentences = tokenize.sent_tokenize(answer['text'].decode(
                        'utf-8', 'replace').encode('ascii', 'replace'),
                                                       language='english')
                except (AttributeError, TypeError) as e:
                    sentences = tokenize.sent_tokenize(str(answer['text']),
                                                       language='english')
                sentence_count = len(sentences)
                answer['sentences'] = sentence_count
                if sentence_count == 0:
                    words_per_sentence = 0
                else:
                    words_per_sentence = "{0:.2f}".format(
                        wordcount / float(sentence_count))
                answer['avg_words_per_sentence'] = words_per_sentence
                longest_sentence = 0
                for s in sentences:
                    l = Discretizer._count_words(s)
                    if l > longest_sentence:
                        longest_sentence = l
                answer['longest_sentence'] = longest_sentence
                try:
                    creation_date = parse_date(answer['date_time'])
                except AttributeError:
                    creation_date = answer['date_time']
                except Exception:
                    print('\nInvalid date_time')
                    creation_date = base_date  # fall back so the time_difference below is defined
                time_difference = abs(
                    (creation_date - base_date).total_seconds())
                answer['time_difference'] = time_difference

                #answer['upvotes'] = thread['upvotes']

                # check for urls and code snippets
                match = re.search(r'http(s)?://', str(answer['text']),
                                  re.MULTILINE)
                if match:
                    answer['has_links'] = True
                else:
                    answer['has_links'] = False

                answer['has_code_snippet'] = self._has_codesnippet(
                    str(answer['text']))
                try:
                    LL = Discretizer._log_likelihood(
                        answer['text'].decode('utf-8', 'replace').encode(
                            'ascii', 'replace'), stemmed_vocabulary,
                        distrib_matrix)
                except (AttributeError, TypeError) as e:
                    LL = Discretizer._log_likelihood(str(answer['text']),
                                                     stemmed_vocabulary,
                                                     distrib_matrix)
                answer['loglikelihood'] = LL
                answer['loglikelihood_descending'] = LL
                answer['loglikelihood_ascending'] = LL
                try:
                    aspw = Discretizer._ASPW(answer['text'].decode(
                        'utf-8', 'replace').encode('ascii', 'replace'))
                except (AttributeError, TypeError) as e:
                    aspw = Discretizer._ASPW(str(answer['text']))
                fk = Discretizer._FK(answer['avg_words_per_sentence'], aspw)
                answer['F-K'] = fk
                answer['F-K_descending'] = fk
                answer['F-K_ascending'] = fk

            # compute ranks
            #answers = Discretizer._sort_rank(answers, 'upvotes', reverse=True)
            answers = Discretizer._sort_rank(answers,
                                             'sentences',
                                             reverse=True)
            answers = Discretizer._sort_rank(answers, 'len', reverse=True)
            answers = Discretizer._sort_rank(answers, 'views', reverse=True)
            answers = Discretizer._sort_rank(answers,
                                             'wordcount',
                                             reverse=True)
            answers = Discretizer._sort_rank(answers,
                                             'avg_chars_per_word',
                                             reverse=True)
            answers = Discretizer._sort_rank(answers,
                                             'avg_words_per_sentence',
                                             reverse=True)
            answers = Discretizer._sort_rank(answers,
                                             'longest_sentence',
                                             reverse=True)
            answers = Discretizer._sort_rank(answers,
                                             'time_difference',
                                             reverse=False)
            answers = Discretizer._sort_rank(answers,
                                             'loglikelihood_descending',
                                             reverse=True)
            answers = Discretizer._sort_rank(answers,
                                             'loglikelihood_ascending',
                                             reverse=False)
            answers = Discretizer._sort_rank(answers,
                                             'F-K_descending',
                                             reverse=True)
            answers = Discretizer._sort_rank(answers,
                                             'F-K_ascending',
                                             reverse=False)
            thread['answers'] = answers

        self.log(
            'Done computing features for {0} threads'.format(len(threads)),
            logging.INFO)
        return threads
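
The ranking pass above relies on a `Discretizer._sort_rank` helper that is not shown here; a minimal sketch of what such a rank-assigning helper could look like (hypothetical, not the project's actual implementation) is:

def _sort_rank(answers, key, reverse=False):
    # Hypothetical sketch: order answers by one feature and record each
    # answer's 1-based position under '<key>_rank'.
    ordered = sorted(answers, key=lambda a: a[key], reverse=reverse)
    for rank, answer in enumerate(ordered, start=1):
        answer['{}_rank'.format(key)] = rank
    return ordered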
Example #43
0
    def _get_value(self, raw_value):
        return parse_date(raw_value)
Example #44
0
def _prepare_attributes(name, value):
    name = name.lower()
    if name == 'site':
        value = value.lower()
        query = ''
        if value == 'acfun':
            query = 'acfun'
        elif value in ['bilibili', 'bili']:
            query = 'bilibili'
        elif value in ['youtube', 'ytb']:
            query = 'youtube'
        elif value in ['nicovideo', 'niconico', 'nico']:
            query = 'nicovideo'
        elif value in ['twitter']:
            query = 'twitter'
        elif value in ['ipfs']:
            query = 'ipfs'
        else:
            query = value
        return {'item.site': query}
    elif name == 'date':
        if value[:2] == '<=':
            date = parse_date(value[2:])
            return {'item.upload_time': {'$lte': date + timedelta(days=1)}}
        elif value[:2] == '>=':
            date = parse_date(value[2:])
            return {'item.upload_time': {'$gte': date}}
        elif value[:1] == '<':
            date = parse_date(value[1:])
            return {'item.upload_time': {'$lt': date}}
        elif value[:1] == '>':
            date = parse_date(value[1:])
            return {'item.upload_time': {'$gt': date}}
        elif value[:1] == '=':
            date = parse_date(value[1:])
            return {
                'item.upload_time': {
                    '$gte': date,
                    '$lte': date + timedelta(days=1)
                }
            }
        date = parse_date(value)
        return {
            'item.upload_time': {
                '$gte': date,
                '$lte': date + timedelta(days=1)
            }
        }
    elif name == 'tags':
        if value[:2] == '<=':
            return {'tag_count': {'$lte': int(value[2:])}}
        elif value[:2] == '>=':
            return {'tag_count': {'$gte': int(value[2:])}}
        elif value[:1] == '<':
            return {'tag_count': {'$lt': int(value[1:])}}
        elif value[:1] == '>':
            return {'tag_count': {'$gt': int(value[1:])}}
        elif value[:1] == '=':
            return {'tag_count': {'$eq': int(value[1:])}}
        else:
            return {}
    elif name == 'placeholder':
        value = value.lower()
        if value == 'true':
            return {'item.placeholder': True}
        elif value == 'false':
            return {'item.placeholder': False}
        else:
            return {}
    elif name == 'repost':
        if value in _REPOST_TRANSLATE:
            value = _REPOST_TRANSLATE[value]
        if not value:
            return {'item.repost_type': {'$exists': False}}
        return {'item.repost_type': value}
    return {}
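
A minimal usage sketch for the attribute parser above, assuming the returned fragments are merged into a single MongoDB-style filter; the `build_query` helper and the example values are illustrative, and `parse_date` is assumed to be dateutil's parser:

from datetime import timedelta                      # used by _prepare_attributes
from dateutil.parser import parse as parse_date     # assumed parse_date in use

def build_query(pairs):
    # Merge the per-attribute fragments into one filter document.
    query = {}
    for name, value in pairs:
        query.update(_prepare_attributes(name, value))
    return query

# build_query([('site', 'ytb'), ('date', '>=2021-01-01'), ('tags', '>3')])
# -> {'item.site': 'youtube',
#     'item.upload_time': {'$gte': datetime(2021, 1, 1, 0, 0)},
#     'tag_count': {'$gt': 3}}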
Example #45
0
def test_publish_revision(content_util, persist_util, app, db_engines,
                          db_tables):
    resources = list([content_util.gen_resource() for x in range(0, 2)])
    module = content_util.gen_module(resources=resources)
    module = persist_util.insert_module(module)

    metadata = parse_module_metadata(module)

    # Collect control data for non-legacy metadata
    stmt = (db_tables.modules.select().where(
        db_tables.modules.c.moduleid == metadata.id))
    control_metadata = db_engines['common'].execute(stmt).fetchone()

    # TARGET
    with pytest.raises(Unchanged), db_engines['common'].begin() as conn:
        now = conn.execute('SELECT CURRENT_TIMESTAMP as now').fetchone().now
        (id, version), ident = publish_legacy_page(
            module,
            metadata,
            (
                'user1',
                'test publish',
            ),
            conn,
        )

    # Change the module text, to make it publishable.
    index_cnxml = module.file.read_text()
    start_offset = index_cnxml.find('test document')
    module.file.write_text(index_cnxml[:start_offset] + 'TEST DOCUMENT' +
                           index_cnxml[start_offset + 13:])

    # TARGET - again
    with db_engines['common'].begin() as conn:
        now = conn.execute('SELECT CURRENT_TIMESTAMP as now').fetchone().now
        (id, version), ident = publish_legacy_page(
            module,
            metadata,
            (
                'user1',
                'test publish',
            ),
            conn,
        )

    # Check core metadata insertion
    stmt = (db_tables.modules.select().where(
        db_tables.modules.c.module_ident == ident))
    result = db_engines['common'].execute(stmt).fetchone()
    assert result.version == '1.2'
    assert result.uuid == control_metadata.uuid
    assert result.major_version == 2
    assert result.minor_version is None
    # Check for reuse of the existing abstract
    assert result.abstractid == control_metadata.abstractid
    assert result.created == parse_date(metadata.created)
    assert result.revised == now
    assert result.portal_type == 'Module'
    assert result.name == metadata.title
    assert result.licenseid == 13
    assert result.submitter == 'user1'
    assert result.submitlog == 'test publish'
    assert result.authors == list(metadata.authors)
    assert result.maintainers == list(metadata.maintainers)
    assert result.licensors == list(metadata.licensors)
    assert result.google_analytics == GOOGLE_ANALYTICS_CODE

    # Check subject metadata insertion
    stmt = (db_tables.moduletags.join(db_tables.tags).select().where(
        db_tables.moduletags.c.module_ident == ident))
    results = db_engines['common'].execute(stmt)
    subjects = [x.tag for x in results]
    assert sorted(subjects) == sorted(metadata.subjects)

    # Check keyword metadata insertion
    stmt = (db_tables.modulekeywords.join(db_tables.keywords).select().where(
        db_tables.modulekeywords.c.module_ident == ident))
    results = db_engines['common'].execute(stmt)
    keywords = [x.word for x in results]
    assert sorted(keywords) == sorted(metadata.keywords)

    # Check for file insertion
    stmt = (db_tables.module_files.join(db_tables.files).select().where(
        db_tables.module_files.c.module_ident == ident))
    result = db_engines['common'].execute(stmt).fetchall()
    files = {x.filename: x for x in result}
    assert len(files) == len(resources) + 2  # content files
    assert 'index.cnxml' in files
    assert 'index.cnxml.html' in files

    # Check for resource file insertion
    html_content = files['index.cnxml.html'].file.decode('utf8')
    for resource in resources:
        assert resource.filename in files
        # Check for reference rewrites in the content. This is out of scope
        # for this project, but order of insertion matters in order for
        # the references to be rewritten.
        assert '/resources/{}'.format(resource.sha1) in html_content
Example #46
0
    async def process_one(uuid_activity):
        uuid, last_activity = uuid_activity
        group = await graph.get_group(uuid)
        if group is None:
            app_log.info(f"No group for inactive device {uuid}")
            counts["no_group"] += 1
            return
        if group.get(consent_revoked):
            app_log.info(f"Already marked for deletion: {uuid}")
            counts["deleted"] += 1
            return
        user = await graph.user_for_device(group)
        if user is None:
            app_log.info(f"No user for inactive device {uuid}")
            counts["no_user"] += 1
            # FIXME: something went wrong. Mark device id group for deletion?
            return

        # check other devices on the same user
        # in case of new device registrations,
        # don't delete data from a user's old phone
        other_device_activity = None
        device_ids = [uuid]
        async for group in graph.device_groups_for_user(user):
            device_id = group["displayName"]
            if device_id != uuid:
                device_ids.append(device_id)
            # First check for recent registration (cheap)
            if parse_date(group["createdDateTime"]) >= IDLE_CUTOFF:
                app_log.info(f"Recently registered device {device_id}")
                counts["new"] += 1
                if device_id == uuid:
                    app_log.warning(
                        f"WRONG activity: recently registered {device_id} not idle"
                    )
                    counts["wrong"] += 1
                other_device_activity = True
                break
            try:
                device = await devices.get_device(device_id)
            except Exception as e:
                app_log.warning(f"Failed to get device {device_id}: ({e})")
                counts["iot_err"] += 1
            else:
                if parse_date(device["lastActivityTime"]) >= IDLE_CUTOFF:
                    counts["iot"] += 1
                    app_log.info(f"Activity on {device_id} in IoTHub")
                    if device_id == uuid:
                        app_log.warning(
                            f"WRONG activity: iothub active {device_id} not idle"
                        )
                        counts["wrong"] += 1
                    other_device_activity = True
                    break
            if device_id != uuid:
                # if not registered since cutoff, check for activity in SQL
                if await check_sql_data(device_id, activity_cutoff=IDLE_CUTOFF):
                    counts["sql"] += 1
                    app_log.info(f"Activity on {device_id} in SQL")
                    other_device_activity = True
                    break
        if other_device_activity:
            app_log.info(f"{uuid} is associated with other more recent device activity")
            counts["active"] += 1
        else:
            app_log.info(f"User {user['logName']} is inactive since {last_activity}")
            app_log.info(f"Inactive devices: {','.join(device_ids)}")
            counts["idle"] += 1
            return user
Example #47
0
    def __init__(self, item):
        self.guid = item.find('guid').text
        self.title = item.find('title').text
        self.link = item.find('link').text
        self.description = item.find('description').text
        self.pub_date = parse_date(item.find('pubDate').text)
Example #48
0
    def license_from_manifest(self, manifest):
        def is_appropriate_manifest_sub(sub):
            if sub['pool']['activeSubscription'] is False:
                return False
            now = datetime.now(timezone.utc)
            if parse_date(sub['startDate']) > now:
                return False
            if parse_date(sub['endDate']) < now:
                return False
            products = sub['pool']['providedProducts']
            if any(product.get('productId') == '480' for product in products):
                return True
            return False

        def _can_aggregate(sub, license):
            # We aggregate multiple subs into a larger meta-sub, if they match
            #
            # No current sub in aggregate
            if not license:
                return True
            # Same SKU type (SER vs MCT vs others)?
            if license['sku'][0:3] != sub['pool']['productId'][0:3]:
                return False
            return True

        # Parse output for subscription metadata to build config
        license = dict()
        for sub in manifest:
            if not is_appropriate_manifest_sub(sub):
                logger.warning(
                    "Subscription %s (%s) in manifest is not active or for another product"
                    % (sub['pool']['productName'], sub['pool']['productId']))
                continue
            if not _can_aggregate(sub, license):
                logger.warning(
                    "Subscription %s (%s) in manifest does not match other manifest subscriptions"
                    % (sub['pool']['productName'], sub['pool']['productId']))
                continue

            license.setdefault('sku', sub['pool']['productId'])
            license.setdefault('subscription_name', sub['pool']['productName'])
            license.setdefault('pool_id', sub['pool']['id'])
            license.setdefault('product_name', sub['pool']['productName'])
            license.setdefault('valid_key', True)
            license.setdefault('license_type', 'enterprise')
            license.setdefault('satellite', False)
            # Use the nearest end date
            endDate = parse_date(sub['endDate'])
            currentEndDateStr = license.get('license_date',
                                            '4102462800')  # 2100-01-01
            currentEndDate = datetime.fromtimestamp(int(currentEndDateStr),
                                                    timezone.utc)
            if endDate < currentEndDate:
                license['license_date'] = endDate.strftime('%s')
            instances = sub['quantity']
            license['instance_count'] = license.get('instance_count',
                                                    0) + instances
            license['subscription_name'] = re.sub(
                r'[\d]* Managed Nodes',
                '%d Managed Nodes' % license['instance_count'],
                license['subscription_name'])

        if not license:
            logger.error("No valid subscriptions found in manifest")
        self._attrs.update(license)
        settings.LICENSE = self._attrs
        return self._attrs
Example #49
0
def _prepare_attributes(name, value):
    value = value.lower()
    name = name.lower()
    if name == 'site':
        query = ''
        if value == 'acfun':
            query = 'acfun'
        elif value in ['bilibili', 'bili']:
            query = 'bilibili'
        elif value in ['youtube', 'ytb']:
            query = 'youtube'
        elif value in ['nicovideo', 'niconico', 'nico']:
            query = 'nicovideo'
        elif value in ['twitter']:
            query = 'twitter'
        elif value in ['ipfs']:
            query = 'ipfs'
        return {'item.site': query}
    elif name == 'date':
        if value[:2] == '<=':
            date = parse_date(value[2:])
            return {'item.upload_time': {'$lte': date + timedelta(days=1)}}
        elif value[:2] == '>=':
            date = parse_date(value[2:])
            return {'item.upload_time': {'$gte': date}}
        elif value[:1] == '<':
            date = parse_date(value[1:])
            return {'item.upload_time': {'$lt': date}}
        elif value[:1] == '>':
            date = parse_date(value[1:])
            return {'item.upload_time': {'$gt': date}}
        elif value[:1] == '=':
            date = parse_date(value[1:])
            return {
                'item.upload_time': {
                    '$gte': date,
                    '$lte': date + timedelta(days=1)
                }
            }
        date = parse_date(value)
        return {
            'item.upload_time': {
                '$gte': date,
                '$lte': date + timedelta(days=1)
            }
        }
    elif name == 'tagless':
        if value == 'true':
            return {'tags': {'$size': 0}}
        elif value == 'false':
            return {'tags': {'$not': {'$size': 0}}}
        else:
            return {}
    elif name == 'placeholder':
        if value == 'true':
            return {'item.placeholder': True}
        elif value == 'false':
            return {'item.placeholder': False}
        else:
            return {}
    return {}
Example #50
0
    def _deserialize(self, value, attr, obj, **kwargs):
        try:
            return parse_date(value).date()
        except Exception:
            return None
Example #51
0
def parse_prometheus(alert, external_url):

    status = alert.get('status', 'firing')

    labels = copy(alert['labels'])
    annotations = copy(alert['annotations'])

    starts_at = parse_date(alert['startsAt'])
    if alert['endsAt'] == '0001-01-01T00:00:00Z':
        ends_at = None
    else:
        ends_at = parse_date(alert['endsAt'])

    if status == 'firing':
        severity = labels.pop('severity', 'warning')
        create_time = starts_at
    elif status == 'resolved':
        severity = 'normal'
        create_time = ends_at
    else:
        severity = 'unknown'
        create_time = ends_at or starts_at

    # get labels
    resource = labels.pop('exported_instance', None) or labels.pop(
        'instance', 'n/a')
    event = labels.pop('alertname')
    environment = labels.pop('environment', 'Production')

    # get annotations
    correlate = annotations.pop('correlate').split(
        ',') if 'correlate' in annotations else None
    service = annotations.pop('service', '').split(',')
    group = annotations.pop('job', 'Prometheus')
    value = annotations.pop('value', None)

    # build alert text
    summary = annotations.pop('summary', None)
    description = annotations.pop('description', None)
    text = description or summary or '%s: %s on %s' % (
        labels.get('job', '-'), event, resource)  # alertname/instance were popped above

    try:
        timeout = int(labels.pop('timeout', 0)) or None
    except ValueError:
        timeout = None

    if external_url:
        annotations['externalUrl'] = external_url
    if 'generatorURL' in alert:
        annotations[
            'moreInfo'] = '<a href="%s" target="_blank">Prometheus Graph</a>' % alert[
                'generatorURL']

    return Alert(
        resource=resource,
        event=event,
        environment=environment,
        severity=severity,
        correlate=correlate,
        service=service,
        group=group,
        value=value,
        text=text,
        attributes=annotations,
        origin='prometheus/' + labels.pop('monitor', '-'),
        event_type='prometheusAlert',
        create_time=create_time.astimezone(tz=pytz.UTC).replace(tzinfo=None),
        timeout=timeout,
        raw_data=alert,
        tags=["%s=%s" % t
              for t in labels.items()]  # any labels left are used for tags
    )
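
For orientation, an illustrative Alertmanager-style payload of the shape this parser expects; all field values are invented:

sample_alert = {
    'status': 'firing',
    'labels': {'alertname': 'HighErrorRate', 'severity': 'critical',
               'instance': 'web-1:9090', 'job': 'api', 'monitor': 'prod'},
    'annotations': {'summary': 'Error rate above 5% on web-1', 'value': '7.2'},
    'startsAt': '2021-06-01T12:00:00Z',
    'endsAt': '0001-01-01T00:00:00Z',
    'generatorURL': 'http://prometheus.local/graph',
}
# parse_prometheus(sample_alert, external_url=None) would yield an Alert with
# severity 'critical', event 'HighErrorRate', resource 'web-1:9090' and a
# create_time taken from startsAt.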
Example #52
0
    def validate_arg(arg, argdef):
        """
        Validate an incoming (unicode) string argument according the UPnP spec. Raises UPNPError.
        """
        datatype = argdef['datatype']
        reasons = set()
        ranges = {
            'ui1': (int, 0, 255),
            'ui2': (int, 0, 65535),
            'ui4': (int, 0, 4294967295),
            'i1': (int, -128, 127),
            'i2': (int, -32768, 32767),
            'i4': (int, -2147483648, 2147483647),
            'r4': (Decimal, Decimal('-3.40282347E+38'), Decimal('3.40282347E+38'))  # single-precision range
        }
        try:
            if datatype in set(ranges.keys()):
                v_type, v_min, v_max = ranges[datatype]
                if not v_min <= v_type(arg) <= v_max:
                    reasons.add('%r datatype must be a number in the range %s to %s' % (
                        datatype, v_min, v_max))

            elif datatype in {'r8', 'number', 'float', 'fixed.14.4'}:
                v = Decimal(arg)
                if v < 0:
                    assert Decimal('-1.79769313486232E308') <= v <= Decimal('4.94065645841247E-324')
                else:
                    assert Decimal('4.94065645841247E-324') <= v <= Decimal('1.79769313486232E308')

            elif datatype == 'char':
                v = arg.decode('utf8') if six.PY2 or isinstance(arg, bytes) else arg
                assert len(v) == 1

            elif datatype == 'string':
                v = arg.decode("utf8") if six.PY2 or isinstance(arg, bytes) else arg
                if argdef['allowed_values'] and v not in argdef['allowed_values']:
                    reasons.add('Value %r not in allowed values list' % arg)

            elif datatype == 'date':
                v = parse_date(arg)
                if any((v.hour, v.minute, v.second)):
                    reasons.add("'date' datatype must not contain a time")

            elif datatype in ('dateTime', 'dateTime.tz'):
                v = parse_date(arg)
                if datatype == 'dateTime' and v.tzinfo is not None:
                    reasons.add("'dateTime' datatype must not contain a timezone")

            elif datatype in ('time', 'time.tz'):
                now = datetime.datetime.utcnow()
                v = parse_date(arg, default=now)
                if v.tzinfo is not None:
                    now += v.utcoffset()
                if not all((
                        v.day == now.day,
                        v.month == now.month,
                        v.year == now.year)):
                    reasons.add('%r datatype must not contain a date' % datatype)
                if datatype == 'time' and v.tzinfo is not None:
                    reasons.add('%r datatype must not have timezone information' % datatype)

            elif datatype == 'boolean':
                valid = {'true', 'yes', '1', 'false', 'no', '0'}
                if arg.lower() not in valid:
                    reasons.add('%r datatype must be one of %s' % (datatype, ','.join(valid)))

            elif datatype == 'bin.base64':
                b64decode(arg)

            elif datatype == 'bin.hex':
                unhexlify(arg)

            elif datatype == 'uri':
                urlparse(arg)

            elif datatype == 'uuid':
                if not re.match(
                        r'^[0-9a-f]{8}\-[0-9a-f]{4}\-[0-9a-f]{4}\-[0-9a-f]{4}\-[0-9a-f]{12}$',
                        arg, re.I):
                    reasons.add('%r datatype must contain a valid UUID' % datatype)

            else:
                reasons.add("%r datatype is unrecognised." % datatype)

        except ValueError as exc:
            reasons.add(str(exc))

        return not bool(len(reasons)), reasons
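
A minimal call sketch for the validator above, assuming it is reachable as a plain function or static method; the `argdef` shape mirrors what the body reads ('datatype' plus 'allowed_values'):

ok, reasons = validate_arg('70000', {'datatype': 'ui2', 'allowed_values': None})
# ok is False; reasons explains that 'ui2' must lie between 0 and 65535
ok, reasons = validate_arg('2021-05-01', {'datatype': 'date', 'allowed_values': None})
# ok is True: the value parses and carries no time component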
Example #53
0
    def process(self, resources, event=None):
        results = []
        filtered = []
        attached = []
        stats = Counter()
        marker_date = parse_date('2016-11-01T00:00:00+00:00')

        # Filter volumes
        for r in resources:
            # unsupported type
            if r['VolumeType'] == 'standard':
                stats['vol-type'] += 1
                filtered.append(r['VolumeId'])
                continue

            # unattached are easy
            if not r.get('Attachments'):
                results.append(r)
                continue

            # check for attachment date older than the supported date
            if r['Attachments'][0]['AttachTime'] < marker_date:
                stats['attach-time'] += 1
                filtered.append(r['VolumeId'])
                continue

            attached.append(r)

        # Filter volumes attached to unsupported instance types
        ec2 = self.manager.get_resource_manager('ec2')
        instance_map = {}
        for v in attached:
            instance_map.setdefault(v['Attachments'][0]['InstanceId'],
                                    []).append(v)

        instances = ec2.get_resources(list(instance_map.keys()))
        for i in instances:
            if i['InstanceType'] in self.older_generation:
                stats['instance-type'] += len(instance_map[i['InstanceId']])
                filtered.extend(
                    [v['VolumeId'] for v in instance_map.pop(i['InstanceId'])])
            else:
                results.extend(instance_map.pop(i['InstanceId']))

        # Filter volumes that are currently under modification
        client = local_session(self.manager.session_factory).client('ec2')
        modifying = set()
        for vol_set in chunks(list(results), 200):
            vol_ids = [v['VolumeId'] for v in vol_set]
            mutating = client.describe_volumes_modifications(
                Filters=[{
                    'Name': 'volume-id',
                    'Values': vol_ids
                }, {
                    'Name': 'modification-state',
                    'Values': ['modifying', 'optimizing', 'failed']
                }])
            for vm in mutating.get('VolumesModifications', ()):
                stats['vol-mutation'] += 1
                filtered.append(vm['VolumeId'])
                modifying.add(vm['VolumeId'])

        self.log.debug("filtered %d of %d volumes due to %s", len(filtered),
                       len(resources), sorted(stats.items()))

        return [r for r in results if r['VolumeId'] not in modifying]
Example #54
0
def add_second(t):
    added = parse_date(t) + relativedelta(seconds=1)
    return added.isoformat()
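
A quick round-trip for the helper above, assuming `parse_date` is dateutil's parser and `relativedelta` comes from `dateutil.relativedelta`:

add_second('2021-03-01T10:15:30')   # -> '2021-03-01T10:15:31'
add_second('2021-12-31T23:59:59')   # -> '2022-01-01T00:00:00' (rolls over cleanly)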
Example #55
0
def apptsuccess(request):
    flag = True
    p_name = request.user.id
    p_user = User.objects.get(id=p_name)
    d_name = request.POST.get("Doctor_name", "")
    d_user = User.objects.get(id=d_name)
    medProblem = request.POST.get("Medical_problem", "")
    date_unicode = request.POST.get("date", "")
    time_unicode = request.POST.get("time1", "")

    if (p_name != None and p_name != "" and d_name != None and d_name != ''
            and medProblem != None and medProblem != ''
            and date_unicode != None and date_unicode != ''
            and time_unicode != None and time_unicode != ""
            and parse_date(date_unicode) > datetime.now()):
        time_list = []
        db_date_list = []
        timedelta_list = []
        date_list = []
        date = datetime.strptime(date_unicode, "%Y-%m-%d")
        time = datetime.strptime(time_unicode, "%H:%M")
        calendar_date = datetime.strptime(date_unicode + " " + time_unicode,
                                          "%Y-%m-%d %H:%M")
        for a in User.objects.raw('SELECT * FROM login_appt;'):
            if (a.doctor_id == long(d_name)):
                time_list.append(a.time)
                date_list.append(a.date)
                db_date_list.append(datetime.combine(a.date, a.time))

        if (calendar_date in db_date_list):
            flag = False
        else:
            for d in db_date_list:
                time_added = d + timedelta(minutes=15)
                if (calendar_date < time_added and calendar_date > d):
                    flag = False
                    break
                else:
                    flag = True
                    continue

        if flag == False:
            doctor_list = []
            for doctor in User.objects.raw(
                    'SELECT * FROM auth_user a join login_userrole b on a.id=b.user_id where b.role="doctor";'
            ):
                doctor_list.append(doctor)
            return render_to_response(
                'patient/appointment.html', {
                    'user':
                    request.user,
                    'doctor':
                    doctor_list,
                    'message':
                    'Sorry, There is a time clash, please select a different time.'
                })
        else:
            d = Appt.objects.create(patient=p_user,
                                    doctor=d_user,
                                    medical_problem=medProblem,
                                    date=date,
                                    time=time)
            d.save()
            return render_to_response('patient/success.html',
                                      {'user': request.user})

    else:
        doctor_list = []
        for doctor in User.objects.raw(
                'SELECT * FROM auth_user a join login_userrole b on a.id=b.user_id where b.role="doctor";'
        ):
            doctor_list.append(doctor)
        return render_to_response(
            'patient/appointment.html', {
                'user':
                request.user,
                'doctor':
                doctor_list,
                'message':
                'Either the information you have filled is incorrect or empty. Please check.'
            })
    d = Appt.objects.create(patient=p_user,
                            doctor=d_user,
                            medical_problem=medProblem,
                            date=date,
                            time=time)
    d.save()
    return render_to_response('patient/success.html', {'user': request.user})
Example #56
0
def api_list_report_files():
    probe_cc = request.args.get("probe_cc")
    probe_asn = request.args.get("probe_asn")
    test_name = request.args.get("test_name")

    since = request.args.get("since")
    until = request.args.get("until")
    since_index = request.args.get("since_index")

    order_by = request.args.get("order_by", "index")
    if order_by is "index":
        order_by = "idx"
    order = request.args.get("order", 'desc')

    try:
        offset = int(request.args.get("offset", "0"))
        limit = int(request.args.get("limit", "100"))
    except ValueError:
        raise BadRequest("Invalid offset or limit")

    q = current_app.db_session.query(ReportFile.filename,
                                     ReportFile.bucket_date,
                                     ReportFile.test_start_time,
                                     ReportFile.probe_cc, ReportFile.probe_asn,
                                     ReportFile.idx)

    # XXX maybe all of this can go into some sort of function.
    if probe_cc:
        q = q.filter(ReportFile.probe_cc == probe_cc)
    if probe_asn:
        q = q.filter(ReportFile.probe_asn == probe_asn)
    if test_name:
        q = q.filter(ReportFile.test_name == test_name)
    if since:
        try:
            since = parse_date(since)
        except ValueError:
            raise BadRequest("Invalid since")
        q = q.filter(ReportFile.test_start_time > since)
    if until:
        try:
            until = parse_date(until)
        except ValueError:
            raise BadRequest("Invalid until")
        q = q.filter(ReportFile.test_start_time <= until)

    if since_index:
        q = q.filter(ReportFile.idx > since_index)

    # XXX these are duplicated above, refactor into function
    if order.lower() not in ('asc', 'desc'):
        raise BadRequest("Invalid order")
    if order_by not in ('test_start_time', 'probe_cc', 'report_id',
                        'test_name', 'probe_asn', 'idx'):
        raise BadRequest("Invalid order_by")

    q = q.order_by('{} {}'.format(order_by, order))
    count = q.count()
    pages = math.ceil(count / limit)
    current_page = math.ceil(offset / limit) + 1

    q = q.limit(limit).offset(offset)
    next_args = request.args.to_dict()
    next_args['offset'] = "%s" % (offset + limit)
    next_args['limit'] = "%s" % limit
    next_url = urljoin(current_app.config['BASE_URL'],
                       '/api/v1/files?%s' % urlencode(next_args))
    if current_page >= pages:
        next_url = None

    metadata = {
        'offset': offset,
        'limit': limit,
        'count': count,
        'pages': pages,
        'current_page': current_page,
        'next_url': next_url
    }
    results = []
    for row in q:
        url = get_download_url(current_app, row.bucket_date, row.filename)
        results.append({
            'download_url':
            url,
            'probe_cc':
            row.probe_cc,
            'probe_asn':
            row.probe_asn,
            # Will we ever hit sys.maxint?
            'index':
            int(row.idx),
            'test_start_time':
            row.test_start_time.strftime('%Y-%m-%dT%H:%M:%SZ')
        })
    return jsonify({'metadata': metadata, 'results': results})
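
A minimal client-side sketch of exercising the endpoint above; the host name is hypothetical, the parameter names come from the handler:

import requests

resp = requests.get(
    'https://api.example.org/api/v1/files',
    params={'probe_cc': 'IT', 'since': '2017-01-01',
            'until': '2017-02-01', 'order_by': 'test_start_time',
            'limit': 50},
)
payload = resp.json()   # {'metadata': {...}, 'results': [{'download_url': ..., ...}]}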
Example #57
0
def build_list(type, video, listing, response):

    ADDON = xbmcaddon.Addon()
    engine = ADDON.getSetting('engine')
    streamType = ADDON.getSetting('streamType')
    kodi_version = int(getKodiVersion())

    # Get the plugin url
    __url__ = sys.argv[0]

    try:
        attrs = video['attributes']
        alternateTitle = attrs.get('alternateId')

        if type == 'sport':
            # Pull start time from availability window
            availability = video.get('attributes',
                                     {}).get('availabilityWindows', [])
            if len(availability) > 0:
                av_window = availability[0]
                av_start = parse_date(av_window['playableStart'])
                av_start_local = av_start.astimezone(tz.tzlocal())
                av_startstr = av_start_local.strftime("%H:%M")

            title = av_startstr + ' - ' + attrs.get('name')
        elif type == 'daily':
            # Pull start time from schedule start
            av_start = parse_date(attrs['scheduleStart'])
            av_start_local = av_start.astimezone(tz.tzlocal())
            av_startstr = av_start_local.strftime("%H:%M")

            title = av_startstr + ' - ' + attrs.get('name')
        elif type == 'ontv':
            # Pull start time from schedule start
            av_start = parse_date(attrs['scheduleStart'])
            av_start_local = av_start.astimezone(tz.tzlocal())
            channel = attrs.get('path')
            if 'eurosport-1' in channel:
                title = 'Eurosport 1: ' + attrs.get('name')
            if 'eurosport-2' in channel:
                title = 'Eurosport 2: ' + attrs.get('name')

        else:
            # Set the base title
            title = attrs.get('name')

        # Add channel details for on-air shows
        if type == 'daily':
            if attrs.get('materialType') == 'LINEAR':
                channel = attrs.get('path')
                if 'eurosport-1' in channel:
                    title = title + ' - (E1)'
                if 'eurosport-2' in channel:
                    title = title + ' - (E2)'

            if attrs.get('broadcastType') == 'LIVE':
                title = title + ' (Live)'

        item = ListItem(title)

        # Get the image and its url
        images = video.get('relationships', {}).get('images',
                                                    {}).get('data', [])

        if len(images) > 0:
            image_url = response.get_image_url(images[0]['id'])
            item.setArt({'thumb': image_url, 'icon': image_url})

        # Set the premiered date
        if type == 'daily' or type == 'ontv':
            premiered = str(attrs.get('scheduleStart')[:10])
            timestamp = attrs.get('scheduleStart')

        if type == 'sport':
            premiered = str(attrs.get('publishStart')[:10])
            timestamp = attrs.get('publishStart')

        # Get the plot
        plot = attrs.get('description')
        if plot == '' or plot is None:
            plot = attrs.get('secondaryTitle')

        # Set the metadata
        if type == 'ondemand':
            labels = {'title': title, 'sorttitle': title}
        else:
            labels = {
                'title': title,
                'sorttitle': title,
                'plot': plot,
                'premiered': premiered,
                'aired': premiered,
                'dateadded': timestamp,
                'mediatype': 'episode'
            }

        item.setInfo('video', labels)

        if type == 'ondemand':
            isPlayable = 'false'
        else:
            now = datetime.datetime.now(tz.tzutc())
            if av_start_local > now:
                isPlayable = 'false'
            else:
                isPlayable = 'true'

        item.setProperty('IsPlayable', isPlayable)

        # Ondemand brings up a list of items to select, not play
        if type == 'ondemand':
            url = '{0}?action=Select sport&sport={1}'.format(
                __url__, alternateTitle)
            isfolder = True
        else:
            # Determine which stream to play

            # Initialise properties
            mimetype = ''
            inputstreamtype = ''
            manifesttype = ''
            inputstream = ''
            manifest = ''

            # Matrix or earlier?
            if kodi_version > 18:
                inputstream = 'inputstream'
            else:
                if engine == 'inputstream.adaptive':
                    inputstream = 'inputstreamaddon'
                if engine == 'ffmpeg':
                    inputstream = 'inputstreamclass'

            # Set manifest & inputstream type
            if engine == 'inputstream.adaptive':
                manifest = 'inputstream.adaptive.manifest_type'
                inputstreamtype = 'inputstream.adaptive'
            if engine == 'ffmpeg':
                manifest = 'inputstream.ffmpegdirect.manifest_type'
                inputstreamtype = 'inputstream.ffmpegdirect'
                item.setProperty('inputstream.ffmpegdirect.is_realtime_stream',
                                 'true')

            # Override settings for HLS streams
            if streamType == 'hls':
                mimetype = 'application/x-mpegURL'
                manifesttype = 'hls'

            # Override settings for ISM streams
            if streamType == 'ism':
                mimetype = 'text/xml'
                manifesttype = 'ism'

            # Set properties
            item.setContentLookup(False)
            item.setProperty(inputstream, inputstreamtype)
            item.setProperty(manifest, manifesttype)
            if (len(mimetype) != 0):
                item.setMimeType(mimetype)

            # Extra properties for ism ondemand streams
            if type == 'ondemand' and streamType == 'ism':
                item.setProperty('inputstream.ffmpegdirect.stream_mode',
                                 'catchup')
                item.setProperty('inputstream.ffmpegdirect.open_mode',
                                 'ffmpeg')
                item.setProperty('inputstream.ffmpegdirect.playback_as_live',
                                 'true')

            id = video.get('id')
            url = '{0}?action=play&id={1}'.format(__url__, id)
            isfolder = False

        # Add item to our listing
        listing.append((url, item, isfolder))
    except:
        pass
Example #58
0
    def get_row_first_seen(row, default=None):
        first_seen = default
        if "first_seen" in row:
            first_seen = parse_date(row["first_seen"]).replace(tzinfo=pytz.utc)
        return first_seen
Example #59
0
def date_or_none(date):
    try:
        return parse_date(date)
    except Exception as error:
        logger.exception(error)
        return None
Example #60
0
def parse_prometheus(alert: JSON, external_url: str) -> Alert:

    status = alert.get('status', 'firing')

    # Allow labels and annotations to use python string formats that refer to
    # other labels eg. runbook = 'https://internal.myorg.net/wiki/alerts/{app}/{alertname}'
    # See https://github.com/prometheus/prometheus/issues/2818

    labels = {}
    for k, v in alert['labels'].items():
        try:
            labels[k] = v.format(**alert['labels'])
        except Exception:
            labels[k] = v

    annotations = {}
    for k, v in alert['annotations'].items():
        try:
            annotations[k] = v.format(**labels)
        except Exception:
            annotations[k] = v

    starts_at = parse_date(alert['startsAt'])
    if alert['endsAt'] != '0001-01-01T00:00:00Z':
        ends_at = parse_date(alert['endsAt'])
    else:
        ends_at = None  # type: ignore

    if status == 'firing':
        severity = labels.pop('severity', 'warning')
        create_time = starts_at
    elif status == 'resolved':
        severity = alarm_model.DEFAULT_NORMAL_SEVERITY
        create_time = ends_at
    else:
        severity = 'unknown'
        create_time = ends_at or starts_at

    # labels
    resource = labels.pop('exported_instance', None) or labels.pop(
        'instance', 'n/a')
    event = labels.pop('event', None) or labels.pop('alertname')
    environment = labels.pop('environment', 'Production')
    customer = labels.pop('customer', None)
    correlate = labels.pop('correlate').split(
        ',') if 'correlate' in labels else None
    service = labels.pop('service', '').split(',')
    group = labels.pop('group', None) or labels.pop('job', 'Prometheus')
    origin = 'prometheus/' + labels.pop('monitor', '-')
    tags = ['{}={}'.format(k, v) for k, v in labels.items()
            ]  # any labels left over are used for tags

    try:
        timeout = int(labels.pop('timeout', 0)) or None
    except ValueError:
        timeout = None

    value = annotations.pop('value', None)
    summary = annotations.pop('summary', None)
    description = annotations.pop('description', None)
    text = description or summary or '{}: {} is {}'.format(
        severity.upper(), resource, event)

    if external_url:
        annotations[
            'externalUrl'] = external_url  # needed as raw URL for bi-directional integration
    if 'generatorURL' in alert:
        annotations[
            'moreInfo'] = '<a href="{}" target="_blank">Prometheus Graph</a>'.format(
                alert['generatorURL'])
    attributes = annotations  # any annotations left over are used for attributes

    return Alert(
        resource=resource,
        event=event,
        environment=environment,
        customer=customer,
        severity=severity,
        correlate=correlate,
        service=service,
        group=group,
        value=value,
        text=text,
        attributes=attributes,
        origin=origin,
        event_type='prometheusAlert',
        create_time=create_time.astimezone(tz=pytz.UTC).replace(tzinfo=None),
        timeout=timeout,
        raw_data=alert,
        tags=tags)