Example #1
def _cal_events_update(params, req):
	cal_id = params.get('CalendarId', '')
	if cal_id != _cal['id']:
		return
	if 'EventId' not in params:
		return
	evtype, evid = params['EventId'].split('-')
	if evtype != 'ticket':
		return
	evid = int(evid)
	if not has_permission('TICKETS_UPDATE', req.context, req):
		return
	sess = DBSession()
	tkt = sess.query(Ticket).get(evid)
	if tkt is None:
		return
	sess.execute(SetVariable('ticketid', tkt.id))
	if 'StartDate' in params:
		new_ts = dparse(params['StartDate']).replace(tzinfo=None, microsecond=0)
		if new_ts:
			tkt.assigned_time = new_ts
	if ('EndDate' in params) and tkt.assigned_time:
		new_ts = dparse(params['EndDate']).replace(tzinfo=None, microsecond=0)
		if new_ts:
			delta = new_ts - tkt.assigned_time
			tkt.duration = delta.seconds
Example #2
def _cal_events_update(params, req):
    if 'EventId' not in params:
        return
    evtype, evid = params['EventId'].split('-')
    if evtype != 'ticket':
        return
    evid = int(evid)
    if not req.has_permission('TICKETS_UPDATE'):
        return
    # TODO: fancy permissions/ACLs
    sess = DBSession()
    tkt = sess.query(Ticket).get(evid)
    if tkt is None:
        return
    sess.execute(SetVariable('ticketid', tkt.id))
    if 'StartDate' in params:
        new_ts = dparse(params['StartDate']).replace(tzinfo=None,
                                                     microsecond=0)
        if new_ts:
            tkt.assigned_time = new_ts
    if ('EndDate' in params) and tkt.assigned_time:
        new_ts = dparse(params['EndDate']).replace(tzinfo=None, microsecond=0)
        if new_ts:
            delta = new_ts - tkt.assigned_time
            tkt.duration = delta.seconds
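Both handlers above normalize client-supplied StartDate/EndDate strings to naive, second-resolution datetimes. Note that datetime.replace() always returns a datetime, so the `if new_ts:` guards only become meaningful if parsing is wrapped to return None on failure; a minimal sketch of such a wrapper (the name `_clean_stamp` is hypothetical):

def _clean_stamp(value):
    # Parse a client-supplied date string into a naive datetime,
    # truncated to whole seconds; return None if it cannot be parsed.
    try:
        return dparse(value).replace(tzinfo=None, microsecond=0)
    except (TypeError, ValueError):
        return None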
Example #3
def _cal_events(evts, params, req):
	if not has_permission('TICKETS_LIST', req.context, req):
		return
	ts_from = params.get('startDate')
	ts_to = params.get('endDate')
	if (not ts_from) or (not ts_to):
		return
	ts_from = dparse(ts_from).replace(hour=0, minute=0, second=0, microsecond=0)
	ts_to = dparse(ts_to).replace(hour=23, minute=59, second=59, microsecond=999999)
	sess = DBSession()
	q = sess.query(Ticket)\
		.filter(
			Ticket.assigned_time <= ts_to,
			IntervalSeconds(Ticket.assigned_time, Ticket.duration) >= ts_from
		)
	for tkt in q:
		ev = {
			'id'       : 'ticket-%d' % tkt.id,
			'cid'      : _cal['id'],
			'title'    : tkt.name,
			'start'    : tkt.assigned_time,
			'end'      : tkt.end_time,
			'notes'    : tkt.description,
			'apim'     : 'tickets',
			'apic'     : 'Ticket',
			'apiid'    : tkt.id,
			'caned'    : False
		}
		evts.append(ev)
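The two filter clauses above implement the standard interval-overlap test: a ticket is kept when [assigned_time, assigned_time + duration] intersects [ts_from, ts_to]. The same predicate in plain Python (a sketch, assuming duration is a number of seconds, as the IntervalSeconds expression suggests):

from datetime import timedelta

def overlaps(assigned_time, duration, ts_from, ts_to):
    # Two closed intervals intersect iff each one starts before the other ends.
    end_time = assigned_time + timedelta(seconds=duration)
    return assigned_time <= ts_to and end_time >= ts_from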
Example #4
def filter_time(descr, tfrom, tto):
    "runid=2582f1be82a0c29a01c4c852b672559257416ff6 read=12 ch=2153 start_time=2017-10-13T11:54:09Z"
    time = dparse([i for i in descr.split() if i.startswith('start_time')][0].split('=')[1])
    return dparse(tfrom) <= time <= dparse(tto)
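One thing to watch when calling filter_time: dparse keeps the UTC offset encoded in start_time (the trailing Z), so tfrom and tto must also parse to timezone-aware datetimes, otherwise Python 3 raises TypeError on the comparison. For example:

descr = ("runid=2582f1be82a0c29a01c4c852b672559257416ff6 "
         "read=12 ch=2153 start_time=2017-10-13T11:54:09Z")
filter_time(descr, "2017-10-13T00:00:00Z", "2017-10-14T00:00:00Z")  # True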
Example #5
def save_or_update_pm_terms(data: np.ndarray, db_config: dict):
    inserts = [
        (
            term[-1],
            dparse(term[3]),  # start
            None if term[4] == "incumbent" else dparse(term[4]),  # end
            term[0],  # pm id
            len(term[1]),  # did the PM have a title? (nonzero length counts as True)
        ) for term in data
    ]

    with MariaDBConnector(**db_config) as cursor:
        logger.info("saving data to prime_minister_term table...")
        sql = """INSERT INTO prime_minister_term (id,start_date, end_date,  prime_minister_id,  had_title)
                 values (%s,%s,%s,%s,%s)
                 ON DUPLICATE KEY UPDATE
                     start_date=start_date,
                     end_date=end_date,
                     prime_minister_id=prime_minister_id,
                     had_title=had_title;"""

        cursor.executemany(sql, inserts)
        cursor.commit()
        sql = """SELECT * FROM prime_minister_term ;"""

        cursor.execute(sql)
        return cursor.fetchall()
Example #6
def read_date(dstr, prefix=None, **kwargs):
    """
    type: `String -> String`

    Examples ::

        read_date("1432") == "1432"
        read_date("December 1432") == "1432-12"
        read_date("3 December 1432") == "1432-12-03"
        read_date("December", prefix="1311") == "1311-12"

    Given an English string representing a date, return an ISO formatted
    partial date representation showing only as much of the date as we
    know the granularity for. (If you supply a prefix, it will be used
    to provide a partial default for unknown values)

    Note that we also accept dateutil.parser.parse kwargs (`fuzzy` may be of use)

    The underlying implementation is a bit embarrassing. We're using an
    English date parser that only returns timestamps (you have to supply
    a default for fields it does not know). The approach we use is to
    parse the date *twice* and take the common prefix of their ISO
    formatted representations. Sorry.
    """

    def iso(stamp):
        "iso format for the date part only"
        return stamp.isoformat().split("T")[0]

    def common_prefix(iso1, iso2):
        "common prefix of two iso date strings"
        parts1 = iso1.split("-")
        parts2 = iso2.split("-")
        return "-".join(p1 for p1, _ in
                        itertools.takewhile(lambda (x, y): x == y,
                                            zip(parts1, parts2)))


    if prefix is not None:
        try:
            default1 = dparse(prefix, default=_DEFAULT_DATE_1)
            default2 = dparse(prefix, default=_DEFAULT_DATE_2)
        except TypeError as _:
            raise ValueError("Could not parse prefix date {}".format(prefix))
    else:
        default1 = _DEFAULT_DATE_1
        default2 = _DEFAULT_DATE_2

    try:
        stamp1 = dparse(dstr, default=default1, **kwargs)
        stamp2 = dparse(dstr, default=default2, **kwargs)
    except (TypeError, ValueError) as _:
        return None

    res = common_prefix(iso(stamp1), iso(stamp2))
    if res == str(_FAR_AWAY):
        res = None
    return res
Example #7
def read_date(dstr, prefix=None, **kwargs):
    """
    type: `String -> String`

    Examples ::

        read_date("1432") == "1432"
        read_date("December 1432") == "1432-12"
        read_date("3 December 1432") == "1432-12-03"
        read_date("December", prefix="1311") == "1311-12"

    Given an English string representing a date, return an ISO formatted
    partial date representation showing only as much of the date as we
    know the granularity for. (If you supply a prefix, it will be used
    to provide a partial default for unknown values)

    Note that we also accept dateutil.parser.parse kwargs (`fuzzy` may be of use)

    The underlying implementation is a bit embarrassing. We're using an
    English date parser that only returns timestamps (you have to supply
    a default for fields it does not know). The approach we use is to
    parse the date *twice* and take the common prefix of their ISO
    formatted representations. Sorry.
    """
    def iso(stamp):
        "iso format for the date part only"
        return stamp.isoformat().split("T")[0]

    def common_prefix(iso1, iso2):
        "common prefix of two iso date strings"
        parts1 = iso1.split("-")
        parts2 = iso2.split("-")
        return "-".join(p1 for p1, _ in itertools.takewhile(
            lambda (x, y): x == y, zip(parts1, parts2)))

    if prefix is not None:
        try:
            default1 = dparse(prefix, default=_DEFAULT_DATE_1)
            default2 = dparse(prefix, default=_DEFAULT_DATE_2)
        except TypeError as _:
            raise ValueError("Could not parse prefix date {}".format(prefix))
    else:
        default1 = _DEFAULT_DATE_1
        default2 = _DEFAULT_DATE_2

    try:
        stamp1 = dparse(dstr, default=default1, **kwargs)
        stamp2 = dparse(dstr, default=default2, **kwargs)
    except (TypeError, ValueError) as _:
        return None

    res = common_prefix(iso(stamp1), iso(stamp2))
    if res == str(_FAR_AWAY):
        res = None
    return res
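The parse-twice trick described in both read_date docstrings works in isolation too; a minimal Python 3 sketch, where _D1 and _D2 stand in for the module's _DEFAULT_DATE_1 and _DEFAULT_DATE_2 and only need to differ in every field the parser might leave at its default:

import itertools
from datetime import datetime
from dateutil.parser import parse as dparse

_D1 = datetime(1, 1, 1)
_D2 = datetime(4, 2, 2)

def partial_iso(text):
    # Fields actually present in `text` agree across both parses;
    # defaulted fields differ, so the common ISO prefix is the known part.
    a = dparse(text, default=_D1).date().isoformat()
    b = dparse(text, default=_D2).date().isoformat()
    pairs = itertools.takewhile(lambda p: p[0] == p[1],
                                zip(a.split("-"), b.split("-")))
    return "-".join(p for p, _ in pairs)

partial_iso("December 1432")    # '1432-12'
partial_iso("3 December 1432")  # '1432-12-03'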
Example #8
def _cal_events(evts, params, req):
	ts_from = params.get('startDate')
	ts_to = params.get('endDate')
	if (not ts_from) or (not ts_to):
		return
	cals = params.get('cals')
	if isinstance(cals, Iterable) and len(cals):
		try:
			cals = [int(cal[5:]) for cal in cals if cal[:5] == 'user-']
		except (TypeError, ValueError):
			cals = ()
		if len(cals) == 0:
			return
	else:
		cals = None
	ts_from = dparse(ts_from).replace(hour=0, minute=0, second=0, microsecond=0)
	ts_to = dparse(ts_to).replace(hour=23, minute=59, second=59, microsecond=999999)
	sess = DBSession()
	cal_q = sess.query(Calendar).filter(Calendar.user == req.user)
	if cals:
		cal_q = cal_q.filter(Calendar.id.in_(cals))
	cal_ids = [cal.id for cal in cal_q]
	for cali in sess.query(CalendarImport).filter(CalendarImport.user_id == req.user.id):
		cal = cali.calendar
		if cal.user == req.user:
			continue
		if cals and (cal.id not in cals):
			continue
		if not cal.can_read(req.user):
			continue
		if cal.id in cal_ids:
			continue
		cal_ids.append(cal.id)
	q = sess.query(Event)\
		.filter(
			Event.calendar_id.in_(cal_ids),
			Event.event_start <= ts_to,
			Event.event_end >= ts_from
		)
	for e in q:
		ev = {
			'id'       : 'event-%u' % e.id,
			'cid'      : 'user-%u' % e.calendar_id,
			'title'    : e.summary,
			'start'    : e.event_start,
			'end'      : e.event_end,
			'ad'       : e.all_day,
			'notes'    : e.description,
			'loc'      : e.location,
			'url'      : e.url,
			'caned'    : e.calendar.can_write(req.user)
		}
		evts.append(ev)
Example #9
 def is_date(self, string: AnyStr, fuzzy: bool = False) -> bool:
     """
     Can the string be converted into a date?
     :param string: the candidate string to test
     :param fuzzy: passed through to dateutil (ignore unknown tokens)
     :return: True if the string parses as a date, otherwise False
     """
     try:
         dparse(string, fuzzy=fuzzy)
         return True
     except ValueError:
         return False
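Typical usage, where `checker` is a hypothetical instance of the enclosing class; dateutil raises ParserError (a ValueError subclass) on unparseable input, so it is caught by the except clause above:

checker.is_date("March 6, 1994")                 # True
checker.is_date("not a date")                    # False
checker.is_date("sale ends May 1", fuzzy=True)   # True: fuzzy skips unknown tokens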
Example #10
def dyn_entity_history(params, request):
	eid = params.get('eid')
	if not eid:
		raise ValueError('No entity ID specified')
	begin = params.get('begin')
	end = params.get('end')
	cat = params.get('cat')
	maxnum = params.get('maxnum')
	sort = params.get('sort')
	sdir = params.get('dir')
	sess = DBSession()
	e = sess.query(Entity).get(int(eid))
	if not e:
		raise KeyError('No such entity found')
	if begin:
		xbegin = dparse(begin)
		if xbegin:
			begin = dt.datetime(
				xbegin.year,
				xbegin.month,
				xbegin.day,
				0, 0, 0
			)
		else:
			begin = None
	else:
		begin = None
	if end:
		xend = dparse(end)
		if xend:
			end = dt.datetime(
				xend.year,
				xend.month,
				xend.day,
				23, 59, 59
			)
		else:
			end = None
	else:
		end = None
	if maxnum:
		maxnum = int(maxnum)
	else:
		maxnum = 20
	ret = {
		'success' : True,
		'history' : e.get_history(request, begin, end, cat, maxnum, sort, sdir)
	}
	ret['total'] = len(ret['history'])
	return ret
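The begin/end handling above widens each parsed date to whole-day bounds. Factored out, the pattern looks like this (a sketch; `dt` is the datetime module alias the example already uses):

def day_bounds(datestr):
    # Expand a date string to [00:00:00, 23:59:59] of that day,
    # or (None, None) if it does not parse.
    try:
        d = dparse(datestr)
    except (TypeError, ValueError):
        return None, None
    return (dt.datetime(d.year, d.month, d.day, 0, 0, 0),
            dt.datetime(d.year, d.month, d.day, 23, 59, 59))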
Example #11
    def get_time_until_next_departure(self, stop_id, linename, direction):
        departure = self.get_next_departure(
            stop_id, linename=linename, direction=direction)

        departure_dt =\
            localize(dparse(departure['MonitoredCall']['ExpectedArrivalTime']))

        return localize(departure_dt) - tz_now()
Example #12
    def get_time_until_next_departure(self, stop_id, linename, direction):
        departure = self.get_next_departure(stop_id,
                                            linename=linename,
                                            direction=direction)

        departure_dt =\
            localize(dparse(departure['MonitoredCall']['ExpectedArrivalTime']))

        return localize(departure_dt) - tz_now()
Example #13
 def __init__(self, raw, product, version, timestamp=None):
     self.raw = raw
     self.product = product
     self.version = version
     if timestamp is None:
         timestamp = datetime.datetime.utcnow()
     if isinstance(timestamp, basestring):
         timestamp = dparse(timestamp)
     self.timestamp = timestamp
Example #14
 def __init__(self, path, username, hostname, config, date):
     self.dir = path
     self.username = username
     self.hostname = hostname
     self.config = config
     self.datestr = date
     self.date = dparse(date)
     self.passes = []
     self.fails = []
     self.notruns = []
Example #15
def _ev_set(sess, ev, params, req):
	user = req.user
	if ev.id:
		if (not ev.calendar) or (not ev.calendar.can_write(user)):
			return False
	cal_id = params.get('CalendarId', '')
	if cal_id:
		if cal_id[:5] != 'user-':
			return False
		try:
			cal_id = int(cal_id[5:])
		except (TypeError, ValueError):
			return False
		cal = sess.query(Calendar).get(cal_id)
		if (cal is None) or (not cal.can_write(user)):
			return False
		ev.calendar = cal

	val = params.get('Title', False)
	if val:
		ev.summary = val
	val = params.get('Url', False)
	if val:
		ev.url = val
	val = params.get('Notes', False)
	if val:
		ev.description = val
	val = params.get('Location', False)
	if val:
		ev.location = val
	if 'StartDate' in params:
		new_ts = dparse(params['StartDate']).replace(tzinfo=None, microsecond=0)
		if new_ts:
			ev.event_start = new_ts
	if 'EndDate' in params:
		new_ts = dparse(params['EndDate']).replace(tzinfo=None, microsecond=0)
		if new_ts:
			ev.event_end = new_ts
	val = params.get('IsAllDay', None)
	if isinstance(val, bool):
		ev.all_day = val
		# FIXME: enforce proper times for all-day events
	return True
Example #16
def _cal_events(evts, params, req):
    if not req.has_permission('TICKETS_LIST'):
        return
    # TODO: fancy permissions/ACLs
    ts_from = params.get('startDate')
    ts_to = params.get('endDate')
    if (not ts_from) or (not ts_to):
        return
    cals = params.get('cals')
    if cals:
        if isinstance(cals, collections.abc.Iterable):
            if _cal['id'] not in cals:
                return
        else:
            return
    ts_from = dparse(ts_from).replace(hour=0,
                                      minute=0,
                                      second=0,
                                      microsecond=0)
    ts_to = dparse(ts_to).replace(hour=23,
                                  minute=59,
                                  second=59,
                                  microsecond=999999)
    sess = DBSession()
    q = sess.query(Ticket)\
     .filter(
      Ticket.assigned_time <= ts_to,
      IntervalSeconds(Ticket.assigned_time, Ticket.duration) >= ts_from
     )
    for tkt in q:
        ev = {
            'id': 'ticket-%d' % tkt.id,
            'cid': _cal['id'],
            'title': tkt.name,
            'start': tkt.assigned_time,
            'end': tkt.end_time,
            'notes': tkt.description,
            'apim': 'tickets',
            'apic': 'Ticket',
            'apiid': tkt.id,
            'caned': False
        }
        evts.append(ev)
Example #17
def dyn_entity_history(params, request):
    eid = params.get('eid')
    if not eid:
        raise ValueError('No entity ID specified')
    begin = params.get('begin')
    end = params.get('end')
    cat = params.get('cat')
    maxnum = params.get('maxnum')
    sort = params.get('sort')
    sdir = params.get('dir')
    e = DBSession().query(Entity).get(int(eid))
    if not e:
        raise KeyError('No such entity found')
    if begin:
        xbegin = dparse(begin)
        if xbegin:
            begin = dt.datetime(xbegin.year, xbegin.month, xbegin.day, 0, 0, 0)
        else:
            begin = None
    else:
        begin = None
    if end:
        xend = dparse(end)
        if xend:
            end = dt.datetime(xend.year, xend.month, xend.day, 23, 59, 59)
        else:
            end = None
    else:
        end = None
    if maxnum:
        maxnum = int(maxnum)
    else:
        maxnum = 20
    ret = {
        'success': True,
        'history': e.get_history(request, begin, end, cat, maxnum, sort, sdir)
    }
    ret['total'] = len(ret['history'])
    return ret
Example #18
def _cal_events(evts, params, req):
	ts_from = params.get('startDate')
	ts_to = params.get('endDate')
	if (not ts_from) or (not ts_to):
		return
	ts_from = dparse(ts_from).replace(hour=0, minute=0, second=0, microsecond=0)
	ts_to = dparse(ts_to).replace(hour=23, minute=59, second=59, microsecond=999999)
	sess = DBSession()
	# FIXME: Add calendar-based filters
	cal_ids = [cal.id for cal in sess.query(Calendar).filter(Calendar.user == req.user)]
	for cali in sess.query(CalendarImport).filter(CalendarImport.user_id == req.user.id):
		cal = cali.calendar
		if cal.user == req.user:
			continue
		if not cal.can_read(req.user):
			continue
		if cal.id in cal_ids:
			continue
		cal_ids.append(cal.id)
	q = sess.query(Event)\
		.filter(
			Event.calendar_id.in_(cal_ids),
			Event.event_start <= ts_to,
			Event.event_end >= ts_from
		)
	for e in q:
		ev = {
			'id'       : 'event-%u' % e.id,
			'cid'      : 'user-%u' % e.calendar_id,
			'title'    : e.summary,
			'start'    : e.event_start,
			'end'      : e.event_end,
			'ad'       : e.all_day,
			'notes'    : e.description,
			'loc'      : e.location,
			'url'      : e.url,
			'caned'    : e.calendar.can_write(req.user)
		}
		evts.append(ev)
Example #19
 def latest_metadata(self):
     """Returns segment metadata from the latest segment"""
     logging.info('Syncing datasource [{}]'.format(self.datasource_name))
     client = self.cluster.get_pydruid_client()
     try:
         results = client.time_boundary(datasource=self.datasource_name)
     except IOError:
         results = None
     if results:
         max_time = results[0]['result']['maxTime']
         max_time = dparse(max_time)
     else:
         max_time = datetime.now()
     # Query segmentMetadata for 7 days back. However, due to a bug,
     # we need to set this interval to more than 1 day ago to exclude
     # realtime segments, which triggered a bug (fixed in druid 0.8.2).
     # https://groups.google.com/forum/#!topic/druid-user/gVCqqspHqOQ
     lbound = (max_time - timedelta(days=7)).isoformat()
     if not self.version_higher(self.cluster.druid_version, '0.8.2'):
         rbound = (max_time - timedelta(1)).isoformat()
     else:
         rbound = max_time.isoformat()
     segment_metadata = None
     try:
         segment_metadata = client.segment_metadata(
             datasource=self.datasource_name,
             intervals=lbound + '/' + rbound,
             merge=self.merge_flag,
             analysisTypes=[])
     except Exception as e:
         logging.warning('Failed first attempt to get latest segment')
         logging.exception(e)
     if not segment_metadata:
         # if no segments in the past 7 days, look at all segments
         lbound = datetime(1901, 1, 1).isoformat()[:10]
         if not self.version_higher(self.cluster.druid_version, '0.8.2'):
             rbound = datetime.now().isoformat()
         else:
             rbound = datetime(2050, 1, 1).isoformat()[:10]
         try:
             segment_metadata = client.segment_metadata(
                 datasource=self.datasource_name,
                 intervals=lbound + '/' + rbound,
                 merge=self.merge_flag,
                 analysisTypes=[])
         except Exception as e:
             logging.warning('Failed 2nd attempt to get latest segment')
             logging.exception(e)
     if segment_metadata:
         return segment_metadata[-1]['columns']
Example #20
 def latest_metadata(self):
     """Returns segment metadata from the latest segment"""
     logging.info('Syncing datasource [{}]'.format(self.datasource_name))
     client = self.cluster.get_pydruid_client()
     try:
         results = client.time_boundary(datasource=self.datasource_name)
     except IOError:
         results = None
     if results:
         max_time = results[0]['result']['maxTime']
         max_time = dparse(max_time)
     else:
         max_time = datetime.now()
     # Query segmentMetadata for 7 days back. However, due to a bug,
     # we need to set this interval to more than 1 day ago to exclude
     # realtime segments, which triggered a bug (fixed in druid 0.8.2).
     # https://groups.google.com/forum/#!topic/druid-user/gVCqqspHqOQ
     lbound = (max_time - timedelta(days=7)).isoformat()
     if not self.version_higher(self.cluster.druid_version, '0.8.2'):
         rbound = (max_time - timedelta(1)).isoformat()
     else:
         rbound = max_time.isoformat()
     segment_metadata = None
     try:
         segment_metadata = client.segment_metadata(
             datasource=self.datasource_name,
             intervals=lbound + '/' + rbound,
             merge=self.merge_flag,
             analysisTypes=[])
     except Exception as e:
         logging.warning('Failed first attempt to get latest segment')
         logging.exception(e)
     if not segment_metadata:
         # if no segments in the past 7 days, look at all segments
         lbound = datetime(1901, 1, 1).isoformat()[:10]
         if not self.version_higher(self.cluster.druid_version, '0.8.2'):
             rbound = datetime.now().isoformat()
         else:
             rbound = datetime(2050, 1, 1).isoformat()[:10]
         try:
             segment_metadata = client.segment_metadata(
                 datasource=self.datasource_name,
                 intervals=lbound + '/' + rbound,
                 merge=self.merge_flag,
                 analysisTypes=[])
         except Exception as e:
             logging.warning('Failed 2nd attempt to get latest segment')
             logging.exception(e)
     if segment_metadata:
         return segment_metadata[-1]['columns']
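Both copies of latest_metadata build Druid query intervals as ISO-8601 "start/end" strings; a small standalone illustration of the construction (the timestamp is made up):

from datetime import datetime, timedelta

max_time = datetime(2017, 10, 13, 11, 54, 9)
lbound = (max_time - timedelta(days=7)).isoformat()
rbound = max_time.isoformat()
interval = lbound + '/' + rbound
# '2017-10-06T11:54:09/2017-10-13T11:54:09'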
Example #21
    def get_datetime(self, published_at):
        """
        Takes the contents of a <pubdate> tag and attempts to convert it to a datetime.datetime object.

        :param unicode published_at: The <pubdate> tag contents. Ideally the format will be one of ( "%a, %d %b %Y %H:%M:%S +0000", "%a, %d %b %Y %H:%M:%S %Z" )

        :rtype: datetime.datetime
        """
        if not published_at:
            return datetime.datetime.now()

        published_at = re.sub(r"\s+", " ", str(published_at))

        print "Converting " + str(published_at)
        return dparse(published_at)
Example #22
def _parse_date(dt_str, is_start=True):
    try:
        ts = float(dt_str)
        if ts > 9999999999:
            ts /= 1000  # treat >10-digit values as millisecond timestamps
        return dt.fromtimestamp(ts)
    except (TypeError, ValueError, OSError, OverflowError):
        pass
    try:
        default_date = dt(dt.now().year, 1, 1, 0, 0, 0) if is_start else dt(
            dt.now().year, 12, 31, 23, 59, 59)
        return dparse(dt_str, fuzzy=True, default=default_date)
    except (TypeError, ValueError, OverflowError):
        pass
    raise ValueError(f'Cannot parse datetime {dt_str}')
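A usage sketch (assuming `dt` is bound to datetime.datetime, as the calls above imply): numeric strings take the epoch branch, everything else falls through to fuzzy dateutil parsing with year-start or year-end defaults:

_parse_date("1500000000")               # 2017-07-14 (epoch branch, local time)
_parse_date("March 5")                  # March 5 of the current year, 00:00:00
_parse_date("March 5", is_start=False)  # March 5 of the current year, 23:59:59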
Example #23
def frenchdates(item):
    pos=item.find(u'trimestre')
    if pos>=0:
        try:
            quarter=int(item[pos-5])
        except ValueError:
            return item
        date='15/%s/2012' % ((quarter*3)-1)
    else:
        date=u'/'.join([frmonths.get(x,x) for x in item.split()])
    try:
        date=dparse(date)
    except (UnicodeEncodeError, ValueError) as e:
        return item
    except:
        print >>sys.stderr, date.encode('utf8')
        raise
    return date.isoformat()
Example #24
def reorder(doc):
    """
    fixme: obsolete

    Reorder and clean up dives in a UDDF document.

    The following operations are performed:

    - dives are sorted by dive start time
    - duplicate dives and repetition groups are removed

    :Parameters:
     doc
        UDDF document.
    """
    find = partial(doc.xpath, namespaces=_NSMAP)

    profiles = find('//uddf:profiledata')
    rgroups = find('//uddf:profiledata/uddf:repetitiongroup')
    if not profiles or not rgroups:
        raise ValueError('No profile data to reorder')
    pd = profiles[0]

    q = '/uddf:uddf/uddf:profiledata/uddf:repetitiongroup/uddf:dive'
    qt = q + '/uddf:informationbeforedive/uddf:datetime/text()'
    nodes = find(q)
    times = find(qt)

    dives = {}
    for n, t in zip(nodes, times):
        dt = dparse(t) # don't rely on string representation for sorting
        if dt not in dives:
            dives[dt] = n

    log.debug('removing old repetition groups')
    for rg in rgroups: # cleanup old repetition groups
        pd.remove(rg)
    rg, = create_node('uddf:repetitiongroup', parent=pd)
    _set_id(rg)

    # sort dive nodes by dive time
    log.debug('sorting dives')
    for dt, n in sorted(dives.items(), key=itemgetter(0)):
        rg.append(n)
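The inline comment "don't rely on string representation for sorting" deserves a demonstration: ISO strings with mixed UTC offsets do not sort chronologically as text, while parsed datetimes do:

from dateutil.parser import parse as dparse

stamps = ['2012-01-01T10:00:00+02:00',  # 08:00 UTC
          '2012-01-01T09:30:00+00:00']  # 09:30 UTC
sorted(stamps)              # text order: the 09:30 stamp sorts first
sorted(stamps, key=dparse)  # chronological order: the +02:00 stamp is earlier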
Example #25
 def before_render(self):
     """ put anything that needs to happen just before rendering in here,
     for example, if you want to assign a template variable to the page
     that's determined only after a whole GET or POST method runs,
     you would put it here, like:      
     """
     
     self.page["top_navs"] = [
         [urls["home"][0], urls["home"][1]],
         ["Trends", "/trends"],
         ["About Us", "/about"],
         ["FAQs", "/faqs"],
         ["Contact", "/contact"],
     ]
     
     if self.template == "home":
         origin = dparse("july 1st 2011")
         now = datetime.datetime.now()
         elapsed_time = int((now-origin).total_seconds())
         check_rate = 2000
         
         self.page["counter_stats,"] = {
             "update_delay": 350,
             "variation": 0.10,
             "values": {
                 "num_sites": {
                     "initial": models.Domain.count(),
                     "rate": 0.1
                 },
                 "num_checks": {
                     "initial": elapsed_time * check_rate,
                     "rate": check_rate
                 },
                 "num_down": {
                     "initial": models.Domain.down_count(),
                     "bias": 0.5, # if it's < 0.5, more sites are going down than coming up
                     "rate": 0.2
                 }
             }
         }
     
     self.page["activity_stream_config,"] = config.activity_stream
     self.page["activity_entry_queue,"] = [] #random_activity_stream_entry(config.activity_stream["length"] * 3)
Example #26
    def __call__(self, args):
        """
        Execute command for adding dives into logbook file.
        """
        import kenozooid.logbook as kl
        import kenozooid.data as kd
        from dateutil.parser import parse as dparse

        lfile = args.logbook

        datetime = dparse(args.datetime)
        depth = float(args.depth)
        duration = float(args.duration) * 60

        site = args.site
        buddy = args.buddy

        dive = kd.Dive(datetime=datetime, depth=depth, duration=duration)
        kl.add_dive(dive, lfile, qsite=site, qbuddies=buddy)
Example #27
    async def create_event(self, msg, prompt="When's dota?"):
        """Create group event. """
        if await self.get_future_event():
            raise EventAlreadyPlanned("There is already a future event planned.")

        await self.sender.sendMessage(prompt)

        # TODO add error checking here
        response = await self.listener.wait()
        start_datetime = dparse(response["text"])

        await self.sender.sendMessage(
            "Creating event, with you as first attendee.")

        group_event = GroupEvent(start_datetime)

        print(group_event.export())
        try:
            await self.write_event_to_db(group_event)
            await self.add_person_to_event(
                group_event, {"name": msg["from"]["username"]})
        except EventAlreadyPlanned:
            await self.sender.sendMessage("Sorry, event already planned for then!")
Example #28
def DECODER(msg, address=None, host=None):
    sender=collapse_rfc2231_value(msg['from'])
    m=sendere.match(sender)
    res={}
    if m:
        res['sender_name'], res['sender_mail']=m.groups()
    else:
        res['sender_mail']=sender

    for mpart in msg.walk():
        part=to_message(mpart)
        # cut off the preamble
        inblock=False
        lines=part.get_payload(decode=True).split('\n')
        i=0
        #logging.info(lines)
        while i<len(lines):
            if not inblock:
                if lines[i].strip()=='-----BEGIN PGP MESSAGE-----':
                    inblock=True
                    i+=2
            else:
                if lines[i].strip()=='-----END PGP MESSAGE-----':
                    break
            i+=1
        #logging.info(i)
        if i<len(lines):
            res.update(getpgpmeta(part.get_payload(decode=True)))
            ret=gpg('-d',
                    _ok_code=[0,2],
                    _in=part.get_payload(decode=True))
            #logging.info('ret '+str(ret))
            #logging.info('stderr '+ret.stderr)
            res['msg']='\n'.join(["> %s" % x for x in ret.stdout.split('\n')])
            # extra points,
            #   - no named recipient
            #   - signed
            modifiers={'sekrit': False, 'signed': False}
            #logging.info(res['keys'])
            if len([x for x in res['keys'] if x['key_id']!="0000000000000000"])==0:
                modifiers['sekrit']=True
            signed={}
            for line in ret.stderr.split('\n'):
                if line.startswith('gpg: Signature made '):
                    # gpg: Signature made Fri 11 May 2012 04:43:04 PM CEST using RSA key ID XXXXXX
                    m=signed1re.match(line)
                    if m:
                        #logging.info(m.groups())
                        signed['date']=dparse(str(m.group(1)))
                        signed['algo']=m.group(2)
                        signed['key_id']=m.group(3)
                elif line.startswith('gpg: Good signature from '):
                    # gpg: Good signature from "name <mail>"
                    m=signed2re.match(line)
                    if m:
                        #logging.info(m.groups())
                        signed['name']=m.group(1)
                        signed['mail']=m.group(2)
                    modifiers['signed']=True
            if signed: res['signed']=signed
            res['award']=award("You sent an encrypted mail.\n%s" % '\n'.join(["%s [%s]" % (k,'X' if v else ' ') for k,v in modifiers.items()]))
            #logging.info(res)
            welcome = view.respond(res, "pgpmail.msg",
                           From=sendermail,
                           To=sender,
                           Subject="Encrypted mail received")
            relay.deliver(welcome)
Example #29
def client_sessions(ctx, request):
	loc = get_localizer(request)
	page = int(request.params.get('page', 1))
	# FIXME: make per_page configurable
	per_page = 30
	ts_from = request.params.get('from')
	ts_to = request.params.get('to')
	ts_now = dt.datetime.now()
	sess = DBSession()
	ent_ids = tuple()
	cls = AccessSession
	cls_name = _('Active Sessions')
	show_active = True
	entity_name = None
	tsfield = AccessSession.update_timestamp
	if request.matchdict and ('traverse' in request.matchdict):
		tr = request.matchdict.get('traverse')
		if len(tr) > 3:
			eid = int(tr[2])
			ent = sess.query(AccessEntity).get(eid)
			if (not ent) or (ent.stash != ctx):
				raise HTTPForbidden()
			entity_name = ent.nick
			ent_ids = (eid,)
			if tr[3] == 'past':
				cls = AccessSessionHistory
				cls_name = _('Past Sessions')
				show_active = False
				tsfield = AccessSessionHistory.end_timestamp
	if not len(ent_ids):
		ent_ids = [e.id for e in ctx.access_entities]
	if ts_from:
		try:
			ts_from = dparse(ts_from)
		except ValueError:
			ts_from = None
	else:
		ts_from = None
	if ts_to:
		try:
			ts_to = dparse(ts_to)
		except ValueError:
			ts_to = None
	else:
		ts_to = None
	if ts_from is None:
		ts_from = request.session.get('sessions_ts_from')
	if ts_to is None:
		ts_to = request.session.get('sessions_ts_to')
	if ts_from is None:
		ts_from = ts_now.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
	if ts_to is None:
		ts_to = ts_from\
			.replace(hour=23, minute=59, second=59, microsecond=999999)\
			+ relativedelta(months=1, days=-1)
	request.session['sessions_ts_from'] = ts_from
	request.session['sessions_ts_to'] = ts_to

	total = sess.query(func.count('*')).select_from(cls)\
		.filter(
			cls.entity_id.in_(ent_ids),
			tsfield.between(ts_from, ts_to)
		)\
		.scalar()
	max_page = int(math.ceil(total / per_page))
	if max_page <= 0:
		max_page = 1
	if page <= 0:
		page = 1
	elif page > max_page:
		page = max_page
	sessions = sess.query(cls)\
		.filter(
			cls.entity_id.in_(ent_ids),
			tsfield.between(ts_from, ts_to)
		)\
		.order_by(tsfield.desc())
	if total > per_page:
		sessions = sessions\
			.offset((page - 1) * per_page)\
			.limit(per_page)

	crumbs = [{
		'text' : loc.translate(_st('My Accounts')),
		'url'  : request.route_url('stashes.cl.accounts', traverse=())
	}, {
		'text' : ctx.name,
		'url'  : request.route_url('stashes.cl.accounts', traverse=(ctx.id,))
	}]
	if entity_name:
		crumbs.append({ 'text' : entity_name })
	crumbs.append({ 'text' : loc.translate(cls_name) })
	tpldef = {
		'ts_from'  : ts_from,
		'ts_to'    : ts_to,
		'ename'    : entity_name,
		'active'   : show_active,
		'page'     : page,
		'perpage'  : per_page,
		'maxpage'  : max_page,
		'sessions' : sessions.all(),
		'crumbs'   : crumbs
	}

	request.run_hook('access.cl.tpldef', tpldef, request)
	request.run_hook('access.cl.tpldef.accounts.sessions', tpldef, request)
	return tpldef
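The default reporting window in client_sessions is the current calendar month; isolated, the computation is (relativedelta comes from dateutil):

import datetime as dt
from dateutil.relativedelta import relativedelta

ts_now = dt.datetime.now()
ts_from = ts_now.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
ts_to = (ts_from.replace(hour=23, minute=59, second=59, microsecond=999999)
         + relativedelta(months=1, days=-1))  # last microsecond of the month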
Example #30
        ax.scatter(X[:, 0],
                   X[:, 1],
                   X[:, 2],
                   color=colors[(sum_labels + 1) // 2])
        ax.set_xlabel(pca1)
        ax.set_ylabel(pca2)
        ax.set_zlabel(pca3)
        ax.set_xlim(-10, 10)
        ax.set_ylim(-10, 10)
        ax.set_zlim(-10, 10)

        plt.savefig(f"./result/{fname[:-5]}_fig.png", dpi=300)
        plt.close()
        with open('./json/' + fname, "r", encoding="utf-8-sig") as json_file:
            json_data = json.load(json_file)
            mes_date = json_data['mes_datetime']
            result_arr.append([dparse(mes_date), result_label])
        print(f"=====Complete {i+1} files in {len(json_files)}=====")
        print("")

        json_file.close()
        result_dict = dict()
        result_dict['selected_features'] = str([pca1, pca2, pca3])
        result_dict['classes'] = sum_labels[-features_n.shape[0]:].tolist()
        if result_label >= 0.5:
            result_dict['result'] = 'normal'
        else:
            result_dict['result'] = 'abnormal'

        with open('./result/' + fname[:-5] + '_result.json', "w") as json_file:
            json.dump(result_dict, json_file, indent=4)
    result_arr.sort()
Example #31
 def _clean_deadline(deadline):
     if isinstance(deadline, str):
         return dparse(deadline)
     return deadline
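Usage: strings are parsed, while datetimes (or None) pass through untouched:

_clean_deadline("2021-06-01 17:00")  # datetime.datetime(2021, 6, 1, 17, 0)
_clean_deadline(None)                # None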
Example #32
# Status,url,Country,Date,treaty
# In Force,http://www.wipo.int/treaties/en/Remarks.jsp?cnty_id=903C,Albania,"March 6, 1994",Berne Convention

treatytype, created = RegionType.objects.get_or_create(name="Treaty")

with transaction.commit_on_success():
   for line in reader:
      treaty, created = Region.objects.get_or_create(name=line[4], type=treatytype)

      if created:
         print "Created treaty %s" % treaty.name

      if not line[2].strip(): continue
      try:
         signatory = Region.objects.get(name=line[2])
      except Region.DoesNotExist:
         try:
            signatory = Region.objects.get(shortname=line[2])
         except Region.DoesNotExist:
            print "Failed to recognize signatory '%s'" % line[2]
            continue

      # print signatory
      if line[0]:
         memrel, created = RegionMembership.objects.get_or_create(region=treaty, member=signatory)
         memrel.type = line[0]
         if line[3].strip():
             memrel.start = dparse(line[3])
         memrel.save()
         if created:
            print "Added %s to treaty %s" % (signatory, treaty.name)
Example #33
def datetime_from_str(s):
    if s is None:
        return None
    if not isinstance(s, datetime):
        return dparse(s).replace(tzinfo=None)
    return s
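Note that replace(tzinfo=None) simply drops the offset and keeps the wall-clock reading rather than converting to UTC:

dparse('2020-01-01T12:00:00+02:00').replace(tzinfo=None)
# datetime.datetime(2020, 1, 1, 12, 0) -- not 10:00 UTC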
Example #34
             desc.append(u"Chapter: %s" % line[0])
         if line[5]:
             desc.append(u"Treaty(PDF): %s" % line[5])
         treaty.description='\n\n'.join(desc)
         treaty.save()

      line[2]=line[2].decode('utf8')
      if line[2][0]=='[': line[2]=line[2][1:]
      try:
         signatory = Region.objects.get(name=line[2])
      except Region.DoesNotExist:
         try:
            signatory = Region.objects.get(shortname=line[2])
         except Region.DoesNotExist:
            print (u"Failed to recognize signatory '%s'" % line[2]).encode('utf8')
            continue

      # print signatory
      if line[4].strip():
         memrel, created = RegionMembership.objects.get_or_create(region=treaty, member=signatory)
         memrel.type = 'Ratified'
         try:
             date=line[4].strip()
             if date[0]=='[' and date[-1]==']': date=date[1:-1]
             date=' '.join(date.split(' ')[:3])
             memrel.start = dparse(date,fuzzy=True)
         except:
             print line[4]
             raise
         memrel.save()
         if created:
            print (u"Added %s to treaty %s" % (signatory, treaty.name)).encode('utf8')
Example #35
def dyn_ticket_sched_find(params, request):
    if 'date' not in params:
        raise ValueError('No date given')
    dur = 0
    tkt = None
    tpl = None
    sess = DBSession()
    if params.get('ticketid'):
        tkt = sess.query(Ticket).get(int(params['ticketid']))
        if not tkt:
            raise KeyError('No matching ticket found')
        dur = tkt.duration
    elif params.get('tstid'):
        tst = sess.query(TicketState).get(int(params['tstid']))
        if not tst:
            raise KeyError('No matching ticket state found')
        dur = tst.duration
    elif params.get('ttplid'):
        tpl = sess.query(TicketTemplate).get(int(params['ttplid']))
        if not tpl:
            raise KeyError('No matching ticket template found')
        dur = tpl.duration
    else:
        raise ValueError('No ticket or ticket state ID given')
    p_dt = dparse(params['date'])
    from_dt = dt.datetime(p_dt.year, p_dt.month, p_dt.day, 0, 0, 0)
    to_dt = dt.datetime(p_dt.year, p_dt.month, p_dt.day, 23, 59, 59)
    sched = []
    if params.get('tschedid'):
        xs = sess.query(TicketScheduler).get(int(params['tschedid']))
        if xs:
            sched.append(xs)
    if params.get('xtschedid'):
        xs = sess.query(TicketScheduler).get(int(params['xtschedid']))
        if xs:
            sched.append(xs)
    if tpl and tpl.scheduler:
        sched.append(tpl.scheduler)
    user = None
    group = None
    numdates = int(params.get('numdates', 5))
    if 'uid' in params:
        user = sess.query(User).get(int(params['uid']))
    elif tpl:
        if tpl.assign_to_self:
            user = request.user
        elif tpl.assign_to_user:
            user = tpl.assign_to_user
    if user and user.schedule_map:
        sched.append(user.schedule_map.scheduler)
    if 'gid' in params:
        group = sess.query(Group).get(int(params['gid']))
    elif tpl:
        if tpl.assign_to_own_group:
            group = request.user.group
        elif tpl.assign_to_group:
            group = tpl.assign_to_group
    if group and group.schedule_map:
        sched.append(group.schedule_map.scheduler)
    dates = TicketScheduler.find_schedule(tkt,
                                          sched,
                                          from_dt,
                                          to_dt,
                                          user,
                                          group,
                                          max_dates=numdates,
                                          duration=dur)
    return {'success': True, 'dates': dates}
Example #36
def str_to_date(_value):
    try:
        dparse(_value)
        return True
    except (ValueError, OverflowError):
        return False
Example #37
def otrfp(msg, address=None, host=None):
    sender=collapse_rfc2231_value(msg['from'])
    m=sendere.match(sender)
    res={}
    if m:
        res['sender_name'], res['sender_mail']=m.groups()
    else:
        res['sender_mail']=sender

    for mpart in msg.walk():
        part=to_message(mpart)
        # cut off the preamble
        inblock=False
        lines=part.get_payload(decode=True).split('\n')
        i=0
        #logging.info(lines)
        while i<len(lines):
            if not inblock:
                if lines[i].strip()=='-----BEGIN PGP SIGNED MESSAGE-----' or lines[i].strip()=='-----BEGIN PGP MESSAGE-----':
                    inblock=True
                    i+=2
            else:
                if lines[i].strip()=='-----END PGP SIGNATURE-----' or lines[i].strip()=='-----END PGP MESSAGE-----':
                    break
            i+=1
        #logging.info(i)
        if i<len(lines):
            res.update(getpgpmeta(part.get_payload(decode=True)))
            ret=gpg('-d',
                    _ok_code=[0,2],
                    _in=part.get_payload(decode=True))
            #logging.info('ret '+str(ret))
            #logging.info('stderr '+ret.stderr)
            res['msg']='\n'.join(["> %s" % x for x in ret.stdout.split('\n')])
            # extra points,
            #   - no named recipient
            #   - signed
            #logging.info(res['keys'])
            modifiers={'sekrit': False, 'signed': False}
            if len([x for x in res['keys'] if x['key_id']!="0000000000000000"])==0:
                modifiers['sekrit']=True
            else: 
                logging.warn([x for x in res['keys'] if x['key_id']!="0000000000000000"])
            signed={}
            for line in ret.stderr.split('\n'):
                if line.startswith('gpg: Signature made '):
                    # gpg: Signature made Fri 11 May 2012 04:43:04 PM CEST using RSA key ID XXXXXX
                    m=signed1re.match(line)
                    if m:
                        #logging.info(m.groups())
                        signed['date']=dparse(str(m.group(1)))
                        signed['algo']=m.group(2)
                        signed['key_id']=m.group(3)
                elif line.startswith('gpg: Good signature from '):
                    # gpg: Good signature from "name <mail>"
                    m=signed2re.match(line)
                    if m:
                        #logging.info(m.groups())
                        signed['name']=m.group(1)
                        signed['mail']=m.group(2)
                    modifiers['signed']=True
            if not signed:
                plssign = view.respond(res, "plssign.msg",
                                       From=sendermail,
                                       To=sender,
                                       Subject="OTR fingerprint help")
                relay.deliver(plssign)
                continue
            res['signed']=signed
            res['award']=award("you bootstrapped OTR trust using PGP.\n%s" % '\n'.join(["%s [%s]" % (k,'X' if v else ' ') for k,v in modifiers.items()]))
            #logging.info(res)
            jid=None
            fp=None
            secret=None
            for line in to_message(from_string(ret.stdout)).get_payload(decode=True).split('\n'):
                if not line.strip(): continue
                if line=='-- ': break
                if jid and fp:
                    secret=line
                    break
                #logging.info("line "+line)
                m=otrfpre.match(line)
                if m:
                    #logging.info(m.groups())
                    jid, fp = m.group(1), m.group(2)
            if jid and fp:
                with FileLock('%s/otr/otr/%s.fpr' % (basepath, botjid)):
                    fr=open('%s/otr/otr/%s.fpr' % (basepath, botjid), 'r')
                    fw=open('%s/otr/otr/%s.fpr.new' % (basepath, botjid), 'w')
                    for line in fr:
                        #logging.info(line)
                        #logging.info("%s\t%s\tjabber\t%s" % (jid,
                        #                              botjid,
                        #                              fp.lower().replace(' ','')))
                        if line.startswith("%s\t%s\tjabber\t%s" % (jid,
                                                                   botjid,
                                                                   fp.lower().replace(' ',''))):
                            fw.write("%s\t%s\tjabber\t%s\ttrust\n" % (jid,
                                                                    botjid,
                                                                    fp.lower().replace(' ','')))
                        else:
                            fw.write(line)
                    fw.close()
                    fr.close()
                    os.unlink('%s/otr/otr/%s.fpr' % (basepath, botjid))
                    shutil.move('%s/otr/otr/%s.fpr.new' % (basepath, botjid),
                                '%s/otr/otr/%s.fpr' % (basepath, botjid))
            if secret:
                fs=open('%s/otr/otr/%s.s' % (basepath, jid), 'w')
                fs.write("%s %s" % (signed['key_id'], secret))
                fs.close()
            welcome = view.respond(res, "otrtrust.msg",
                           From=sendermail,
                           To=sender,
                           Subject="OTR fingerprint received")
            relay.deliver(welcome)
Example #38
def parse_date(date):
    if not isinstance(date, datetime.date):
        date = str(date)
        return dparse(date)
    else:
        return date
Example #39
def client_sessions(ctx, request):
    loc = request.localizer
    page = int(request.params.get('page', 1))
    # FIXME: make per_page configurable
    per_page = 30
    ts_from = request.params.get('from')
    ts_to = request.params.get('to')
    ts_now = dt.datetime.now()
    sess = DBSession()
    ent_ids = tuple()
    cls = AccessSession
    cls_name = _('Active Sessions')
    show_active = True
    entity_name = None
    tsfield = AccessSession.update_timestamp
    if request.matchdict and ('traverse' in request.matchdict):
        tr = request.matchdict.get('traverse')
        if len(tr) > 3:
            eid = int(tr[2])
            ent = sess.query(AccessEntity).get(eid)
            if (not ent) or (ent.stash != ctx):
                raise HTTPForbidden()
            entity_name = ent.nick
            ent_ids = (eid, )
            if tr[3] == 'past':
                cls = AccessSessionHistory
                cls_name = _('Past Sessions')
                show_active = False
                tsfield = AccessSessionHistory.end_timestamp
    if not len(ent_ids):
        ent_ids = [e.id for e in ctx.access_entities]
    if ts_from:
        try:
            ts_from = dparse(ts_from)
        except ValueError:
            ts_from = None
    else:
        ts_from = None
    if ts_to:
        try:
            ts_to = dparse(ts_to)
        except ValueError:
            ts_to = None
    else:
        ts_to = None
    if ts_from is None:
        ts_from = request.session.get('sessions_ts_from')
    if ts_to is None:
        ts_to = request.session.get('sessions_ts_to')
    if ts_from is None:
        ts_from = ts_now.replace(day=1,
                                 hour=0,
                                 minute=0,
                                 second=0,
                                 microsecond=0)
    if ts_to is None:
        ts_to = ts_from\
         .replace(hour=23, minute=59, second=59, microsecond=999999)\
         + relativedelta(months=1, days=-1)
    request.session['sessions_ts_from'] = ts_from
    request.session['sessions_ts_to'] = ts_to

    total = sess.query(func.count('*')).select_from(cls)\
     .filter(
      cls.entity_id.in_(ent_ids),
      tsfield.between(ts_from, ts_to)
     )\
     .scalar()
    max_page = int(math.ceil(total / per_page))
    if max_page <= 0:
        max_page = 1
    if page <= 0:
        page = 1
    elif page > max_page:
        page = max_page
    sessions = sess.query(cls)\
     .filter(
      cls.entity_id.in_(ent_ids),
      tsfield.between(ts_from, ts_to)
     )\
     .order_by(tsfield.desc())
    if total > per_page:
        sessions = sessions\
         .offset((page - 1) * per_page)\
         .limit(per_page)

    crumbs = [{
        'text': loc.translate(_st('My Accounts')),
        'url': request.route_url('stashes.cl.accounts', traverse=())
    }, {
        'text': ctx.name,
        'url': request.route_url('stashes.cl.accounts', traverse=(ctx.id, ))
    }]
    if entity_name:
        crumbs.append({'text': entity_name})
    crumbs.append({'text': loc.translate(cls_name)})
    tpldef = {
        'ts_from': ts_from,
        'ts_to': ts_to,
        'ename': entity_name,
        'active': show_active,
        'page': page,
        'perpage': per_page,
        'maxpage': max_page,
        'sessions': sessions.all(),
        'crumbs': crumbs
    }

    request.run_hook('access.cl.tpldef', tpldef, request)
    request.run_hook('access.cl.tpldef.accounts.sessions', tpldef, request)
    return tpldef
Example #40
def sync_for(user):
    if is_dev(): return
    if not (user.habitica_user_id and user.habitica_api_token):
        return
    # https://habitica.com/apidoc/#api-Task-GetUserTasks
    logger.info("Calling Habitica")
    headers = {
        "Content-Type": "application/json",
        "x-api-user": user.habitica_user_id,
        "x-api-key": user.habitica_api_token,
        "x-client": f"{vars.HABIT_USER}-{vars.HABIT_APP}"
    }
    tasks = requests.get('https://habitica.com/api/v3/tasks/user',
                         headers=headers).json()['data']
    huser = requests.get(
        'https://habitica.com/api/v3/user?userFields=lastCron,needsCron',
        headers=headers).json()['data']

    lastCron = dparse(huser['lastCron'])
    logger.info("Habitica finished")

    fes = M.FieldEntry.get_day_entries(user.id, day=lastCron).all()

    f_map = {f.service_id: f for f in user.fields}
    fe_map = {fe.field_id: fe for fe in fes}
    t_map = {task['id']: task for task in tasks}

    # Remove Habitica-deleted tasks
    for f in user.fields:
        if f.service != 'habitica': continue
        if f.service_id not in t_map:
            db.session.delete(f)
    db.session.commit()

    # Add/update tasks from Habitica
    for task in tasks:
        # {id, text, type, value}
        # habit: {counterUp, counterDown}
        # daily:{checklist: [{completed}], completed, isDue}

        # only care about habits/dailies
        if task['type'] not in ['habit', 'daily']: continue

        f = f_map.get(task['id'], None)
        if not f:
            # Field doesn't exist here yet, add it.
            # TODO delete things here if deleted in habitica
            f = M.Field(service='habitica',
                        service_id=task['id'],
                        name=task['text'],
                        type='number')
            user.fields.append(f)
        # Text has changed on Habitica, update here
        if f.name != task['text']:
            f.name = task['text']

        db.session.commit()  # for f to have f.id

        value = 0.
        # Habit
        if task['type'] == 'habit':
            value = (task['counterUp'] or 0.) - (task['counterDown'] or 0.)
        # Daily
        else:
            value = 1. if task['completed'] \
                else 0. if not task['isDue'] \
                else -1.

            # With Checklist
            cl = task['checklist']
            if (not task['completed']) and any(c['completed'] for c in cl):
                value = sum(c['completed'] for c in cl) / len(cl)

        fe = fe_map.get(f.id, None)
        if fe:
            fe.value = value
        else:
            fe = M.FieldEntry(field_id=f.id, created_at=lastCron, value=value)
            user.field_entries.append(fe)
        db.session.commit()
        logger.info(task['text'] + " done")
Example #41
cmap=dict([(''.join(reg.shortname.lower().split()), reg)
           for reg in Region.objects.all()])
countries=cmap.keys()

with transaction.commit_on_success():
   for item in reversed(f['entries']):
      ts=[]
      cs=[]
      for tag in item.get('tags',[]):
         if tag.get('term','').lower() in countries:
            cs.append(tag.get('term'))
         if tag.get('term','').lower() in topics:
            ts.append(tag.get('term'))
      if ts:
         newsitem, created = NewsItem.objects.get_or_create(headline=item.get('title'),
                                                            timestamp_submitted=dparse(item.get('updated')),
                                                            url=item['links'][0]['href'])
         if created:
            print 'added', newsitem.headline.encode('utf8')
            newsitem.text=''.join([x.value for x in item.content])
            newsitem.save()
            # add tags from bookmark
            for tag in item.get('tags',[]):
               if tag in cs or tag in ts: continue
               t=Tag.objects.get_or_create(name=tag['term'])[0]
               EntityTag.objects.get_or_create(entity=newsitem, tag=t)
            # link topics to newsitem
            for t in ts:
               if not t.strip(): continue
               newsitem.itemref.add(tmap[t.lower()])
            # link countries to newsitem
Example #42
    def parse(self):
        """
        Parse the table
        :return:
        """
        # TODO: Some optimisation, this is really inefficient, but it works
        logger.info("parsing for all PM's from %s ", self.url)
        for _row_num, row in enumerate(self.rows):
            sup = row.find(["sup"])
            if sup:
                sup.decompose()
            row_items = row.findAll(["td", "th"])
            for cell_index, cell in enumerate(row_items):

                # get the rowspans and/or colspans,
                # defaulting to 1 if neither exists
                col_span = int(cell.get("colspan", 1))
                row_span = int(cell.get("rowspan", 1))
                a_tag = cell.find("a")

                href = None if not a_tag else a_tag["href"].replace(",", "¬")
                l = 0
                for rs in range(row_span):
                    # Go to the first empty cell
                    while self.matrix[_row_num + rs][cell_index + l]:
                        l += 1
                    for cs in range(col_span):
                        cell_n = cell_index + l + cs
                        row_n = _row_num + rs
                        # in some cases the colspan can overflow the table, in
                        # those cases just get the last item
                        cell_n = min(cell_n, len(self.matrix[row_n]) - 1)

                        # using † as a separator of tags because whitespace won't work as there
                        # is whitespace in a number of cells
                        _text = cell.get_text(
                            strip=True, separator="†"
                        ).replace("&nbsp;", " ")

                        text = _text if not a_tag else f"{_text}|{href}"
                        # insert a placeholder character, as we only fill
                        # empty, unaccounted-for cells with data
                        text = text if len(text) else text + "?"

                        self.matrix[row_n][cell_n] += text

        # remove cols not needed.
        self.delete_cols([8, 10])

        rows_to_keep = []

        for index, row in enumerate(self.matrix):
            from_date = row[3].replace("†", " ")
            to_date = row[4].replace("†", " ")
            to_delete = True
            if self.is_date(from_date) and self.is_date(to_date):
                row[3] = dparse(from_date)
                row[4] = dparse(to_date)
                to_delete = False

            elif self.is_date(from_date) and "incumbent" in to_date.lower():
                row[3] = from_date
                row[4] = "incumbent"
                to_delete = False

            if not to_delete:
                rows_to_keep.append(index)
                pm_details = row[2]
                details, pm_wiki_path = pm_details.split("|")
                pm_wiki_path = pm_wiki_path.replace("¬", ",").strip()
                details = details.split("†")
                name = ""
                title = ""
                if details:
                    if details[0].lower() == "sir":
                        name = f"{details.pop(1)}"
                    else:
                        name = f"{details.pop(0)}"

                    # the last item may be a title, years of birth/death,
                    # or "MP for <constituency>"
                    title = details.pop(0)
                    year_regex = r"\(?(\d{4}|born|MP for)(\D\d{4}\)?)?"
                    if bool(re.search(year_regex, title)):
                        title = ""

                party, _ = row[7].split("|")
                party = party.split("†")[0]

                row[0] = name.strip()
                row[1] = title.strip()
                row[2] = pm_wiki_path
                row[5] = party.strip()
                _pm_info_cache = self.pm_info.get(pm_wiki_path, {})
                if not _pm_info_cache:
                    pm_info = PrimeMinister(
                        host=self.host,
                        path=pm_wiki_path,
                        x=0,
                        html_classname="infobox vcard",
                    )
                    _pm_info_cache = {
                        "dob": pm_info.birthday,
                        "death": pm_info.died,
                        "monarchs": pm_info.monarchs,
                    }
                    self.pm_info[pm_wiki_path] = _pm_info_cache

                row[-2] = _pm_info_cache["dob"]
                row[-1] = _pm_info_cache["death"]
                row[6] = "†".join([x for x in _pm_info_cache["monarchs"]])

        self.delete_rows(
            [i for i in range(len(self.matrix)) if i not in rows_to_keep]
        )

        self.distinct_rows(inplace=True)

        self.sort_by_col(3, True)
        ids = np.array([[j] for j in range(1, self.num_rows + 1)])
        self.add_cols(new_cols=ids)
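
# A minimal, self-contained sketch of the rowspan/colspan expansion used
# above, assuming BeautifulSoup; table_to_matrix and the fixed width are
# hypothetical names, for illustration only.
from bs4 import BeautifulSoup

def table_to_matrix(html, width=4):
    soup = BeautifulSoup(html, "html.parser")
    rows = soup.find_all("tr")
    matrix = [["" for _ in range(width)] for _ in range(len(rows))]
    for r, row in enumerate(rows):
        for c, cell in enumerate(row.find_all(["td", "th"])):
            col_span = int(cell.get("colspan", 1))
            row_span = int(cell.get("rowspan", 1))
            text = cell.get_text(strip=True)
            offset = 0
            for rs in range(row_span):
                # skip cells already claimed by an earlier rowspan/colspan
                while matrix[r + rs][c + offset]:
                    offset += 1
                for cs in range(col_span):
                    matrix[r + rs][min(c + offset + cs, width - 1)] += text
    return matrix

# table_to_matrix('<table><tr><td rowspan="2">a</td><td>b</td></tr>'
#                 '<tr><td>c</td></tr></table>')
# -> [['a', 'b', '', ''], ['a', 'c', '', '']]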
Example #43
0
def client_ops(ctx, request):
    loc = request.localizer
    page = int(request.params.get('page', 1))
    # FIXME: make per_page configurable
    per_page = 30
    ts_from = request.params.get('from')
    ts_to = request.params.get('to')
    ts_now = dt.datetime.now()
    sname = None
    stash_ids = tuple()
    if ts_from:
        try:
            ts_from = dparse(ts_from)
        except ValueError:
            ts_from = None
    else:
        ts_from = None
    if ts_to:
        try:
            ts_to = dparse(ts_to)
        except ValueError:
            ts_to = None
    else:
        ts_to = None
    if ts_from is None:
        ts_from = request.session.get('ops_ts_from')
    if ts_to is None:
        ts_to = request.session.get('ops_ts_to')
    if ts_from is None:
        ts_from = ts_now.replace(day=1,
                                 hour=0,
                                 minute=0,
                                 second=0,
                                 microsecond=0)
    if ts_to is None:
        ts_to = ts_from\
         .replace(hour=23, minute=59, second=59, microsecond=999999)\
         + relativedelta(months=1, days=-1)
    request.session['ops_ts_from'] = ts_from
    request.session['ops_ts_to'] = ts_to
    sess = DBSession()
    ent = request.user.parent
    if isinstance(ctx, Stash):
        stash_ids = (ctx.id, )
        sname = ctx.name
    else:
        stash_ids = [s.id for s in ent.stashes]
    total = sess.query(func.count('*')).select_from(StashIO)\
     .filter(
      StashIO.stash_id.in_(stash_ids),
      StashIO.timestamp.between(ts_from, ts_to)
     )\
     .scalar()
    max_page = int(math.ceil(total / per_page))
    if max_page <= 0:
        max_page = 1
    if page <= 0:
        page = 1
    elif page > max_page:
        page = max_page
    ios = sess.query(StashIO)\
     .filter(
      StashIO.stash_id.in_(stash_ids),
      StashIO.timestamp.between(ts_from, ts_to)
     )\
     .order_by(StashIO.timestamp.desc())
    if total > per_page:
        ios = ios\
        .offset((page - 1) * per_page)\
        .limit(per_page)

    crumbs = [{
        'text': loc.translate(_('My Accounts')),
        'url': request.route_url('stashes.cl.accounts', traverse=())
    }]
    if sname:
        crumbs.append({
            'text': sname,
            'url': request.route_url('stashes.cl.accounts', traverse=(ctx.id, ))
        })
    crumbs.append({'text': loc.translate(_('Account Operations'))})
    tpldef = {
        'ts_from': ts_from,
        'ts_to': ts_to,
        'sname': sname,
        'page': page,
        'perpage': per_page,
        'maxpage': max_page,
        'ios': ios.all(),
        'crumbs': crumbs
    }

    request.run_hook('access.cl.tpldef', tpldef, request)
    request.run_hook('access.cl.tpldef.accounts.ops', tpldef, request)
    return tpldef
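
# A quick check of the default-window computation above (a sketch, assuming
# python-dateutil): starting from the first day of a month, the
# relativedelta expression lands on the last moment of that same month.
import datetime as dt
from dateutil.relativedelta import relativedelta

ts_from = dt.datetime(2024, 2, 1)
ts_to = ts_from.replace(hour=23, minute=59, second=59, microsecond=999999) \
    + relativedelta(months=1, days=-1)
# ts_to == datetime.datetime(2024, 2, 29, 23, 59, 59, 999999)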
Example #44
0
def client_ops(ctx, request):
    loc = get_localizer(request)
    page = int(request.params.get("page", 1))
    # FIXME: make per_page configurable
    per_page = 30
    ts_from = request.params.get("from")
    ts_to = request.params.get("to")
    ts_now = dt.datetime.now()
    sname = None
    stash_ids = tuple()
    if ts_from:
        try:
            ts_from = dparse(ts_from)
        except ValueError:
            ts_from = None
    else:
        ts_from = None
    if ts_to:
        try:
            ts_to = dparse(ts_to)
        except ValueError:
            ts_to = None
    else:
        ts_to = None
    if ts_from is None:
        ts_from = request.session.get("ops_ts_from")
    if ts_to is None:
        ts_to = request.session.get("ops_ts_to")
    if ts_from is None:
        ts_from = ts_now.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
    if ts_to is None:
        ts_to = ts_from.replace(hour=23, minute=59, second=59, microsecond=999999) + relativedelta(months=1, days=-1)
    request.session["ops_ts_from"] = ts_from
    request.session["ops_ts_to"] = ts_to
    sess = DBSession()
    ent = request.user.parent
    if isinstance(ctx, Stash):
        stash_ids = (ctx.id,)
        sname = ctx.name
    else:
        stash_ids = [s.id for s in ent.stashes]
    total = (
        sess.query(func.count("*"))
        .select_from(StashIO)
        .filter(StashIO.stash_id.in_(stash_ids), StashIO.timestamp.between(ts_from, ts_to))
        .scalar()
    )
    max_page = int(math.ceil(total / per_page))
    if max_page <= 0:
        max_page = 1
    if page <= 0:
        page = 1
    elif page > max_page:
        page = max_page
    ios = (
        sess.query(StashIO)
        .filter(StashIO.stash_id.in_(stash_ids), StashIO.timestamp.between(ts_from, ts_to))
        .order_by(StashIO.timestamp.desc())
    )
    if total > per_page:
        ios = ios.offset((page - 1) * per_page).limit(per_page)

    crumbs = [{"text": loc.translate(_("My Accounts")), "url": request.route_url("stashes.cl.accounts", traverse=())}]
    if sname:
        crumbs.append({"text": sname, "url": request.route_url("stashes.cl.accounts", traverse=(ctx.id,))})
    crumbs.append({"text": loc.translate(_("Account Operations"))})
    tpldef = {
        "ts_from": ts_from,
        "ts_to": ts_to,
        "sname": sname,
        "page": page,
        "perpage": per_page,
        "maxpage": max_page,
        "ios": ios.all(),
        "crumbs": crumbs,
    }

    request.run_hook("access.cl.tpldef", tpldef, request)
    request.run_hook("access.cl.tpldef.accounts.ops", tpldef, request)
    return tpldef
Example #45
0
def dyn_ticket_sched_find(params, request):
	if 'date' not in params:
		raise ValueError('No date given')
	dur = 0
	tkt = None
	tpl = None
	sess = DBSession()
	if params.get('ticketid'):
		tkt = sess.query(Ticket).get(int(params['ticketid']))
		if not tkt:
			raise KeyError('No matching ticket found')
		dur = tkt.duration
	elif params.get('tstid'):
		tst = sess.query(TicketState).get(int(params['tstid']))
		if not tst:
			raise KeyError('No matching ticket state found')
		dur = tst.duration
	elif params.get('ttplid'):
		tpl = sess.query(TicketTemplate).get(int(params['ttplid']))
		if not tpl:
			raise KeyError('No matching ticket template found')
		dur = tpl.duration
	else:
		raise ValueError('No ticket, ticket state or ticket template ID given')
	p_dt = dparse(params['date'])
	from_dt = dt.datetime(p_dt.year, p_dt.month, p_dt.day, 0, 0, 0)
	to_dt = dt.datetime(p_dt.year, p_dt.month, p_dt.day, 23, 59, 59)
	sched = []
	if params.get('tschedid'):
		xs = sess.query(TicketScheduler).get(int(params['tschedid']))
		if xs:
			sched.append(xs)
	if params.get('xtschedid'):
		xs = sess.query(TicketScheduler).get(int(params['xtschedid']))
		if xs:
			sched.append(xs)
	if tpl and tpl.scheduler:
		sched.append(tpl.scheduler)
	user = None
	group = None
	numdates = int(params.get('numdates', 5))
	if 'uid' in params:
		user = sess.query(User).get(int(params['uid']))
	elif tpl:
		if tpl.assign_to_self:
			user = request.user
		elif tpl.assign_to_user:
			user = tpl.assign_to_user
	if user and user.schedule_map:
		sched.append(user.schedule_map.scheduler)
	if 'gid' in params:
		group = sess.query(Group).get(int(params['gid']))
	elif tpl:
		if tpl.assign_to_own_group:
			group = request.user.group
		elif tpl.assign_to_group:
			group = tpl.assign_to_group
	if group and group.schedule_map:
		sched.append(group.schedule_map.scheduler)
	dates = TicketScheduler.find_schedule(tkt, sched, from_dt, to_dt, user, group, max_dates=numdates, duration=dur)
	return {
		'success' : True,
		'dates'   : dates
	}
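
# A hedged usage sketch for the handler above; the IDs and the request
# object are hypothetical, and params values arrive as strings:
# dyn_ticket_sched_find({'date': '2024-03-05', 'ticketid': '42',
#                        'numdates': '3'}, request)
# -> {'success': True, 'dates': [...]}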
Example #46
0
def client_ops(ctx, request):
	loc = get_localizer(request)
	page = int(request.params.get('page', 1))
	# FIXME: make per_page configurable
	per_page = 30
	ts_from = request.params.get('from')
	ts_to = request.params.get('to')
	ts_now = dt.datetime.now()
	sname = None
	stash_ids = tuple()
	if ts_from:
		try:
			ts_from = dparse(ts_from)
		except ValueError:
			ts_from = None
	else:
		ts_from = None
	if ts_to:
		try:
			ts_to = dparse(ts_to)
		except ValueError:
			ts_to = None
	else:
		ts_to = None
	if ts_from is None:
		ts_from = request.session.get('ops_ts_from')
	if ts_to is None:
		ts_to = request.session.get('ops_ts_to')
	if ts_from is None:
		ts_from = ts_now.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
	if ts_to is None:
		ts_to = ts_from\
			.replace(hour=23, minute=59, second=59, microsecond=999999)\
			+ relativedelta(months=1, days=-1)
	request.session['ops_ts_from'] = ts_from
	request.session['ops_ts_to'] = ts_to
	sess = DBSession()
	ent = request.user.parent
	if isinstance(ctx, Stash):
		stash_ids = (ctx.id,)
		sname = ctx.name
	else:
		stash_ids = [s.id for s in ent.stashes]
	total = sess.query(func.count('*')).select_from(StashIO)\
		.filter(
			StashIO.stash_id.in_(stash_ids),
			StashIO.timestamp.between(ts_from, ts_to)
		)\
		.scalar()
	max_page = int(math.ceil(total / per_page))
	if max_page <= 0:
		max_page = 1
	if page <= 0:
		page = 1
	elif page > max_page:
		page = max_page
	ios = sess.query(StashIO)\
		.filter(
			StashIO.stash_id.in_(stash_ids),
			StashIO.timestamp.between(ts_from, ts_to)
		)\
		.order_by(StashIO.timestamp.desc())
	if total > per_page:
		ios = ios\
		.offset((page - 1) * per_page)\
		.limit(per_page)

	crumbs = [{
		'text' : loc.translate(_('My Accounts')),
		'url'  : request.route_url('stashes.cl.accounts', traverse=())
	}]
	if sname:
		crumbs.append({
			'text' : sname,
			'url'  : request.route_url('stashes.cl.accounts', traverse=(ctx.id,))
		})
	crumbs.append({ 'text' : loc.translate(_('Account Operations')) })
	tpldef = {
		'ts_from' : ts_from,
		'ts_to'   : ts_to,
		'sname'   : sname,
		'page'    : page,
		'perpage' : per_page,
		'maxpage' : max_page,
		'ios'     : ios.all(),
		'crumbs'  : crumbs
	}

	request.run_hook('access.cl.tpldef', tpldef, request)
	request.run_hook('access.cl.tpldef.accounts.ops', tpldef, request)
	return tpldef
Example #47
0
# Country,Ratification,treaty,Entry,Signature,Note,Territorial Application,Declarations,Reservations,Communication,Authorities,Denunciation,Effect denunciation,Objection
# Slovakia,18/3/1992,Convention for the Protection of Human Rights and Fundamental Freedoms,1/1/1993,21/2/1991,17,,X,X,,,,,

treatytype, created = RegionType.objects.get_or_create(name="Treaty")

with transaction.commit_on_success():
    for line in reader:
        if '"""' in line[2]:
            # str.replace returns a new string; the result must be assigned
            line[2] = line[2].replace('"""', '"')
        treaty, created = Region.objects.get_or_create(name=line[2], type=treatytype)

        if created:
            print("Created treaty %s" % treaty.name)

        try:
            signatory = Region.objects.get(name=line[0])
        except Region.DoesNotExist:
            try:
                signatory = Region.objects.get(shortname=line[0])
            except Region.DoesNotExist:
                print("Failed to recognize signatory '%s'" % line[0])
                continue

        if line[1]:
            memrel, created = RegionMembership.objects.get_or_create(region=treaty, member=signatory)
            memrel.type = "Ratified"
            if line[1].strip():
                # dates in this CSV are D/M/Y, so be explicit about day-first
                memrel.start = dparse(line[1].split(' ')[0], dayfirst=True)
            memrel.save()
            if created:
                print("Added %s to treaty %s" % (signatory, treaty.name))
Example #48
0
import dateutil.parser
from dateutil.parser import parse as dparse
import fitsio
import scipy.stats
from collections import OrderedDict as odict

import obztak.delve
from obztak.utils import fileio
import obztak.tactician
import obztak.survey

# CTIO midpoint varies from 04:40 to 05:00 UTC over the course of the year.

SEMESTERS = odict([
    ('2019A',(dparse('2019-02-02T04:50UTC'),dparse('2019-08-01T04:50UTC'),20)),
    ('2019B',(dparse('2019-08-02T04:50UTC'),dparse('2020-02-01T04:50UTC'),21)),
    ('2020A',(dparse('2020-02-02T04:50UTC'),dparse('2020-08-01T04:50UTC'),22)),
    ('2020B',(dparse('2020-08-02T04:50UTC'),dparse('2021-02-01T04:50UTC'),21)),
    ('2021A',(dparse('2021-02-02T04:50UTC'),dparse('2021-08-01T04:50UTC'),22)),
    ('2021B',(dparse('2021-08-02T04:50UTC'),dparse('2022-02-01T04:50UTC'),20)),
])

def get_semester(date):
    for key,(start,stop,nights) in SEMESTERS.items():
        if (date >= start) and (date <= stop):
            return key
    raise ValueError(str(date))
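
# Usage sketch (assumes the SEMESTERS table above): a UTC-aware timestamp
# falls into exactly one semester, or raises ValueError.
# >>> get_semester(dparse('2019-03-15T04:50UTC'))
# '2019A'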

def choose_2019A(data):
    nights = [
Example #49
0
def choose_2019A(data):
    nights = [
        [dparse('2019-02-07T04:50UTC'), 'second'],
        [dparse('2019-02-08T04:50UTC'), 'second'],
        [dparse('2019-02-09T04:50UTC'), 'second'],
        [dparse('2019-02-12T04:50UTC'), 'full  '],
        [dparse('2019-02-13T04:50UTC'), 'full  '],
        [dparse('2019-02-14T04:50UTC'), 'second'],
        [dparse('2019-02-15T04:50UTC'), 'full  '],
        [dparse('2019-02-24T04:50UTC'), 'second'],
        [dparse('2019-02-25T04:50UTC'), 'second'],
        [dparse('2019-02-26T04:50UTC'), 'second'],
        [dparse('2019-02-27T04:50UTC'), 'second'],
        [dparse('2019-02-28T04:50UTC'), 'second'],
        [dparse('2019-03-01T04:50UTC'), 'second'],
        [dparse('2019-05-12T04:50UTC'), 'full  '],
        [dparse('2019-05-13T04:50UTC'), 'full  '],
        [dparse('2019-05-28T04:50UTC'), 'second'],
        [dparse('2019-05-29T04:50UTC'), 'second'],
        [dparse('2019-05-30T04:50UTC'), 'second'],
        [dparse('2019-05-31T04:50UTC'), 'second'],
        [dparse('2019-06-01T04:50UTC'), 'second'],
        [dparse('2019-06-02T04:50UTC'), 'second'],
        [dparse('2019-06-03T04:50UTC'), 'second'],
        [dparse('2019-06-04T04:50UTC'), 'second'],
        [dparse('2019-06-05T04:50UTC'), 'full  '],
        [dparse('2019-06-06T04:50UTC'), 'full  '],
        [dparse('2019-06-07T04:50UTC'), 'full  '],
        [dparse('2019-06-08T04:50UTC'), 'full  '],
        [dparse('2019-06-09T04:50UTC'), 'full  '],
        [dparse('2019-06-23T04:50UTC'), 'second'],
        [dparse('2019-06-24T04:50UTC'), 'second'],
        [dparse('2019-06-25T04:50UTC'), 'second'],
        [dparse('2019-06-26T04:50UTC'), 'second'],
        [dparse('2019-06-27T04:50UTC'), 'second'],
        [dparse('2019-06-28T04:50UTC'), 'second'],
    ]

    sel = np.in1d(data['date'],[n[0] for n in nights])
    choice = data[sel]
    for n in nights:
        choice['half'][choice['date'] == n[0]] = n[1]

    return choice
Example #50
0
def _clean_timestamp(timestamp):
    if timestamp is None:
        return None
    if isinstance(timestamp, str):
        return dparse(timestamp)
    return timestamp
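
# A small usage sketch for the helper above (hedged; the exact tzinfo on
# the parsed result depends on the input string):
# _clean_timestamp(None)                    -> None
# _clean_timestamp('2021-08-02T04:50UTC')   -> datetime(2021, 8, 2, 4, 50, tzinfo=tzutc())
# _clean_timestamp(dt.datetime(2021, 8, 2)) -> returned unchanged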