def cancel_listing_btn(self, cr, uid, ids, context=None):
    record = self.browse(cr, uid, ids[0])
    inst_lnk = record.shop_id.instance_id
    site_id = record.shop_id.site_id
    siteid = site_id.site_id if site_id else False
    ebay_item_id = record.name
    cancel_listing = record.cancel_listing
    utc_tm = datetime.utcnow()
    utc_trunk = str(utc_tm)[:19]
    # Offset between local time and UTC, applied to eBay's end time below.
    difft_time = datetime.utcnow() - datetime.now()
    if cancel_listing:
        ending_reason = record.ending_reason
        if not ending_reason:
            self.log(cr, uid, ids[0], 'Please Select the Ending Reason \n')
            return
        results = False
        try:
            if inst_lnk:
                results = self.call(cr, uid, inst_lnk, 'EndItem', ebay_item_id, ending_reason, siteid)
                self.log(cr, uid, ids[0], '%s has been Cancelled. \n' % (ebay_item_id,))
        except Exception, e:
            raise osv.except_osv(_('Error !'), e)
        if results:
            FMT = '%Y-%m-%d %H:%M:%S'
            endtime = results.get('EndTime', False)
            if endtime:
                end_tm = self.pool.get('ebayerp.instance').openerpFormatDate(endtime)
                endtime = datetime.strptime(end_tm, FMT) - difft_time
                ebay_end_tm = str(endtime)[:19]
                # self.write(cr, uid, ids[0], {'ebay_end_time': ebay_end_tm, 'is_cancel': True})
                # Parameterised query: never interpolate values into the SQL string.
                cr.execute("UPDATE product_listing SET ebay_end_time=%s, is_cancel=%s WHERE id=%s",
                           (ebay_end_tm, True, ids[0]))
                cr.commit()
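The commented-out ORM call above is usually the safer route than raw SQL, since it runs the model's write logic; a minimal sketch in the same old-OpenERP style (the method name is an assumption):

def mark_cancelled(self, cr, uid, ids, ebay_end_tm, context=None):
    # Let the ORM handle escaping, constraints and store triggers.
    return self.write(cr, uid, ids,
                      {'ebay_end_time': ebay_end_tm, 'is_cancel': True},
                      context=context)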
Example 2
from datetime import datetime
from pprint import pprint as pp

def relativeTime(date):
    # Debug output: show "now" versus the supplied timestamp.
    pp(datetime.utcnow())
    print("-")
    pp(date)
    # Compare as naive UTC.
    date = date.replace(tzinfo=None)
    diff = datetime.utcnow() - date

    if diff.days > 7 or diff.days < 0:
        return date.ctime()
    elif diff.days == 1:
        return '1 day ago'
    elif diff.days > 1:
        return '%d days ago' % diff.days
    elif diff.seconds <= 1:
        return 'just now'
    elif diff.seconds < 60:
        return '%d seconds ago' % diff.seconds
    elif diff.seconds < (60 * 2):
        return '1 minute ago'
    elif diff.seconds < (60 * 60):
        return '%d minutes ago' % (diff.seconds / 60)
    elif diff.seconds < (60 * 60 * 2):
        return '1 hour ago'
    else:
        return '%d hours ago' % (diff.seconds / (60 * 60))
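A quick usage sketch with naive UTC datetimes, as the function expects:

from datetime import datetime, timedelta

print(relativeTime(datetime.utcnow() - timedelta(seconds=30)))  # "30 seconds ago"
print(relativeTime(datetime.utcnow() - timedelta(days=3)))      # "3 days ago"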
Example 3
def H_list_upcoming(service, opts):

    if not opts.args.calId:
        opts.args.calId = 'primary'
    if not opts.args.num:
        opts.args.num = 10

    tdargs = tdkwargs(*opts.args.timeargs)

    now = datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time
    later = ( datetime.utcnow() + timedelta(**tdargs) ).isoformat() + 'Z'
    print(now, later)

    eventsResult = service.events().list(
        calendarId=opts.args.calId,
        timeMin=now, timeMax=later,
        maxResults=opts.args.num,
        singleEvents=True,
        orderBy='startTime').execute()
    events = eventsResult.get('items', [])

    if not events:
        return 1
    for event in events:
        start = event['start'].get('dateTime', event['start'].get('date'))
        print(start, event['summary'])
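`tdkwargs` is not defined anywhere in this listing; a plausible stand-in (an assumption, not the original helper) that turns ('days', '7')-style argument pairs into `timedelta` keyword arguments:

def tdkwargs(*timeargs):
    # Hypothetical helper: ('days', '7', 'hours', '12') -> {'days': 7, 'hours': 12}.
    it = iter(timeargs)
    return {unit: int(amount) for unit, amount in zip(it, it)}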
Example 4
    def _upsert_currency_to_db(cls, currency, rate):
        """
        updates currency to coefficient if exists. if not exists creates that currency
        :param currency:
        :param coefficient:
        :return:
        """
        try:
            currency = currency.upper()
            table_name, table = ServicesTables.ensure_currency_exchange_rates_table()

            query = text("select * from {0} where currency = '{1}'".format(table_name, currency))
            response_db = list(ServicesTables.server.execute(query))

            if not response_db:
                insert_obj = \
                    {
                        "currency": currency,
                        "rate": rate,
                        "date_modified": datetime.utcnow()
                    }
                table.insert().values(insert_obj).execute()
            else:
                query = text("UPDATE {0} SET rate = {1},date_modified='{3}' WHERE currency = '{2}'"
                             .format(table_name, rate, currency, datetime.utcnow()))
                ServicesTables.server.execute(query)

        except Exception, e:
            logger.exception("Cant upsert currency {0} to DB.".format(currency))
Example 5
def provide_empty_event():
    return {
        'summary': '',
        'location': '',
        'description': '',
        'start': {
            'dateTime': datetime.utcnow().isoformat() + 'Z',
            'timeZone': 'Europe/Berlin',
        },
        'end': {
            'dateTime': (datetime.utcnow() + timedelta(minutes=2)).isoformat() + 'Z',
            'timeZone': 'Europe/Berlin',
        },
        'recurrence': [
            ''  # 'RRULE:FREQ=DAILY;COUNT=2'
        ],
        'attendees': [
            # {'email': '*****@*****.**'},
        ],
        'reminders': {
            # 'useDefault': False,
            # 'overrides': [
            #     {'method': 'email', 'minutes': 24 * 60},
            #     {'method': 'popup', 'minutes': 10},
            # ],
        },
    }
Example 6
def create_token(user_id):
    payload = {
        'sub': user_id,
        'iat': datetime.utcnow(),
        'exp': datetime.utcnow() + timedelta(days=14)
    }
    token = jwt.encode(payload, app.config['TOKEN_SECRET'])
    return token.decode('unicode_escape')
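A matching verification sketch (assuming PyJWT with its default HS256 algorithm; `app.config['TOKEN_SECRET']` comes from the snippet above). `jwt.decode` checks the `exp` claim automatically:

import jwt

def parse_token(token):
    # Raises jwt.ExpiredSignatureError once 'exp' has passed,
    # and jwt.InvalidTokenError for any tampered token.
    payload = jwt.decode(token, app.config['TOKEN_SECRET'], algorithms=['HS256'])
    return payload['sub']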
Example 7
async def on_member_update(before, after):
	notification_channel = discord.utils.find(lambda m: m.name == 'bot-notifications', after.server.channels)

	if (str(before.status) == 'offline' and str(after.status) == 'online') or ((str(before.status) == 'online' or str(before.status) == 'idle') and str(after.status) == 'offline'):
		try:
			with open('discord_logs.txt', 'r') as f:
				discord_logs = json.load(f)
		except (IOError, ValueError):
			# Missing or corrupt log file: start with an empty record.
			discord_logs = {}
		if after.id not in discord_logs:
			discord_logs[after.id] = {"last login": "N/A", "last logoff": "N/A"}
		now = str(datetime.utcnow())
		if str(after.status) == 'online':
			discord_logs[after.id]['last login'] = now
		if str(after.status) == 'offline':
			discord_logs[after.id]['last logoff'] = now
		with open('discord_logs.txt', 'w') as f:
			f.write(json.dumps(discord_logs))
	
	
	if str(before.status) == 'offline' and str(after.status) == 'online' and bot.check_role(client, after, "Member"):
		x = open("display_names.txt", 'r')
		disp_names = json.load(x)
		x.close()
		if after.id not in disp_names:
			await client.send_message(after, "My name is Xantha, the DH Discord bot. According to my records, your GW2 Display Name is not listed in our database. Please enter \n`!displayname <GW2 Display name>`\n without the <>, for example \n`!displayname Xantha.1234`\nin Discord. Be sure to use your full name, including the 4 digits at the end. If you need help, please ask an Admin.")

	leadership_names = []
	for x in after.server.members:
		if bot.check_role(client, x, 'Leadership'):
			leadership_names += [x.name]
	if before.name not in leadership_names and after.name in leadership_names and not bot.check_role(client, after, "Leadership"):
		await client.send_message(after, "I have noticed that you have changed your name to the name of one of our members of leadership. Please take a moment to go to your Discord settings and change your username to something else. Failure to make this change may result in your Discord permissions being revoked.")
		await client.send_message(notification_channel, "The member with user ID {} has changed their name from {} to {}. They have been asked to change their name.".format(after.id, before.name, after.name))
	if before.name in leadership_names and after.name not in leadership_names and not bot.check_role(client, after, "Leadership"):
		await client.send_message(after, "Thank you for changing your username.")
		await client.send_message(notification_channel, "The member with user ID {} has changed their name from {} to {}.".format(after.id, before.name, after.name))

	if str(before.status) == 'offline' and str(after.status) == 'online' and after.name == "Scottzilla":
		await client.send_message(after, ":boom: Happy birthday! :boom:")
Example 8
def set_columns(self, **kwargs):
    self._changes = self._set_columns(**kwargs)
    # Stamp whichever "last modified" column this model defines.
    if 'modified' in self.__table__.columns:
        self.modified = datetime.utcnow()
    if 'updated' in self.__table__.columns:
        self.updated = datetime.utcnow()
    if 'modified_at' in self.__table__.columns:
        self.modified_at = datetime.utcnow()
    if 'updated_at' in self.__table__.columns:
        self.updated_at = datetime.utcnow()
    return self._changes
Example 9
def get_datestr(ui):
    import tkSimpleDialog
    from datetime import datetime
    import re
    ui.datestr = tkSimpleDialog.askstring('Flight Date','Flight Date (yyyy-mm-dd):')
    if not ui.datestr:
        ui.datestr = datetime.utcnow().strftime('%Y-%m-%d')
    else:
        while not re.match('[0-9]{4}-[0-9]{2}-[0-9]{2}',ui.datestr):
            ui.datestr = tkSimpleDialog.askstring('Flight Date',
                                                  'Bad format, please retry!\nFlight Date (yyyy-mm-dd):')
            if not ui.datestr:
                ui.datestr = datetime.utcnow().strftime('%Y-%m-%d')
    ui.ax1.set_title(ui.datestr)
Example 10
def wsdetailbill(request,billid):
	if request.method=='GET':
		reqbody = xmlbill(billid) 
		#url = 'http://127.0.0.1:4567/comercio1/factura'
		#r = requests.post(url,data=reqbody,headers={'content-type':'application/xml'})
		return HttpResponse(reqbody,content_type='application/xml')
	elif request.method=='POST':
		requestedbill = Bill.objects.get(id=billid)

		conttype = request.META['CONTENT_TYPE']
		if conttype == 'application/xml':
			xmlrequest = request.body
			#dtdstring = StringIO.StringIO('<!ELEMENT factura (idFactura,actores,despachos,costos,fechas,statuses)><!ELEMENT idFactura (#PCDATA)><!ELEMENT actores (emisor,pagador)><!ELEMENT emisor (rifEmisor,nombreEmisor,cuenta)><!ELEMENT rifEmisor (#PCDATA)><!ELEMENT nombreEmisor (#PCDATA)><!ELEMENT cuenta (#PCDATA)><!ELEMENT pagador (rifPagador,nombrePagador)><!ELEMENT rifPagador (#PCDATA)><!ELEMENT nombrePagador (#PCDATA)><!ELEMENT despachos (despacho+)><!ELEMENT despacho (id,productos,tracking,costo)><!ELEMENT id (#PCDATA)><!ELEMENT productos (producto+)><!ELEMENT producto (nombre,cantidad,medidas)><!ELEMENT nombre (#PCDATA)><!ELEMENT cantidad (#PCDATA)><!ELEMENT medidas (peso,largo,ancho,alto)><!ELEMENT peso (#PCDATA)><!ELEMENT largo (#PCDATA)><!ELEMENT ancho (#PCDATA)><!ELEMENT alto (#PCDATA)><!ELEMENT tracking (#PCDATA)><!ELEMENT costo (#PCDATA)><!ELEMENT costos (subtotal,impuestos,total)><!ELEMENT subtotal (#PCDATA)><!ELEMENT impuestos (#PCDATA)><!ELEMENT total (#PCDATA)><!ELEMENT fechas (fechaEmision,fechaVencimiento,fechaPago)><!ELEMENT fechaEmision (#PCDATA)><!ELEMENT fechaVencimiento (#PCDATA)><!ELEMENT fechaPago (#PCDATA)><!ELEMENT statuses (status+)><!ELEMENT status (#PCDATA)>')
			#dtd = etree.DTD(dtdstring)
			root = etree.XML(xmlrequest)
			#if dtd.validate(root):
				#associated = root[1]
				#assoc_found = Associated.objects.filter(rif=associated[0].text,assoc_name=associated[1].text)
				#if assoc_found.count() == 1:			
			xmlbillid = int(root[0].text)
			#print xmlbillid
			#print billid
			if int(billid) == xmlbillid:
				paymentcode = int(root[5][0].text)
				if paymentcode == 1000:
					msg = 'Pago aceptado'
					paystat = requestedbill.payment_status
					if paystat == '00':
						requestedbill.payment_status = '01'
						requestedbill.payment_date = datetime.utcnow().replace(tzinfo=utc)
					elif paystat == '02':
						requestedbill.payment_status = '03'
						requestedbill.payment_date = datetime.utcnow().replace(tzinfo=utc)
					requestedbill.save()
				elif paymentcode == 2000:
					msg = 'Pago rechazado'
				else:
					msg = 'Codigo de pago desconocido'  # unknown payment code
				print msg
				return HttpResponse(status=200)
				#return HttpResponse(msg,content_type='text/plain')
			else:
				print 'El identificador de la factura no coincide'
				return HttpResponse(status=400)
				#else:
				#	print 'El comercio no se encuentra registrado'
				#	return HttpResponse(status=400)
			#else:
			#	print 'No pase la validacion contra DTD'
			#	return HttpResponse(status=400)
		else:
			return HttpResponse(status=400)
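For testing, a client-side sketch of the POST this view expects (the URL is hypothetical; the element order matches the `root[0]` and `root[5][0]` indices used above):

import requests

xml_payload = b"""<factura>
  <idFactura>42</idFactura>
  <actores/><despachos/><costos/><fechas/>
  <statuses><status>1000</status></statuses>
</factura>"""

resp = requests.post('http://127.0.0.1:8000/ws/factura/42',  # hypothetical URL for billid=42
                     data=xml_payload,
                     headers={'content-type': 'application/xml'})
print(resp.status_code)  # 200 if the ids match, 400 otherwise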
Example 11
def test_timing_and_serialization(self):
    from datetime import datetime, timedelta
    import logging
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger('diagnosis.Diagnoser')
    logger.setLevel(logging.INFO)
    start = datetime.utcnow()
    import codecs
    with codecs.open('test_article.txt', encoding='utf-8') as f:
        diagnosis = self.my_diagnoser.diagnose(f.read())
        make_json_compat(diagnosis)
        self.assertLess(
            (datetime.utcnow() - start),
            timedelta(seconds=30)
        )
Example 12
def handleDeviceEntriesToGoogleCalendar(calendar,events,devices):
	""" Takes a list of FritzBoxDevices and adds them to the provided Calender object.
	 	Unique IDs are used to extend existing events and create nw ones accordingly.	
	"""
	for device in devices:
		#print device
		event={ 'summary': device.name + " " + device.ipv4 + " " + device.type,
					'location': 'Dresden, Saxony, Germany',
					'description': device.UID,
					'start': {
							'dateTime': str(datetime.utcnow().isoformat() + 'Z'),
							'timeZone': 'Europe/Berlin',
							},
					'end': {
							'dateTime': str((datetime.utcnow()+ timedelta(minutes=5)).isoformat() + 'Z'),
							'timeZone': 'Europe/Berlin',
						  },
				}
		#print event

		found=False

		for item in events:
			
			if item["UID"] == device.UID:
				found = True
				try:
					parsed_time = parser.parse(item["end"]["dateTime"]).astimezone(pytz.utc)
					# If the device's event ends in the future, extend that event;
					# otherwise a new event gets created below.
					if parsed_time >= pytz.utc.localize(datetime.utcnow()):  # localize utcnow so both sides are timezone-aware
						# handle if event is currently active
						calendar.update_event({'id': item['id'], 'end': {
									'dateTime': str((datetime.utcnow() + timedelta(minutes=5)).isoformat() + 'Z'),
									'timeZone': 'Europe/Berlin',
								}})
					else:  # create the thing again as a new event
						found = False  # not necessary but easier to read
				except Exception, e:
					print "error while handling"
					print e
					pprint(item)
					# keep going: one bad event must not abort the sync

		if not found:
			#print "new device state"
			#pprint(event)
			calendar.create_event(event)
Example 13
 def get(self):
   user = users.get_current_user()
   if not user:
       self.redirect(users.create_login_url('/key'))
       return
   else:
       account = Membership.all().filter('username ='******'templates/error.html', locals()))
         return
       if account.status != "active":
         url = "https://spreedly.com/"+SPREEDLY_ACCOUNT+"/subscriber_accounts/"+account.spreedly_token
         error = "<p>Your Spreedly account status does not appear to me marked as active.  This might be a mistake, in which case we apologize. <p>To investigate your account, you may go here: <a href=\""+url+"\">"+url+"</a> <p>If you believe this message is in error, please contact <a href=\"mailto:[email protected]?Subject=Spreedly+account+not+linked+to+hackerdojo+account\">[email protected]</a></p>";
         self.response.out.write(template.render('templates/error.html', locals()))
         return
       delta = datetime.utcnow() - account.created
       if delta.days < DAYS_FOR_KEY:
         error = "<p>You have been a member for "+str(delta.days)+" days.  After "+str(DAYS_FOR_KEY)+" days you qualify for a key.  Check back in "+str(DAYS_FOR_KEY-delta.days)+" days!</p><p>If you believe this message is in error, please contact <a href=\"mailto:[email protected]?Subject=Membership+create+date+not+correct\">[email protected]</a>.</p>";
         self.response.out.write(template.render('templates/error.html', locals()))
         return    
       bc = BadgeChange.all().filter('username ='******'templates/key.html', locals()))
Example 14
    def write(
        self, cursor, user, ids, sale_data, context=None
    ):
        """
        Find or Create sale using sale data

        :param cursor: Database cursor
        :param user: ID of current user
        :param ids: array ID of saving Sale
        :param sale_data: Order Data from sale
        :param context: Application context
        :returns: result of the parent ``write``
        """

        # Only change the order date if the SO is still in draft state.
        boAllowUpdate = True
        print 'gia tri cap nhat  ==  ', sale_data  # debug: value being written
        if 'date_order' in sale_data:
            boAllowUpdate = False
        else:
            for intID in ids:  # loop over the list of ids
                objSale = self.browse(cursor, user, intID, context=context)
                if objSale.state != 'draft':
                    boAllowUpdate = False
        if boAllowUpdate:
            sale_data['date_order'] = datetime.utcnow()  # + timedelta(days=1)
        return super(sale_order, self).write(cursor, user, ids, sale_data, context=context)
Example 15
def get(x,y,iss):
	#Lat, long, iss
	location = ephem.Observer()
	location.lat = x
	location.lon = y
	location.elevation = 2198
	timenow = datetime.utcnow()
	#print timenow
	location.date = timenow
	#calculate where the ISS is
	nextpassdata = location.next_pass(iss)
	try:
		time_to_rise = nextpassdata[0] - ephem.Date(timenow)
		timeseconds = time_to_rise * 86400
		timeseconds = timedelta(seconds=timeseconds)
		d = datetime(1, 1, 1) + timeseconds
		returner = "%d:%d:%d" % (d.hour, d.minute, d.second)
	except Exception:
		returner = "LOS"
	#print "This is the nextpassdata array " 
	#print nextpassdata
	#print "This is the rise time " + str(nextpassdata[0])
	#print "This is the current time in emphem format " + str(ephem.Date(timenow))
	#print time_to_rise
	#print "this is the time remaining in seconds"
	#print timeseconds
	#Code from http://stackoverflow.com/questions/4048651/python-function-to-convert-seconds-into-minutes-hours-and-days
	#print ("%d:%d:%d" % (d.hour, d.minute, d.second))
	return returner
Example 16
from datetime import datetime


def pretty_date(dt, default=None):
    """
    Returns string representing "time since" e.g.
    3 days ago, 5 hours ago etc.
    Ref: https://bitbucket.org/danjac/newsmeme/src/a281babb9ca3/newsmeme/
    """

    if default is None:
        default = 'just now'

    now = datetime.utcnow()
    diff = now - dt

    # Integer division keeps the period counts whole on Python 3 as well.
    periods = (
        (diff.days // 365, 'year', 'years'),
        (diff.days // 30, 'month', 'months'),
        (diff.days // 7, 'week', 'weeks'),
        (diff.days, 'day', 'days'),
        (diff.seconds // 3600, 'hour', 'hours'),
        (diff.seconds // 60, 'minute', 'minutes'),
        (diff.seconds, 'second', 'seconds'),
    )

    for period, singular, plural in periods:

        if not period:
            continue

        if period == 1:
            return u'%d %s ago' % (period, singular)
        else:
            return u'%d %s ago' % (period, plural)

    return default
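A quick usage sketch with the naive UTC datetimes the function expects:

from datetime import datetime, timedelta

print(pretty_date(datetime.utcnow() - timedelta(hours=5)))   # "5 hours ago"
print(pretty_date(datetime.utcnow() - timedelta(days=40)))   # "1 month ago"
print(pretty_date(datetime.utcnow()))                        # "just now"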
Example 17
def set_dehumidifier(s):
    """ Load recent samples and determine if the dehumidifier should be switched on or off """
    # read all the samples for the past 2 days (LOOKBACK_HOURS may span two days' worth of files)
    now = time.time()
    samples = []
    for path in [ get_path(datetime.utcnow()-timedelta(days=days)) for days in [1,0] ]:
        with open(path, "r") as f:
            for line in csv.reader(f, delimiter=','):
                sample = parse_sample(line)
                if sample["ts"] > now - 24*60*60:
                    samples.append(sample)

    # count number of samples w/ dew or w/ dehumidifier on
    dew_samples = 0
    on_samples = 0
    for sample in samples:
        dew_samples += below_dewpoint(sample)
        on_samples += bool(sample["status"])

    # work out if the dehumidifier should be on
    now = datetime.now()
    in_hours = now.hour >= ON_HOURS[0] and now.hour < ON_HOURS[1]
    should_be_on = in_hours and dew_samples * DEHUMIDIFIER_FACTOR > on_samples

    try:
        latest = samples[-1]
    except IndexError:
        latest = { "status" : None }
    # Anti-hysteresis: check whether the state already changed recently before switching.
    if latest["status"] != should_be_on and any(r["status"]==should_be_on for r in samples if r["ts"] > time.time() - ANTI_HYSTERISIS_MINUTES*60):
        should_be_on = not should_be_on

    s.write("o" if should_be_on else "f")
Example 18
def pkmn_time_text(time):
	s = (time - datetime.utcnow()).total_seconds()
	(m, s) = divmod(s, 60)
	(h, m) = divmod(m, 60)
	d = timedelta(hours=h, minutes=m, seconds=s)
	disappear_time = datetime.now() + d
	return "Available until %s (%dm %ds)." % (disappear_time.strftime("%H:%M:%S"), m, s)
Example 19
def __init__(self, title, body, category, pub_date=None):
    self.title = title
    self.body = body
    if pub_date is None:
        pub_date = datetime.utcnow()
    self.pub_date = pub_date
    self.category = category
Example 20
    def get(self):
        locations_query = Location.all().ancestor(location_key()).order("-arrival")
        locations = locations_query.fetch(10)
        # remove brother entries who have already left
        currentlocs = []
        # EST = Zone(-5, False, 'EST')
        for loc in locations:
            if datetime.utcnow() < loc.departure:
                tzloc = {}
                fmat = "%I:%M %p"
                tzloc["name"] = loc.name
                arrivetime = loc.arrival - timedelta(hours=5)
                tzloc["arrival"] = arrivetime.strftime(fmat)
                deptime = loc.departure - timedelta(hours=5)
                tzloc["departure"] = deptime.strftime(fmat)
                tzloc["location"] = loc.location
                tzloc["open_seats"] = loc.open_seats
                tzloc["other_bros"] = loc.other_bros
                tzloc["notes"] = loc.notes
                tzloc["creator"] = loc.creator
                currentlocs.append(tzloc)

        url = self.request.relative_url("static/addloc.html")
        logout = users.create_logout_url("/")
        template_values = {"locations": currentlocs, "add_loc_url": url, "logout_url": logout}
        path = os.path.join(os.path.dirname(__file__), "index.html")
        self.response.out.write(template.render(path, template_values))
Example 21
  def _get_iso_datetime(self):
    tt = datetime.utcnow().timetuple() #time in UTC

    #add leading zeros
    if(int(tt[1])<10):
      mm = "0" + str(tt[1])
    else:
      mm = str(tt[1])
    if(int(tt[2])<10):
      d = "0" + str(tt[2])
    else:
      d = str(tt[2])
    if(int(tt[3])<10):
      h = "0" + str(tt[3])
    else:
      h = str(tt[3])
    if(int(tt[4])<10):
      m = "0" + str(tt[4])
    else:
      m = str(tt[4])
    if(int(tt[5])<10):
      s = "0" + str(tt[5])
    else:
      s = str(tt[5])

    return str(tt[0]) + "-" + str(mm) + "-" + str(d) + "T" + str(h) + ":" + \
        str(m) + ":" + str(s) + "Z" #2012-07-31T20:44:36Z
Example 22
def test_from_api_repr_w_loggers_w_logger_match(self):
    from datetime import datetime
    from google.cloud._helpers import UTC
    client = _Client(self.PROJECT)
    PAYLOAD = 'PAYLOAD'
    IID = 'IID'
    NOW = datetime.utcnow().replace(tzinfo=UTC)
    TIMESTAMP = _datetime_to_rfc3339_w_nanos(NOW)
    LOG_NAME = 'projects/%s/logs/%s' % (self.PROJECT, self.LOGGER_NAME)
    LABELS = {'foo': 'bar', 'baz': 'qux'}
    API_REPR = {
        'dummyPayload': PAYLOAD,
        'logName': LOG_NAME,
        'insertId': IID,
        'timestamp': TIMESTAMP,
        'labels': LABELS,
    }
    LOGGER = object()
    loggers = {LOG_NAME: LOGGER}
    klass = self._getTargetClass()
    entry = klass.from_api_repr(API_REPR, client, loggers=loggers)
    self.assertEqual(entry.payload, PAYLOAD)
    self.assertEqual(entry.insert_id, IID)
    self.assertEqual(entry.timestamp, NOW)
    self.assertEqual(entry.labels, LABELS)
    self.assertIs(entry.logger, LOGGER)
Example 23
    def mlk_request(self, url_fragment):
        timestamp = int(time.mktime(datetime.utcnow().timetuple()))
        nonce = md5(str(random.random()).encode("ascii")).hexdigest()
        normalized_string = "%s\n" % (self.credentials["access_token"])
        normalized_string += "%s\n" % (timestamp)
        normalized_string += "%s\n" % (nonce)
        normalized_string += "GET\n"  # request method
        normalized_string += "mlkshk.com\n"  # host
        normalized_string += (
            "80\n"
        )  # THIS IS A BUG! port should be 443 but it's not recognizing it, so leave this at 80.
        normalized_string += url_fragment
        normalized_string += "\n"

        digest = hmac.new(self.credentials["access_token_secret"].encode("ascii"),
                          normalized_string.encode("ascii"), sha1).digest()
        signature = base64.encodebytes(digest).decode("ascii").strip()  # strip the trailing newline
        authorization_string = 'MAC token="%s", timestamp="%s", nonce="%s", signature="%s"' % (
            self.credentials["access_token"],
            str(timestamp),
            nonce,
            signature,
        )

        url = "https://mlkshk.com" + url_fragment
        r = requests.get(url, headers={"Authorization": authorization_string})
        return r
Example 24
    def join(self, user):
        """
        Add a user to our trial.

        Make sure that the trial has groups, then randomly assign USER
        to one of those groups.

        Ensure that this user hasn't already joined the trial, raising
        AlreadyJoinedError if we have.

        Ensure that this trial isn't already finished, raising
        TrialFinishedError if it is.

        If nobody has joined yet, we go to Group A, else Group A if
        the groups are equal, else Group B.
        """
        if self.stopped:
            raise exceptions.TrialFinishedError()
        if Participant.objects.filter(trial=self, user=user).count() > 0:
            raise exceptions.AlreadyJoinedError()
        part = Participant(trial=self, user=user).randomise()
        part.save()
        if self.instruction_delivery == self.IMMEDIATE:
            part.send_instructions()
        if self.instruction_delivery == self.HOURS:
            eta = datetime.utcnow() + timedelta(seconds=60 * 60 * self.instruction_hours_after)
            tasks.instruct_later.apply_async((part.pk,), eta=eta)
        return
Example 25
	def city_pace_count_rest():
		current_server_time = datetime.utcnow()
		#current_client_date = timezone(tz).fromutc(current_server_time)
		if City_Pace.objects.all().order_by('city_pace_date').last():
			last_date_counted = City_Pace.objects.all().order_by('city_pace_date').last().city_pace_date
			mytime = datetime.strptime('2130','%H%M').time()
			last_date_counted = datetime.combine(last_date_counted, mytime)
		else:
			# aware: absolute time, since it comes from the DB
			last_date_counted = Measurement.objects.all().order_by('measurement_date').first().measurement_date
			current_server_time = timezone(tz).fromutc(current_server_time)
		#logger.info(last_date_counted)
		#logger.info(current_client_date)
		if current_server_time.date() != last_date_counted.date():
			for a_date in range((current_server_time - last_date_counted).days):
				new_pace = 0.
				logger.info(last_date_counted + timedelta(days = a_date + 1))
				for a_bin in Bin.objects.all():
					new_date = last_date_counted + timedelta(days = a_date + 1)
					new_date = timezone('UTC').fromutc(new_date)
					logger.info(new_date)
					new_pace += a_bin.bin_generate_volume_pace_of_date(new_date)
					#logger.info(a_bin.bin_adress)
					#logger.info(a_bin.bin_generate_volume_pace_of_date(last_date_counted + timedelta(days = a_date + 1)))
				logger.info(float("{0:.2f}".format(new_pace * 24 / 1000)))
				new_city_pace = City_Pace(city_pace_date = last_date_counted + timedelta(days = a_date + 1), city_pace_value = new_pace * 24 / 1000)
				new_city_pace.save()
Example 26
def facebook_authorized(resp):
    next_url = request.args.get('next') or url_for('index')
    if resp is None:
        flash('You denied the login')
        return redirect(next_url)

    session['fb_access_token'] = (resp['access_token'], '')

    me = facebook.get('/me')
    user = Users.query.filter_by(fb_id=me.data['id']).first()
    
    if user is None:
        fb_id = me.data['id']

        if me.data.get('username'):
            fb_username = me.data['username']
        else:
            fb_username = me.data['name']

        fb_email = me.data['email']

        user = Users(fb_username, 'temp', fb_email)
        user.fb_id = me.data['id']
        user.activate = True
        user.created = datetime.utcnow()
        db.session.add(user)
        db.session.commit()
    session['user_id'] = user.uid

    flash('You are now logged in as %s' % user.username)
    return redirect(url_for('index'))
Example 27
def _update_tenant_info_cache(tenant_info):
    tenant_id = tenant_info['tenant']
    try:
        tenant = models.TenantInfo.objects\
                                  .select_for_update()\
                                  .get(tenant=tenant_id)
    except models.TenantInfo.DoesNotExist:
        tenant = models.TenantInfo(tenant=tenant_id)
    tenant.name = tenant_info['name']
    tenant.last_updated = datetime.utcnow()
    tenant.save()

    types = set()
    for type_name, type_value in tenant_info['types'].items():
        try:
            tenant_type = models.TenantType.objects\
                                           .get(name=type_name,
                                                value=type_value)
        except models.TenantType.DoesNotExist:
            tenant_type = models.TenantType(name=type_name,
                                            value=type_value)
            tenant_type.save()
        types.add(tenant_type)
    tenant.types = list(types)
    tenant.save()
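One caveat worth noting: in Django, `select_for_update()` only works inside a transaction, so a caller would typically wrap this function; a minimal sketch:

from django.db import transaction

def refresh_tenant(tenant_info):
    # select_for_update() raises TransactionManagementError outside a transaction.
    with transaction.atomic():
        _update_tenant_info_cache(tenant_info)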
Example 28
def twitter_daily_aggregate(retrievaldate):

	#Date Retrieval
	d=[]
	dt = parser.parse(retrievaldate) + timedelta(days=-1)
	d.append(dt)
	d.append(d[-1] + timedelta(days=1))

	#DataFrame Init
	ctrend = DataFrame()
	while d[-1] < datetime.utcnow(): 
		print 'processing ', d[-1], ' ..........'
		#Daily Mention Count
		mnts = twitter_count(d, mentions)

		#User Follower Count
		usrs =  twitter_follower(d,users)
		#Join
		trend = mnts.join(usrs)
		trend['Date'] = Period(d[-1],'D')
		#Append to DataFrame
		ctrend = concat([ctrend,trend])
		#Extend Dates
		d.append(d[-1] + timedelta(days=1))
	#Join DataFrames and Fill NAs
	ctrend =  ctrend.fillna(0)
	#Save
	print 'printing the file'
	ctrend.to_csv('twitter_trend.csv')
	return ctrend
Example 29
    def test_get_active_pbehaviors(self):
        now = datetime.utcnow()
        pbehavior_1 = deepcopy(self.pbehavior)
        pbehavior_2 = deepcopy(self.pbehavior)
        pbehavior_1.update({
            'eids': [self.entity_id_1],
            'name': 'pb1',
            'tstart': timegm(now.timetuple()),
            'tstop': timegm((now + timedelta(days=2)).timetuple()),
            'rrule': None
        })
        pbehavior_2.update({'eids': [self.entity_id_3],
                            'name': 'pb2',
                            'tstart': timegm(now.timetuple())})

        self.pbm.pb_storage.put_elements(
            elements=(pbehavior_1, pbehavior_2)
        )

        self.pbm.context._put_entities(self.entities)

        tab = self.pbm.get_active_pbehaviors([self.entity_id_1,
                                              self.entity_id_2])
        names = [x['name'] for x in tab]
        self.assertEqual(names, ['pb1'])
Example 30
def utc_to_local(utc):
    """
    Converts a naive UTC datetime to local time, returned as a
    "%Y-%m-%d %H:%M:%S" string.
    """

    offset = datetime.now() - datetime.utcnow()
    return (utc + offset).strftime("%Y-%m-%d %H:%M:%S")
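On Python 3 the same conversion is cleaner with aware datetimes; a sketch:

from datetime import datetime, timezone

def utc_to_local_aware(utc_dt):
    # Attach UTC, then let astimezone() apply the system's local zone.
    return utc_dt.replace(tzinfo=timezone.utc).astimezone().strftime("%Y-%m-%d %H:%M:%S")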
Example 31
async def setDefaultConfig(ctx, dbQuery, configName, configValue):
    em = discord.Embed(color=embedColor)
    em.set_author(name='Results')
    try:
        resultValue = dbQuery['Items'][0]['mpaConfig']
    except KeyError:
        em = discord.Embed(color=failEmbedColor)
        em.add_field(name='Nothing found!', value='Nothing was found.')
        await ctx.send('', embed=em)
        return
    # Probably not the best way to determine if a key is a role list or not
    if 'role' in configName.lower():
        if len(ctx.message.role_mentions) > 0:
            foundRole = ctx.message.role_mentions[0]
            configValue = foundRole.id
        else:
            try:
                foundRole = discord.utils.get(ctx.guild.roles, id=int(configValue))
                if foundRole is None:
                    await sendErrorMessage.invalidArguments(ctx, 'invalidChannelConfigSet', setDefaultConfig.__name__, configName)
                    return
            except ValueError:
                await sendErrorMessage.invalidArguments(ctx, 'invalidDefaultConfigRoleSet', setDefaultConfig.__name__, configName)
                return
        if str(configValue) in resultValue[f'{configName}']:
            await sendErrorMessage.invalidArguments(ctx, 'ItemAlreadyExists', setDefaultConfig.__name__, configName) 
            return
        else:
            await sendErrorMessage.invalidArguments(ctx, 'invalidDefaultConfigRoleSet', setDefaultConfig.__name__, configName)
            return
        await ctx.send('', embed=em)
        return
    else:
        if (checkConfigSyntax(ctx, configName, configValue)) is not None:
            updateDB = tonkDB.updateConfig(ctx.guild.id, 'default', configName, configValue, str(datetime.utcnow()))
        else:
            updateDB = None
        if updateDB is not None:
            em.add_field(name=f'Success', value=f'Set {configName} to {configValue}', inline=False)
        else:
            await sendErrorMessage.invalidArguments(ctx, 'invalidDefaultConfigSet', setDefaultConfig.__name__, configName)
            return
        await ctx.send('', embed=em)
        return
Example 32
    def __call__(self, context):
        the_og_base_url = 'http://wzmedia.dot.ca.gov:1935/D3/89_rampart.stream/'

        BASE_URL = 'http://wzmedia.dot.ca.gov:1935/D3/80_whitmore_grade.stream/'
        FPS = 30
        # Distance to line in road: ~0.025 miles
        ROAD_DIST_MILES = 0.025
        # Speed limit of urban freeways in California (50-65 MPH)
        HIGHWAY_SPEED_LIMIT = 65

        # Initialize background subtractor and text font
        fgbg = cv2.createBackgroundSubtractorMOG2()
        font = cv2.FONT_HERSHEY_PLAIN

        centers = []

        # y-coordinate for speed detection line
        Y_THRESH = 240

        blob_min_width_far = 6
        blob_min_height_far = 6

        blob_min_width_near = 18
        blob_min_height_near = 18

        frame_start_time = None

        # Create object tracker
        tracker = Tracker(80, 3, 2, 1)

        # Capture video ('input.mp4' stands in for the livestream URL above)
        cap = cv2.VideoCapture('input.mp4')

        while True:
            centers = []
            frame_start_time = datetime.utcnow()
            ret, frame = cap.read()

            orig_frame = copy.copy(frame)

            #  Draw line used for speed detection
            cv2.line(frame, (0, Y_THRESH), (640, Y_THRESH), (255, 0, 0), 2)

            # Convert frame to grayscale and perform background subtraction
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            fgmask = fgbg.apply(gray)

            # Perform some Morphological operations to remove noise
            kernel = np.ones((4, 4), np.uint8)
            kernel_dilate = np.ones((5, 5), np.uint8)
            opening = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)
            dilation = cv2.morphologyEx(opening, cv2.MORPH_OPEN, kernel_dilate)

            _, contours, hierarchy = cv2.findContours(dilation,
                                                      cv2.RETR_EXTERNAL,
                                                      cv2.CHAIN_APPROX_SIMPLE)

            # Find centers of all detected objects
            for cnt in contours:
                x, y, w, h = cv2.boundingRect(cnt)

                if y > Y_THRESH:
                    if w >= blob_min_width_near and h >= blob_min_height_near:
                        center = np.array([[x + w / 2], [y + h / 2]])
                        centers.append(np.round(center))

                        cv2.rectangle(frame, (x, y), (x + w, y + h),
                                      (0, 0, 255), 2)
                else:
                    if w >= blob_min_width_far and h >= blob_min_height_far:
                        center = np.array([[x + w / 2], [y + h / 2]])
                        centers.append(np.round(center))

                        cv2.rectangle(frame, (x, y), (x + w, y + h),
                                      (0, 255, 0), 2)

            if centers:
                tracker.update(centers)

                for vehicle in tracker.tracks:
                    if len(vehicle.trace) > 1:
                        for j in range(len(vehicle.trace) - 1):
                            # Draw trace line
                            x1 = vehicle.trace[j][0][0]
                            y1 = vehicle.trace[j][1][0]
                            x2 = vehicle.trace[j + 1][0][0]
                            y2 = vehicle.trace[j + 1][1][0]

                            cv2.line(frame, (int(x1), int(y1)),
                                     (int(x2), int(y2)), (0, 255, 255), 2)

                        try:
                            # TODO: account for load lag

                            trace_i = len(vehicle.trace) - 1

                            trace_x = vehicle.trace[trace_i][0][0]
                            trace_y = vehicle.trace[trace_i][1][0]

                            # Check if tracked object has reached the speed detection line
                            if trace_y <= Y_THRESH + 5 and trace_y >= Y_THRESH - 5 and not vehicle.passed:
                                cv2.putText(frame, 'I PASSED!',
                                            (int(trace_x), int(trace_y)), font,
                                            1, (0, 255, 255), 1, cv2.LINE_AA)
                                vehicle.passed = True

                                load_lag = (datetime.utcnow() -
                                            frame_start_time).total_seconds()

                                time_dur = (datetime.utcnow() -
                                            vehicle.start_time
                                            ).total_seconds() - load_lag
                                time_dur /= 60
                                time_dur /= 60

                                vehicle.mph = ROAD_DIST_MILES / time_dur

                                # If calculated speed exceeds speed limit, save an image of speeding car
                                if vehicle.mph > HIGHWAY_SPEED_LIMIT:
                                    print('UH OH, SPEEDING!')
                                    cv2.circle(orig_frame,
                                               (int(trace_x), int(trace_y)),
                                               20, (0, 0, 255), 2)
                                    cv2.putText(orig_frame,
                                                'MPH: %s' % int(vehicle.mph),
                                                (int(trace_x), int(trace_y)),
                                                font, 1, (0, 0, 255), 1,
                                                cv2.LINE_AA)
                                    cv2.imwrite(
                                        'speeding_%s.png' % vehicle.track_id,
                                        orig_frame)
                                    print('FILE SAVED!')

                            if vehicle.passed:
                                # Display speed if available
                                cv2.putText(frame,
                                            'MPH: %s' % int(vehicle.mph),
                                            (int(trace_x), int(trace_y)), font,
                                            1, (0, 255, 255), 1, cv2.LINE_AA)
                            else:
                                # Otherwise, just show tracking id
                                cv2.putText(frame,
                                            'ID: ' + str(vehicle.track_id),
                                            (int(trace_x), int(trace_y)), font,
                                            1, (255, 255, 255), 1, cv2.LINE_AA)
                        except:
                            pass

            # Display all images
            cv2.imshow('original', frame)
            # cv2.imshow('opening/dilation', dilation)
            # cv2.imshow('background subtraction', fgmask)

            # Quit when escape key pressed
            if cv2.waitKey(5) == 27:
                break

            # Sleep to keep video speed consistent
            time.sleep(1.0 / FPS)

        # Clean up
        cap.release()
        cv2.destroyAllWindows()

        # remove all speeding_*.png images created in runtime
        for file in glob.glob('speeding_*.png'):
            os.remove(file)

        return context
Example 33
    def get(self):
        arguments = {'currency': 'USD', 'nonStop': False}

        if not request.args.get('origin'):
            return {'error': 'Origin city is mandatory', 'status': 400}, 400

        if not request.args.get('uuid'):
            return {'error': 'UUID is mandatory', 'status': 400}, 400

        arguments['origin'] = request.args.get('origin')
        uuid = request.args.get('uuid')

        if request.args.get('budget'):
            arguments['maxPrice'] = abs(int(request.args.get('budget')))

        if request.args.get('start_date'):
            if not check_date(request.args.get('start_date')):
                return {
                    'error': 'Start date is not using the right format',
                    'status': 400
                }, 400
            arguments['departureDate'] = request.args.get('start_date')

        if request.args.get('end_date') and request.args.get('start_date'):
            if not check_date(request.args.get('end_date')):
                return {
                    'error': 'End date is not using the right format',
                    'status': 400
                }, 400

            start_date = datetime.strptime(request.args.get('start_date'),
                                           '%Y-%m-%d').date()
            end_date = datetime.strptime(request.args.get('end_date'),
                                         '%Y-%m-%d').date()

            if start_date > end_date:
                return {
                    'error': 'End date is earlier than the start day',
                    'status': 400
                }, 400

            difference = end_date - start_date
            arguments['duration'] = difference.days

        if request.args.get('num_passengers'):
            num_passengers = abs(int(request.args.get('num_passengers')))
        else:
            num_passengers = 1

        arguments_hash = hashlib.sha256(
            str(arguments).encode('ascii')).hexdigest()
        db_cursor.execute(
            "SELECT query_id, time FROM QUERIES WHERE query_hash=? AND uuid==?",
            (arguments_hash, uuid))

        result = []
        query_cache_result = db_cursor.fetchone()

        if query_cache_result and datetime.strptime(
                query_cache_result[1], '%Y-%m-%d %H-%M-%S') + timedelta(
                    minutes=cache_timeout) > datetime.utcnow():
            db_cursor.execute(
                "SELECT PLAN.start_date, PLAN.end_date, PLAN.origin, PLAN.destination, PLAN.price FROM PLAN WHERE PLAN.query_id=?",
                (query_cache_result[0], ))
            for query_result in db_cursor.fetchall():
                flight = {
                    'departureDate': query_result[0],
                    'returnDate': query_result[1],
                    'origin': query_result[2],
                    'destination': query_result[3],
                    'price': {
                        'total': query_result[4],
                    }
                }
                db_cursor.execute('SELECT image FROM IMAGES WHERE iata_name=?',
                                  (flight['destination'], ))
                flight['image'] = random.choice(db_cursor.fetchall())[0]
                result.append(flight)
        else:

            try:
                flights = amadeus.shopping.flight_destinations.get(
                    **arguments).result
                status_code = 200
            except NotFoundError:
                return {'flights': []}, 201
            except ServerError:
                return {
                    'error': 500,
                    'status': 'Server Error',
                    'message': 'Probably the city does not exist'
                }, 500

            query_id = int(random.getrandbits(256)) % (2 << 63 - 1)
            db_cursor.execute(
                "INSERT INTO QUERIES VALUES(?,?,?,strftime('%Y-%m-%d %H-%M-%S','now'),?,?,?,?,?,?)",
                (query_id, arguments_hash, uuid, status_code,
                 arguments['origin'], request.args.get('budget')
                 if request.args.get('budget') else None,
                 request.args.get('start_date')
                 if request.args.get('start_date') else None,
                 request.args.get('end_date')
                 if request.args.get('end_date') else None, num_passengers))
            db_cursor.execute(
                "INSERT OR IGNORE INTO USERS (uuid, last_query) VALUES (?,?)",
                (uuid, query_id))
            db_cursor.execute("UPDATE USERS SET last_query=? WHERE uuid=?",
                              (query_id, uuid))

            for flight in flights['data']:
                db_cursor.execute('INSERT INTO PLAN VALUES(?,?,?,?,?,?,?,?)', (
                    flight['departureDate'],
                    flight['returnDate'],
                    flight['origin'],
                    flight['destination'],
                    flight['price']['total'],
                    flight['links']['flightOffers'],
                    None,
                    query_id,
                ))
                db_cursor.execute('SELECT image FROM IMAGES WHERE iata_name=?',
                                  (flight['destination'], ))
                query_result = db_cursor.fetchall()
                if query_result == []:
                    """
                    destination_name = amadeus.reference_data.locations.get(
                        keyword=flight['destination'],
                        subType=Location.CITY
                    )
                    if len(destination_name.result['data']) > 0:
                        destination_name = destination_name.result['data'][0]['address']['cityName'].lower()
                    else:
                        destination_name = flight['destination']
                    """
                    destination_name = requests.get(
                        "https://iatacodes.org/api/v6/cities?api_key=" +
                        os.getenv('IATA_API') + "&code=" +
                        flight['destination'],
                        verify=False).json()
                    if 'response' in destination_name:
                        destination_name = destination_name['response'][0][
                            'name'].lower()
                    else:
                        destination_name = flight['destination']
                    """
                    json_response = requests.get(f'https://api.teleport.org/api/urban_areas/slug:{destination_name}/images/')
                    try:
                        json_response = json_response.json()
                        if 'status' not in json_response:
                            if len(json_response['photos']) > 0:
                                image_url = json_response['photos'][0]['image']['mobile']
                            else:
                                image_url = json_response['photos']['image']['mobile']
                        else:
                            image_url = ''

                    except json.decoder.JSONDecodeError:
                        image_url = ''
                    """
                    place_id = gmaps.find_place(destination_name,
                                                'textquery')['candidates']
                    images = []
                    if len(place_id) > 0:
                        place_id = place_id[0]['place_id']
                        place_details = gmaps.place(
                            place_id, random.getrandbits(256),
                            ['photo', 'rating', 'geometry'])
                        if place_details['result'] != {}:
                            if 'photos' in place_details['result']:
                                for photo in place_details['result']['photos']:
                                    image_url = 'https://maps.googleapis.com/maps/api/place/photo?maxheight=400&photoreference=' + photo[
                                        'photo_reference'] + '&key=' + os.getenv(
                                            'GOOGLE_MAPS_SERVER_KEY')
                                    images.append(image_url)
                            else:
                                images.append('')

                            db_cursor.execute(
                                'INSERT INTO CITIES VALUES(?,?,?,?)', (
                                    flight['destination'],
                                    destination_name,
                                    place_details['result']['geometry']
                                    ['location']['lat'],
                                    place_details['result']['geometry']
                                    ['location']['lng'],
                                ))

                        else:
                            images.append('')
                            db_cursor.execute(
                                'INSERT INTO CITIES VALUES(?,?,?,?)', (
                                    flight['destination'],
                                    destination_name,
                                    None,
                                    None,
                                ))
                    else:
                        images.append('')
                        db_cursor.execute('INSERT INTO CITIES VALUES(?,?,?,?)',
                                          (
                                              flight['destination'],
                                              destination_name,
                                              None,
                                              None,
                                          ))

                    for image in images:
                        db_cursor.execute('INSERT INTO IMAGES VALUES(?,?)',
                                          (flight['destination'], image))

                    image_url = random.choice(images)

                else:
                    image_url = random.choice(query_result)[0]

                flight['image'] = image_url
                del flight['type']
                del flight['links']
                if image_url != '':
                    result.append(flight)

        for flight in result:
            flight['price']['passenger'] = float(flight['price']['total'])
            flight['price']['total'] = round(
                float(flight['price']['total']) * num_passengers, 2)
            db_cursor.execute('SELECT city_name FROM CITIES WHERE iata_name=?',
                              (flight['destination'], ))
            city_name = db_cursor.fetchone()[0]
            flight['destination_name'] = city_name.title()

        db_connection.commit()
        return {'flights': result}
Example 34
import os
from datetime import datetime

import aiohttp
import dogehouse
import requests
from bs4 import BeautifulSoup
from covid import Covid
from dogehouse import DogeClient, command, event
from dogehouse.entities import BaseUser, Context, Message, User, UserPreview

from keep_alive import keep_alive

DOGETOKEN = os.getenv('DOGEHOUSE_TOKEN')
DOGEREFRESHTOKEN = os.getenv('DOGEHOUSE_REFRESH_TOKEN')
ownerid = "de27447e-a633-484d-afaa-8377887dda7b"
launch_time = datetime.utcnow()  # this is for the uptime command
print(launch_time)


class Client(DogeClient):
    @event
    async def on_ready(self):
        try:
            keep_alive()
            print("Keep Alive initiated")
            print(f"Successfully connected as {self.user}")
            await self.join_room("87d9cc48-370f-47e7-9d15-6449557cfcf2")
        except Exception as e:
            print(e)

    async def bot_info_get(self):
Example 35
def stop(self):
    self.stopped = datetime.utcnow()
    return self.pool.close()
Example 36
    def stream_events(self, inputs, ew):
        # djs 01/20
        # Load parameters associated with this modular input.
        self.input_name, self.input_items = inputs.inputs.popitem()
        session_key = self._input_definition.metadata["session_key"]
        unused, inputNameS = self.input_name.split("://")

        username = self.input_items["username"]
        password = self.input_items['password']
        rest_url = self.input_items["rest_url"]
        crefresh = int(self.input_items["crefresh"])
        interval = int(self.input_items["interval"])

        try:
            # If the password is not masked, mask it.
            if password != self.MASK:
                self.encrypt_password(username, password, session_key)
                self.mask_password(session_key, rest_url, username, crefresh)

        except Exception as e:
            ew.log("ERROR", "Error: %s" % str(e))

        # djs 12/19
        # Establish a file state store object based on part of the input name.
        state_store = FileStateStore(inputs.metadata, self.input_name)

        # djs 01/20
        # Create a default time for a cookie 80 hours in the past. Read cookie from
        # state store, but if none present, use the default_time.
        defaultTime = datetime.utcnow() - timedelta(hours=80)
        lastCookieTime = state_store.get_state(
            inputNameS + "_Velo_cookie_time") or str(defaultTime)
        lastCookieTime_obj = datetime.strptime(lastCookieTime,
                                               '%Y-%m-%d %H:%M:%S.%f')
        ew.log("INFO",
               "Cookie time read: " + lastCookieTime + " " + inputNameS)

        # djs 01/20
        # Read in a clear text version of the cookie from password db.
        # Userid is VCO input name + Velo_Cookie
        clearCookie = self.get_password(session_key,
                                        inputNameS + "_Velo_cookie")
        ew.log('INFO', "Cookie read from Password DB for: " + inputNameS + " ")
        cookie = {'velocloud.session': clearCookie}

        # djs 12/19
        # If last cookie time is beyond cookie refresh interval or 0, we need to
        # auth for a new cookie.
        if lastCookieTime_obj < (datetime.utcnow() -
                                 timedelta(hours=crefresh)) or crefresh == 0:

            ew.log('INFO', "Cookie required for: " + inputNameS)

            # djs 12/19
            # Obtain a clear text password for call to VCO and login.
            clearPassword = self.get_password(session_key, username)
            data = {'username': username, 'password': clearPassword}
            veloLoginUrl = rest_url + "/portal/rest/login/enterpriseLogin"

            # djs 01/20
            # If successful, we received a response from VCO.
            respC = requests.post(veloLoginUrl, data=data, verify=False)

            # djs 12/29
            # Save cookie to password db.
            veloCookie = respC.cookies['velocloud.session']
            self.encrypt_password(inputNameS + "_Velo_cookie", veloCookie,
                                  session_key)
            ew.log('INFO',
                   "Cookie Stored in Password DB: " + " for: " + inputNameS)

            # djs 12/29
            # Save cookie time to state store.
            currentCookieTime = datetime.utcnow()
            state_store.update_state(inputNameS + "_Velo_cookie_time",
                                     str(currentCookieTime))
            ew.log(
                'INFO', "Current Cookie Time Stored: " +
                str(currentCookieTime) + " for: " + inputNameS)

            cookie = {'velocloud.session': veloCookie}

        else:

            ew.log('INFO', "No Cookie required for: " + inputNameS)

        if cookie:

            # djs 12/19
            # We read last position or 0.
            # We read last time logged or default to one day ago.
            lastPosition = state_store.get_state(inputNameS +
                                                 "_Velo_last_pos") or 0
            ew.log(
                'INFO', "Last Position read is: " + str(lastPosition) +
                " for: " + inputNameS)

            lastTimeLogged = state_store.get_state(
                inputNameS + "_Velo_last_time") or (
                    datetime.utcnow() -
                    timedelta(days=(1))).strftime('%Y-%m-%dT%H:%M:%S.%fZ')
            lastTime_obj = datetime.strptime(
                lastTimeLogged,
                '%Y-%m-%dT%H:%M:%S.%fZ') - timedelta(seconds=interval)
            lastTime = lastTime_obj.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
            ew.log(
                'INFO', "Last Time Logged is: " + str(lastTime) + " for: " +
                inputNameS)

            # djs 12/19
            # Format the api call to velocloud vco to obtain event data.
            eventStart = lastTime
            eventEnd = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%fZ')
            data = {"interval": {"end": eventEnd, "start": eventStart}}
            ew.log('INFO',
                   "Request to VCO is: " + str(data) + " for: " + inputNameS)

            veloEventUrl = rest_url + "/portal/rest/event/getEnterpriseEvents"

            # djs 01/20
            # If successful, we received a response from VCO.
            respE = requests.post(veloEventUrl,
                                  cookies=cookie,
                                  data=json.dumps(data),
                                  verify=False)

            # djs 01/20
            # Debugging only.
            # ew.log('INFO', "Response from VCO: " + respE.text)

            # djs 12/19
            # The data response from the velocloud api call is in resp.text
            outputS = collections.OrderedDict()
            output = json.loads(respE.text)
            respE = ''

            try:
                # djs 12/19
                # Each log entry in json response is in data section identified by id.
                # Using id as key, write to a ordered dictionary so we can sort.

                for entry in output['data']:
                    thisId = entry['id']
                    outputS[thisId] = entry

                ew.log(
                    'INFO',
                    str(len(outputS)) +
                    " records returned from VCO Request for: " + inputNameS)

                if len(outputS) > 0:
                    # djs 12/19
                    # From VeloCloud, records are in the wrong order so we
                    # re-sort the ordered dictionary so oldest events first.
                    outputSr = collections.OrderedDict(
                        reversed(list(outputS.items())))

                    # djs 12/19
                    # For each event, write to splunk using ew.write_event and event object
                    # Note: assumption is that the key always increases. We don't handle wrapping.
                    highId = 0
                    eventCount = 0
                    for key_str, value in outputSr.items():
                        key = int(key_str)
                        if key > highId:
                            highId = key
                        if key > int(lastPosition):
                            event = Event()
                            event.stanza = inputNameS
                            event.data = json.dumps(value)
                            eventCount += 1
                            ew.write_event(event)

                    # djs 12/19
                    # Write the highest event id back to the file state store
                    if highId > 0:
                        try:
                            # djs 01/20
                            # Save the last time and position we wrote to splunk in state store.
                            state_store.update_state(
                                inputNameS + "_Velo_last_pos", str(highId))
                            ew.log(
                                'INFO', "Last Position out is: " +
                                str(highId) + " for: " + inputNameS)

                            state_store.update_state(
                                inputNameS + "_Velo_last_time", str(eventEnd))
                            ew.log(
                                'INFO', "Last Time out is: " + str(eventEnd) +
                                " for: " + inputNameS)

                        except Exception as e:
                            raise Exception, "Something did not go right: %s" % str(
                                e)

                    ew.log(
                        'INFO',
                        str(eventCount) +
                        " VeloCloud events written to log for: " + inputNameS)

            except Exception as e:
                raise Exception, "Something did not go right. Likely a bad password: %s" % str(
                    e)
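
# A standalone sketch (hypothetical names) of the cookie-refresh pattern above:
# re-authenticate only when the stored cookie timestamp is older than the
# configured refresh interval, or when the interval is 0 (always refresh).
from datetime import datetime, timedelta

def cookie_needs_refresh(last_cookie_time: datetime, refresh_hours: int) -> bool:
    if refresh_hours == 0:
        return True
    return last_cookie_time < datetime.utcnow() - timedelta(hours=refresh_hours)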
Example n. 37
def get_utc_milli_ts():
    # calendar.timegm() treats the tuple as UTC; time.mktime() would apply the
    # local-time offset and skew the result
    return int(calendar.timegm(datetime.utcnow().timetuple())) * 1000
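
# Usage sketch: the helper above yields a Unix epoch in milliseconds truncated
# to whole seconds. A timezone-aware equivalent with real millisecond precision:
from datetime import datetime, timezone

def get_utc_milli_ts_precise() -> int:
    return int(datetime.now(timezone.utc).timestamp() * 1000)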
Example n. 38
# Convert the value of the sensor into a temperature
# (read_temp_raw() and outfile are defined earlier in the source script)
def read_temp():
    lines = read_temp_raw()  # Read the temperature 'device file'

    # While the first line does not contain 'YES', wait for 0.2s
    # and then read the device file again.
    while lines[0].strip()[-3:] != 'YES':
        time.sleep(0.2)
        lines = read_temp_raw()

    # Look for the position of the 't=' marker in the second line of the
    # device file.
    equals_pos = lines[1].find('t=')

    # If the marker is found, convert the rest of the line after the
    # '=' into degrees Celsius.
    if equals_pos != -1:
        temp_string = lines[1][equals_pos + 2:]
        temp_c = float(temp_string) / 1000.0
        return temp_c


# Print out the temperature until the program is stopped.
with open(outfile, 'a') as f:
    while True:
        f.write(datetime.utcnow().isoformat() + ',' + str(read_temp()) +
                '\n')  #write the data to a string and to the output file
        f.flush()  #force the system to write the file to disk
        time.sleep(30)
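
# Follow-on sketch (hypothetical helper): converting the Celsius reading to
# Fahrenheit, the usual second step for this style of 1-wire sensor reader.
def c_to_f(temp_c):
    return temp_c * 9.0 / 5.0 + 32.0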
Example n. 39
def main():

    global sensor, verbose, testsrc
    ##++++++++++++++++  Configuration section +++++++++++++++++++++++##
    ingestDir = "/data_store/dropbox"
    downloadDir = "/data_store/download"
    minPixelCount = 60000  # minimum number of pixels for image to be valid
    minPixelRange = 50  # minimum range of pixel values for valid image
    ##++++++++++++++++++  end configuration  ++++++++++++++++++++++++##
    #
    ###### definitions based on command line input
    args = _process_command_line()
    verbose = args.verbose  # turns on verbose output
    satellite = args.satellite  # specifies single satellite platform
    testsrc = args.test  # directs data requests to test NRT stream
    matchstr = args.match  # only download files whose names contain this string
    if args.noingest:
        doingest = 0
    else:
        doingest = 1
    #
    if testsrc:
        datasrc = "nrt-test"
    else:
        datasrc = "nrt-prod"
    level = args.level
    #

    bgntime = datetime.utcnow() - timedelta(minutes=args.backmins)
    endtime = datetime.utcnow()
    bgnsecs = bgntime.strftime("%s")
    bgnstr = bgntime.strftime("%Y-%m-%d+%H%M")
    endstr = endtime.strftime("%Y-%m-%d+%H%M")
    #print "format={}  satellite={}".format(level, satellite)
    ######
    dset_count = {
        "modis": 0,
        "viirs": 0,
        "avhrr": 0,
        "metop": 0,
        "atms": 0,
        "amsr2": 0
    }
    #
    if verbose:
        print "Dates: ", bgnstr, " / ", endstr
    #
    downloads = 0
    for sensor in args.sensor:
        print "Requesting: {}".format(sensor)
        #
        if satellite == 'all':
            listurl = "http://{0}.gina.alaska.edu/products.txt?sensors[]={1}&processing_levels[]={2}&start_date={3}&end_date={4}".format(
                datasrc, sensor, level, bgnstr, endstr)
        else:
            listurl = "http://{0}.gina.alaska.edu/products.txt?satellites[]={1}&sensors[]={2}&processing_levels[]={3}&start_date={4}&end_date={5}".format(
                datasrc, satellite, sensor, level, bgnstr, endstr)
        #
        print "URL=", listurl
        sock = urllib.urlopen(listurl)

        htmlSource = sock.read()
        sock.close()
        if verbose:
            print "BEGIN HTML ======================================================="
            print htmlSource
            print "END HTML ========================================================="
        rtnval = len(htmlSource)
        print "HTML String length = {}".format(rtnval)
        # instantiate the parser and feed it the HTML page
        parser = MyHTMLParser()
        parser.feed(htmlSource)

        # change working location to the download scratch directory
        if doingest:
            os.chdir(downloadDir)
        # now parse the file name and retrieve the recent files
        cnt = 0
        dcount = 0
        ingcount = 0
        totsize = 0
        for fileurl in parser.satfile:
            # the test location for files is different than the operational location
            if testsrc:
                fileurl = fileurl.replace("dds.gina.alaska.edu/nrt",
                                          "nrt-dds-test.gina.alaska.edu")
            if verbose:
                print "Downloading: {}".format(fileurl)
            filename = "{}".format(fileurl.split("/")[-1])
            if matchstr:
                #print "looking for matchstr=[{}]".format(matchstr)
                if matchstr in filename:
                    print "Found: {}".format(filename)
                else:
                    continue

            print "FILENAME={}".format(filename)
            urllib.urlretrieve(fileurl, filename)
            if os.path.isfile(filename):
                fsize = os.path.getsize(filename)
                dcount += 1
                nameseg = filename.split('.')
                basenm = nameseg[0]
                if verbose:
                    print "Basename = {}".format(basenm)
                # use base name to create a new name with "Alaska" prefix and ".nc" extension
                if args.regionalsat:
                    newfilename = "Alaska_{}.nc".format(basenm)
                    print "Adding prefix: {}".format(newfilename)
                else:
                    newfilename = filename

                # now look for ".gz" in file name to determine if compression is needed
                if ".gz" in filename:
                    # open compressed file and read out all the contents
                    inF = gzip.GzipFile(filename, 'rb')
                    s = inF.read()
                    inF.close()
                    # now write uncompressed result to the new filename
                    outF = file(newfilename, 'wb')
                    outF.write(s)
                    outF.close()
                    # make sure the decompression was successful
                    if not os.path.exists(newfilename):
                        print "Decompression failed: {}".format(filename)
                        raise SystemExit
                    # redirected compression copies to a new file so old compressed file needs to be removed
                    os.remove(filename)
                    #
                    if verbose:
                        print "File decompressed: {}".format(newfilename)

                elif ".nc" in filename:
                    move(filename, newfilename)
                #
                # set the filename variable to the new uncompressed name
                filename = newfilename
                ###############################################
                # last step is to do QC checks on the data
                if args.qcfilter:
                    if qc_image_file(filename, minPixelCount, minPixelRange):
                        print "Moving {} to {}".format(filename, ingestDir)
                        move(filename, ingestDir)
                        ingcount += 1
                    else:
                        print "QC failed. Removing: {}".format(filename)
                        os.remove(filename)
                ###############################################
                else:
                    # Check whether this is nucaps sounding which needs
                    # file modification for AWIPS
                    if level == 'nucaps_level2':
                        print "NUCAPS: {}".format(filename)
                        if "NUCAPS-EDR" in filename:
                            origFilename = filename
                            print "fix nucaps file"
                            filename = fix_nucaps_file(origFilename)
                            print "new filename = {}".format(filename)
                            if os.path.exists(filename):
                                # a new converted file has been made so remove the original file
                                print "Removing: {}".format(origFilename)
                                #move(origFilename,"/home/awips/testscripts/testdata")
                                os.remove(origFilename)
                        else:
                            print "Removing: {}".format(filename)
                            os.remove(filename)
                            continue
                    # Now check if the file already exists in the ingest directory
                    ingestfilename = "{}/{}".format(ingestDir, filename)
                    if os.path.exists(ingestfilename):
                        print "File already exists in Ingest Dir...removing: {}".format(
                            filename)
                        os.remove(filename)
                        continue
                    elif doingest:
                        # OK, ready to move the file to the ingest directory
                        print "Moving {} to {}".format(filename, ingestDir)
                        try:
                            move(filename, ingestDir)
                        except:
                            print "************  Unable to  move file to ingest: {}".format(
                                filename)
                            continue
                    else:
                        print "No ingest for {}".format(filename)
                    ingcount += 1
                    print "INGEST CNT = {}".format(ingcount)
                #
            else:
                fsize = 0

            totsize += fsize
            downloads += 1
            dset_count[sensor] += 1
            cnt += 1

    for sensor in args.sensor:
        print "{} files downloaded={}".format(sensor, dset_count[sensor])
    print "Total files downloaded={} ingested={}  total size={}".format(
        downloads, ingcount, totsize)
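
# A minimal sketch of the kind of check a QC filter like qc_image_file() above
# performs (assumption: its real implementation is not shown in this snippet):
# accept an image only when it has enough valid pixels and enough dynamic range.
import numpy as np

def qc_image_sketch(pixels, min_pixel_count, min_pixel_range):
    valid = pixels[np.isfinite(pixels)]
    if valid.size < min_pixel_count:
        return False
    return (valid.max() - valid.min()) >= min_pixel_range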
Example n. 40
def __init__(self, id, username=None):
    self.id = id
    self.username = username
    self.connected_at = datetime.utcnow()
    self.rooms = []
    self.messages = Queue()
Example n. 41
def attendance(password, user, period, start=None, end=None):
    """
    Retrieves timesheet and totals it for the current month.
    """
    from datetime import datetime
    import pytz
    import holidays

    def colored_diff(title, diff, notes=None, invert=False):
        positive_color = 'green'
        negative_color = 'magenta'
        if invert:
            positive_color = 'magenta'
            negative_color = 'green'

        if not notes:
            notes = ''
        else:
            notes = f' ! {notes}'

        color = negative_color if diff[0] == '-' else positive_color
        click.echo(
            click.style(f'{title}\t', fg='blue') +
            click.style(diff, fg=color) + click.style(notes, fg='magenta'))

    if password is None:
        password = get_pass()
    check_config()
    with Settings() as config:
        client = Client(username=config['username'],
                        password=password,
                        database=config['database'],
                        host=config['host'])
    client.connect()
    # Fall back to the logged-in user's id when no user is given
    # (assumes a user argument, when provided, is already a user id)
    user_id = user if user else client.user.id

    filters = [('employee_id.user_id.id', '=', user_id)]

    # Add the start filter
    if start:
        filters.append(('check_in', '>=', start.strftime('%Y-%m-%d 00:00:00')))
    elif period == 'month':
        filters.append(
            ('check_in', '>=', datetime.now().strftime('%Y-%m-01 00:00:00')))
    elif period == 'year':
        filters.append(
            ('check_in', '>=', datetime.now().strftime('%Y-01-01 00:00:00')))

    # Add optional end filter
    if end:
        filters.append(('check_out', '<', end.strftime('%Y-%m-%d 00:00:00')))

    attendance_ids = client.search('hr.attendance', filters)
    attendances = client.read('hr.attendance', attendance_ids)

    weeks = {}
    # @TODO Assumes user is in Finland
    local_holidays = holidays.FI()

    # Faux data to test holidays
    # attendances.append({
    #     'check_in': '2018-01-01 00:00:00',
    #     'check_out': '2018-01-01 02:00:00',
    #     'worked_hours': 2
    # })

    for attendance in attendances:
        # Get a localized datetime object
        # @TODO This assumes the server returns times as EU/Helsinki
        date = pytz.timezone('Europe/Helsinki').localize(
            datetime.strptime(attendance['check_in'], '%Y-%m-%d %H:%M:%S'))

        # If there is no checkout time, sum to now
        if not attendance['check_out']:
            # @TODO Same as above
            # Use an aware local "now"; localizing a naive utcnow() as Helsinki
            # would mislabel UTC wall-clock time and skew the difference
            now = datetime.now(pytz.timezone('Europe/Helsinki'))
            attendance['worked_hours'] = (now - date).total_seconds() / 3600

        # Get the day and week index keys (Key = %Y-%m-%d)
        day_key = date.strftime('%Y-%m-%d')
        # Counts weeks from first Monday of the year
        week_key = date.strftime('%W')

        if week_key not in weeks:
            weeks[week_key] = {}

        if day_key not in weeks[week_key]:
            # @TODO Assumes 7.5 hours per day
            weeks[week_key][day_key] = {
                'allocated_hours': 7.5,
                'worked_hours': 0,
                'holiday': None
            }

        if day_key in local_holidays:
            # This day is a holiday, no allocated hours
            weeks[week_key][day_key]['holiday'] = local_holidays.get(day_key)
            weeks[week_key][day_key]['allocated_hours'] = 0

        # Sum the attendance
        weeks[week_key][day_key]['worked_hours'] += attendance['worked_hours']

    total_diff = 0
    total_hours = 0
    day_diff = 0
    click.echo(
        click.style(
            f'Balance as of {(datetime.today().isoformat(timespec="seconds"))} (system time)',
            fg='blue'))
    click.echo(click.style('Day\t\tWorked\tDifference', fg='blue'))
    for week_number, week in sorted(weeks.items()):
        for key, day in sorted(week.items()):
            diff = day['worked_hours'] - day['allocated_hours']
            colored_diff(f'{key}\t{(day["worked_hours"]):.2f}', f'{diff:+.2f}',
                         day['holiday'])

            if key == datetime.today().strftime('%Y-%m-%d'):
                day_diff += day['worked_hours'] - day['allocated_hours']
            else:
                total_diff += day['worked_hours'] - day['allocated_hours']
            total_hours += day['worked_hours']

    today = datetime.now().strftime('%Y-%m-%d')
    this_week = datetime.now().strftime('%W')
    hours_today = 0
    allocated_today = 0
    if today in weeks.get(this_week, {}):
        hours_today = weeks[this_week][today]['worked_hours']
        allocated_today = weeks[this_week][today]['allocated_hours']

    click.echo(click.style('---\t\t------\t-----', fg='blue'))
    colored_diff(f'Totals:\t\t{total_hours:.2f}',
                 f'{(total_diff + day_diff):+.2f}')
    print()
    colored_diff('Balance yesterday:', f'{total_diff:+.2f}')
    colored_diff('Balance now:\t', f'{(total_diff + day_diff):+.2f}')
    colored_diff('Allocated hours today:',
                 f'{(allocated_today - hours_today):+.2f}',
                 invert=True)
Example n. 42
def getUnix():
    d = datetime.utcnow()
    unixtime = calendar.timegm(d.utctimetuple())
    return unixtime
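
# Equivalent usage sketch: the same whole-second epoch from an aware datetime,
# avoiding naive-UTC handling entirely.
from datetime import datetime, timezone

def getUnixAware() -> int:
    return int(datetime.now(timezone.utc).timestamp())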
Example n. 43
def lambda_handler(event, _context):
    #  Get Current DateTime
    utcDateTime = datetime.utcnow()
    logger.debug(f'utcDateTime: {utcDateTime}')

    #  Get all EC2 instances
    session = boto3.Session()
    ec2Client = session.client('ec2', region_name='us-east-1')

    try:
        logger.debug(f'Describing All Instances...')
        allInstances = ec2Client.describe_instances()
        # logger.debug(f'allInstances: {allInstances}')
    except Exception as e:
        logger.error(f'Failed to describe_instances')
        raise e

    #  Check each EC2 instance and take action for Shutdowns
    logger.info('### Checking for ShutDown Tags ###')
    for instances in allInstances['Reservations']:
        for instance in instances['Instances']:
            try:
                if instance['InstanceId'] == 'i-0de01bff49c78f2ee':
                    instanceId = instance['InstanceId']

                    for tag in instance['Tags']:
                        #  Check AutoShutDown8pm Tag Value (Old way)
                        if 'AutoShutDown8pm' in (tag.get('Key')):
                            autoShutDownValue = (tag.get('Value'))

                            logger.info(f'AutoShutDown8pm Tag found on: {instanceId}')
                            #  Shutdown if AutoShutDown == EST (currentTime +4)
                            if 'EST' in autoShutDownValue and utcDateTime.hour == 0:
                                logger.info(f'Shutting down instance: {instanceId}')
                                ec2Client.stop_instances(InstanceIds=[instanceId])
                                ec2Client.create_tags(Resources=[instanceId], Tags=[{'Key': 'LastAutomatedShutdown',
                                     'Value': str(
                                         utcDateTime) + ' - The server was requested to be shutdown at 8pm EST'}])
                            #  Shutdown if AutoShutDown == CST (currentTime +5)
                            elif 'CST' in autoShutDownValue and utcDateTime.hour == 1:
                                logger.info(f'Shutting down instance: {instanceId}')
                                ec2Client.stop_instances(InstanceIds=[instanceId])
                                ec2Client.create_tags(Resources=[instanceId], Tags=[{'Key': 'LastAutomatedShutdown',
                                     'Value': str(
                                         utcDateTime) + ' - The server was requested to be shutdown at 8pm CST'}])
                            #  Shutdown if AutoShutDown == MST (currentTime +6)
                            elif 'MST' in autoShutDownValue and utcDateTime.hour == 2:
                                logger.info(f'Shutting down instance: {instanceId}')
                                ec2Client.stop_instances(InstanceIds=[instanceId])
                                ec2Client.create_tags(Resources=[instanceId], Tags=[{'Key': 'LastAutomatedShutdown',
                                     'Value': str(
                                         utcDateTime) + ' - The server was requested to be shutdown at 8pm MST'}])
                            #  Shutdown if AutoShutDown == PST (currentTime +7)
                            elif 'PST' in autoShutDownValue and utcDateTime.hour == 3:
                                logger.info(f'Shutting down instance: {instanceId}')
                                ec2Client.stop_instances(InstanceIds=[instanceId])
                                ec2Client.create_tags(Resources=[instanceId], Tags=[{'Key': 'LastAutomatedShutdown',
                                     'Value': str(
                                         utcDateTime) + ' - The server was requested to be shutdown at 8pm PST'}])
                            else:
                                logger.info(f'No Shutdown Action taken on: {instanceId}')

                            #  Delete Variable
                            del autoShutDownValue

                        #  Check ShutDownInstanceAt Tag Value (New way)
                        if 'ShutDownInstanceAt' in (tag.get('Key')):
                            logger.info(f'ShutDownInstanceAt Tag found on: {instanceId}')
                            ShutDownInstanceAtValue = (tag.get('Value'))
                            logger.debug(f'ShutDownInstanceAtValue: {ShutDownInstanceAtValue}')
                            shutDownInstanceAtHour = ShutDownInstanceAtValue.split(' ')[0]
                            logger.debug(f'shutDownInstanceAtHour: {shutDownInstanceAtHour}')

                            if 'pm' in shutDownInstanceAtHour:
                                # '% 12' keeps a '12pm' value at hour 12 instead of 24
                                shutDownInstanceAtHour = (int(shutDownInstanceAtHour.split('pm')[0]) % 12) + 12
                                logger.debug(f'shutDownInstanceAtHour: {shutDownInstanceAtHour}')

                            elif 'am' in shutDownInstanceAtHour:
                                # '% 12' maps a '12am' value to hour 0
                                shutDownInstanceAtHour = int(shutDownInstanceAtHour.split('am')[0]) % 12
                                logger.debug(f'shutDownInstanceAtHour: {shutDownInstanceAtHour}')

                            logger.debug(f'utcDateTime.hour: {utcDateTime.hour}')
                            #  Shutdown instance if the current UTC hour == the shutDownInstanceHour
                            if shutDownInstanceAtHour == utcDateTime.hour:
                                logger.info(f'Shutting down instance: {instanceId}')
                                ec2Client.stop_instances(InstanceIds=[instanceId])
                                ec2Client.create_tags(Resources=[instanceId], Tags=[{'Key': 'LastAutomatedShutdown',
                                     'Value': str(utcDateTime) + ' - The server was requested to be shutdown at ' +
                                              ShutDownInstanceAtValue}])
                            else:
                                logger.info(f'No Shutdown Action taken on: {instanceId}')

                            #  Delete Variable
                            del ShutDownInstanceAtValue

            except Exception as e:
                logger.error(f'Failed executing Shutdown actions')
                raise e

    #  Check each EC2 instance and take action for PowerOn
    logger.info('### Checking for PowerOn Tags ###')
    for instances in allInstances['Reservations']:
        for instance in instances['Instances']:
            try:
                if instance['InstanceId'] == 'i-0de01bff49c78f2ee':
                    instanceId = instance['InstanceId']

                    for tag in instance['Tags']:
                        #  Check PowerOnInstanceAt Tag Value
                        if 'PowerOnInstanceAt' in (tag.get('Key')):
                            logger.info(f'PowerOnInstanceAt Tag found on: {instanceId}')
                            PowerOnInstanceAtValue = (tag.get('Value'))
                            logger.debug(f'PowerOnInstanceAtValue: {PowerOnInstanceAtValue}')
                            PowerOnInstanceAtHour = PowerOnInstanceAtValue.split(' ')[0]
                            logger.debug(f'PowerOnInstanceAtHour: {PowerOnInstanceAtHour}')

                            if 'pm' in PowerOnInstanceAtHour:
                                # '% 12' keeps a '12pm' value at hour 12 instead of 24
                                PowerOnInstanceAtHour = (int(PowerOnInstanceAtHour.split('pm')[0]) % 12) + 12
                                logger.debug(f'PowerOnInstanceAtHour: {PowerOnInstanceAtHour}')

                            elif 'am' in PowerOnInstanceAtHour:
                                # '% 12' maps a '12am' value to hour 0
                                PowerOnInstanceAtHour = int(PowerOnInstanceAtHour.split('am')[0]) % 12
                                logger.debug(f'PowerOnInstanceAtHour: {PowerOnInstanceAtHour}')

                            logger.debug(f'utcDateTime.hour: {utcDateTime.hour}')
                            #  PowerOn instance if the current UTC hour == the PowerOnInstanceAtHour
                            if PowerOnInstanceAtHour == utcDateTime.hour:
                                logger.info(f'Powering on instance: {instanceId}')
                                response = ec2Client.start_instances(InstanceIds=[instanceId])
                                logger.debug(f'response: {response}')
                                ec2Client.create_tags(Resources=[instanceId], Tags=[{'Key': 'LastAutomatedPowerOn',
                                     'Value': str(utcDateTime) + ' - The server was requested to be powered on at ' +
                                              PowerOnInstanceAtValue}])
                            else:
                                logger.info(f'No PowerOn Action taken on: {instanceId}')

                            #  Delete Variable
                            del PowerOnInstanceAtValue

            except Exception as e:
                logger.error(f'Failed executing PowerOn actions')
                raise e
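
# Standalone sketch of the tag parsing above (hypothetical helper): convert a
# tag value such as '8pm EST' into a 24-hour figure, covering the 12am/12pm
# edge cases the inline parsing has to handle.
def parse_tag_hour(tag_value: str) -> int:
    hour_part = tag_value.split(' ')[0]
    if 'pm' in hour_part:
        return (int(hour_part.split('pm')[0]) % 12) + 12
    if 'am' in hour_part:
        return int(hour_part.split('am')[0]) % 12
    return int(hour_part)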
Example n. 44
def refresh_database(refresh_method='system',
                     truncate=False,
                     truncateDate=None):
    run_time = datetime.utcnow()
    athlete_info = app.session.query(athlete).filter(
        athlete.athlete_id == 1).first()
    processing = app.session.query(dbRefreshStatus).filter(
        dbRefreshStatus.refresh_method == 'processing').first()
    # Add record for refresh audit trail
    refresh_record = dbRefreshStatus(
        timestamp_utc=run_time,
        refresh_method=refresh_method,
        truncate=True if truncate or truncateDate else False)
    app.session.add(refresh_record)
    app.session.commit()

    if not processing:
        try:
            # If athlete settings are defined
            if all([
                    athlete_info.name, athlete_info.birthday, athlete_info.sex,
                    athlete_info.weight_lbs, athlete_info.resting_hr,
                    athlete_info.run_ftp, athlete_info.ride_ftp
            ]):
                # Insert record into table for 'processing'
                db_process_flag(flag=True)

                # If either truncate parameter is passed
                if truncate or truncateDate:

                    # If only truncating past a certain date
                    if truncateDate:
                        try:
                            app.server.logger.debug(
                                'Truncating strava_summary')
                            app.session.execute(
                                delete(stravaSummary).where(
                                    stravaSummary.start_date_utc >=
                                    truncateDate))
                            app.server.logger.debug(
                                'Truncating strava_samples')
                            app.session.execute(
                                delete(stravaSamples).where(
                                    stravaSamples.timestamp_local >=
                                    truncateDate))
                            app.server.logger.debug(
                                'Truncating strava_best_samples')
                            app.session.execute(
                                delete(stravaBestSamples).where(
                                    stravaBestSamples.timestamp_local >=
                                    truncateDate))
                            app.server.logger.debug('Truncating stryd_summary')
                            app.session.execute(
                                delete(strydSummary).where(
                                    strydSummary.start_date_local >=
                                    truncateDate))
                            app.server.logger.debug(
                                'Truncating oura_readiness_summary')
                            app.session.execute(
                                delete(ouraReadinessSummary).where(
                                    ouraReadinessSummary.report_date >=
                                    truncateDate))
                            app.server.logger.debug(
                                'Truncating oura_sleep_summary')
                            app.session.execute(
                                delete(ouraSleepSummary).where(
                                    ouraSleepSummary.report_date >=
                                    truncateDate))
                            app.server.logger.debug(
                                'Truncating oura_sleep_samples')
                            app.session.execute(
                                delete(ouraSleepSamples).where(
                                    ouraSleepSamples.report_date >=
                                    truncateDate))
                            app.server.logger.debug(
                                'Truncating oura_activity_summary')
                            app.session.execute(
                                delete(ouraActivitySummary).where(
                                    ouraActivitySummary.summary_date >=
                                    truncateDate))
                            app.server.logger.debug(
                                'Truncating oura_activity_samples')
                            app.session.execute(
                                delete(ouraActivitySamples).where(
                                    ouraActivitySamples.timestamp_local >=
                                    truncateDate))
                            app.server.logger.debug(
                                'Truncating hrv_workout_step_log')
                            # Delete extra day back so hrv workflow can recalculate the 'completed_yesterday' flag
                            app.session.execute(
                                delete(workoutStepLog).where(
                                    workoutStepLog.date >= (
                                        truncateDate - timedelta(days=1))))
                            app.server.logger.debug('Truncating withings')
                            app.session.execute(
                                delete(withings).where(
                                    withings.date_utc >= truncateDate))
                            app.session.commit()
                        except BaseException as e:
                            app.session.rollback()
                            app.server.logger.error(e)
                    else:
                        try:
                            app.server.logger.debug(
                                'Truncating strava_summary')
                            app.session.execute(delete(stravaSummary))
                            app.server.logger.debug(
                                'Truncating strava_samples')
                            app.session.execute(delete(stravaSamples))
                            app.server.logger.debug(
                                'Truncating strava_best_samples')
                            app.session.execute(delete(stravaBestSamples))
                            app.server.logger.debug(
                                'Truncating oura_readiness_summary')
                            app.session.execute(delete(ouraReadinessSummary))
                            app.server.logger.debug(
                                'Truncating oura_sleep_summary')
                            app.session.execute(delete(ouraSleepSummary))
                            app.server.logger.debug(
                                'Truncating oura_sleep_samples')
                            app.session.execute(delete(ouraSleepSamples))
                            app.server.logger.debug(
                                'Truncating oura_activity_summary')
                            app.session.execute(delete(ouraActivitySummary))
                            app.server.logger.debug(
                                'Truncating oura_activity_samples')
                            app.session.execute(delete(ouraActivitySamples))
                            app.server.logger.debug(
                                'Truncating hrv_workout_step_log')
                            app.session.execute(delete(workoutStepLog))
                            app.server.logger.debug('Truncating withings')
                            app.session.execute(delete(withings))
                            app.server.logger.debug('Truncating fitbod')
                            app.session.execute(delete(fitbod))
                            app.session.commit()
                        except BaseException as e:
                            app.session.rollback()
                            app.server.logger.error(e)

                    app.session.remove()

                ### Pull Weight Data ###

                # If withings credentials in config.ini, populate withings table
                if withings_credentials_supplied:
                    try:
                        app.server.logger.info('Pulling withings data...')
                        pull_withings_data()
                        withings_status = 'Successful'
                    except BaseException as e:
                        app.server.logger.error(
                            'Error pulling withings data: {}'.format(e))
                        withings_status = str(e)
                else:
                    withings_status = 'No Credentials'

                ### Pull Fitbod Data ###

                # If nextcloud credentials in config.ini, pull fitbod data from nextcloud location
                if nextcloud_credentials_supplied:
                    try:
                        app.server.logger.info('Pulling fitbod data...')
                        pull_fitbod_data()
                        fitbod_status = 'Successful'
                    except BaseException as e:
                        app.server.logger.error(
                            'Error pulling fitbod data: {}'.format(e))
                        fitbod_status = str(e)
                else:
                    fitbod_status = 'No Credentials'

                ### Pull Oura Data ###

                if oura_credentials_supplied:
                    # Pull Oura Data before strava because resting heart rate used in strava sample heart rate zones
                    try:
                        app.server.logger.info('Pulling oura data...')
                        oura_status = pull_oura_data()
                        oura_status = 'Successful' if oura_status else 'Oura cloud not yet updated'
                    except BaseException as e:
                        app.server.logger.error(
                            'Error pulling oura data: {}'.format(e))
                        oura_status = str(e)
                else:
                    oura_status = 'No Credentials'

                ### Pull Stryd Data ###
                if stryd_credentials_supplied:
                    app.server.logger.info('Pulling stryd data...')
                    pull_stryd_data()

                ### This has been moved to crontab as spotify refresh is required more frequently than hourly ###
                # ### Pull Spotify Data ###
                # if spotify_credentials_supplied:
                #     app.server.logger.info('Pulling spotify play history...')
                #     save_spotify_play_history()

                ### Pull Strava Data ###

                # Only pull strava data if oura cloud has been updated with latest day, or no oura credentials so strava will use athlete static resting hr
                if oura_status == 'Successful' or oura_status == 'No Credentials':
                    try:
                        app.server.logger.info('Pulling strava data...')

                        if strava_connected():
                            athlete_id = 1  # TODO: Make this dynamic if ever expanding to more users
                            client = get_strava_client()
                            after = config.get('strava',
                                               'activities_after_date')
                            activities = client.get_activities(
                                after=after, limit=0
                            )  # Use after to sort from oldest to newest

                            athlete_info = app.session.query(athlete).filter(
                                athlete.athlete_id == athlete_id).first()
                            min_non_warmup_workout_time = athlete_info.min_non_warmup_workout_time
                            # Loop through the activities, and create a dict of the dataframe stream data of each activity
                            db_activities = pd.read_sql(sql=app.session.query(
                                stravaSummary.activity_id).filter(
                                    stravaSummary.athlete_id ==
                                    athlete_id).distinct(
                                        stravaSummary.activity_id).statement,
                                                        con=engine)

                            app.session.remove()
                            new_activities = []
                            for act in activities:
                                # If not already in db, parse and insert
                                if act.id not in db_activities[
                                        'activity_id'].unique():
                                    new_activities.append(FitlyActivity(act))
                                    app.server.logger.info(
                                        'New Workout found: "{}"'.format(
                                            act.name))
                            # If new workouts found, analyze and insert
                            if len(new_activities) > 0:
                                for fitly_act in new_activities:
                                    fitly_act.stravaScrape(
                                        athlete_id=athlete_id)
                            # Only run hrv training workflow if oura connection available to use hrv data or readiness score
                            if oura_status == 'Successful':
                                training_workflow(
                                    min_non_warmup_workout_time=
                                    min_non_warmup_workout_time,
                                    metric=app.session.query(athlete).filter(
                                        athlete.athlete_id ==
                                        1).first().recovery_metric)

                        app.server.logger.debug('stravaScrape() complete...')
                        strava_status = 'Successful'
                    except BaseException as e:
                        app.server.logger.error(
                            'Error pulling strava data: {}'.format(e))
                        strava_status = str(e)
                else:
                    app.server.logger.info(
                        'Oura cloud not yet updated. Waiting to pull Strava data'
                    )
                    strava_status = 'Awaiting oura cloud update'

                app.server.logger.debug(
                    'Updating db refresh record with status...')
                refresh_record = app.session.query(dbRefreshStatus).filter(
                    dbRefreshStatus.timestamp_utc == run_time).first()
                refresh_record.oura_status = oura_status
                refresh_record.fitbod_status = fitbod_status
                refresh_record.strava_status = strava_status
                refresh_record.withings_status = withings_status
                refresh_record.refresh_method = refresh_method
                app.session.commit()

                # Refresh peloton class types local json file
                if peloton_credentials_supplied:
                    get_peloton_class_names()

                db_process_flag(flag=False)
                app.server.logger.info('Refresh Complete')
                app.session.remove()

            else:
                app.server.logger.info(
                    'Please define all athlete settings prior to refreshing data'
                )
        except BaseException as e:
            # Just in case the job fails, remove any processing records that may
            # have been added to the audit log so as not to lock the next job
            app.server.logger.error(e)
            db_process_flag(flag=False)
    else:
        if refresh_method == 'manual':
            app.server.logger.info('Database is already running a refresh job')

    app.session.remove()
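
# Sketch of the mutex pattern refresh_database() relies on (db_process_flag()
# is assumed from the surrounding app): a 'processing' marker prevents
# overlapping refresh jobs, and a finally block guarantees the flag is cleared
# even when the job raises.
def run_exclusive(job, is_processing, set_processing):
    if is_processing():
        return False
    set_processing(True)
    try:
        job()
    finally:
        set_processing(False)
    return True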
Example n. 45
    async def mute(self,
                   ctx,
                   member: Greedy[discord.Member],
                   *,
                   time: TimeConverter = None):

        redmark = "<:redmark:738415723172462723>"

        #If no members are given
        if not len(member):
            e = discord.Embed(
                description=
                f"{redmark} __*{ctx.author.mention}, you have to give a member(s) to mute*__",
                color=0x420000)
            await ctx.send(embed=e)
            return

        #Get the muted role
        mute_role = discord.utils.get(ctx.guild.roles, name="Muted")

        #If the mute role doesn't exist in the server
        if not mute_role:
            e = discord.Embed(
                description=
                f"{redmark} __*Couldn't find a [Muted Role](https://echo-bot.fandom.com/wiki/Setting_up_the_muted_role)",
                color=0x420000)
            await ctx.send(embed=e)
            return

        #Empty list of members to store
        #For unmuting later on
        unmutes = []

        for mem in member:
            #Check if the member(s) aren't muted already
            if mute_role not in mem.roles:
                #Check if the bot has the right permissions
                #To perform the mute
                if ctx.guild.me.top_role.position > mem.top_role.position:

                    #Store the role ids
                    role_ids = ",".join([str(r.id) for r in mem.roles])
                    #If an endtime was given
                    #Store it, else None
                    endtime = datetime.utcnow() + timedelta(
                        seconds=time) if time else None

                    #Use the function for adding the mute
                    #And storing the member's id, roles, and endtime (if given)
                    await self.db.add_mute(
                        mem.id, role_ids,
                        getattr(endtime, "isoformat", lambda: None)(),
                        ctx.guild.id)

                    #Edit the user's roles
                    await mem.edit(roles=[mute_role])

                    tm = f'{time:,}' if time else 'Indefinite'

                    try:
                        e = discord.Embed(
                            description=
                            f"**You've been muted in {ctx.guild}!**",
                            timestamp=datetime.utcnow(),
                            color=0x420000)

                        e.set_author(name=f"Duration -> {tm}")

                        e.set_footer(text=f"Muted by {ctx.author}")

                        e.set_thumbnail(url=ctx.guild.icon_url)

                        await mem.send(embed=e)

                    except Exception:
                        pass

                    #Make our embed
                    e = discord.Embed(
                        description=f"🤐 **{mem.mention} has been muted!**",
                        color=0x420000,
                        timestamp=datetime.utcnow())

                    e.set_author(name=f'Duration -> {tm}')

                    #Send the embed
                    await ctx.send(embed=e)

                    #If a time is given
                    #Append the members
                    #To the list of unmutes
                    if time:
                        unmutes.append(mem)

                #If the bot doesn't have the right perms
                else:
                    e = discord.Embed(
                        description=
                        f"{redmark} __*{mem.mention} couldn't be muted due to my permission/role hierarchy*__",
                        color=0x420000)
                    await ctx.send(embed=e)
                    return
            #If the member's already muted
            else:
                e = discord.Embed(
                    description=
                    f"{redmark} __*{mem.mention} is already muted!*__",
                    color=0x420000)
                await ctx.send(embed=e)
                return

        #If a time is given
        #Wait for that amount of time and
        #Then unmute after that time passes
        if len(unmutes):
            await asyncio.sleep(time)
            await self.unmute_mem(ctx, member)
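
# Sketch (hypothetical helper, assuming the ISO-format end times stored by
# add_mute() above): on bot startup, stored end times can be turned back into
# sleep delays so timed mutes survive a restart instead of living only in an
# in-process asyncio.sleep().
from datetime import datetime

def seconds_until(endtime_iso: str) -> float:
    remaining = datetime.fromisoformat(endtime_iso) - datetime.utcnow()
    return max(remaining.total_seconds(), 0.0)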
Example n. 46
def detect() -> None:
    schema_registry = confluent_kafka.schema_registry.SchemaRegistryClient(
        {'url': config.SCHEMA_REGISTRY_URL})
    key_serializer = confluent_kafka.schema_registry.avro.AvroSerializer(
        message_schemas.BAD_PLANS_MESSAGE_KEY_AVRO_SCHEMA, schema_registry)
    value_serializer = confluent_kafka.schema_registry.avro.AvroSerializer(
        message_schemas.BAD_PLANS_MESSAGE_VALUE_AVRO_SCHEMA, schema_registry)
    key_deserializer = confluent_kafka.schema_registry.avro.AvroDeserializer(
        message_schemas.QUERY_STATS_MESSAGE_KEY_AVRO_SCHEMA, schema_registry)
    value_deserializer = confluent_kafka.schema_registry.avro.AvroDeserializer(
        message_schemas.QUERY_STATS_MESSAGE_VALUE_AVRO_SCHEMA, schema_registry)

    producer_config = {
        'bootstrap.servers': config.KAFKA_BOOTSTRAP_SERVERS,
        'key.serializer': key_serializer,
        'value.serializer': value_serializer,
        'linger.ms': 100,
        'retry.backoff.ms': 250,
        'compression.codec': 'snappy'
    }
    consumer_config = {
        'bootstrap.servers': config.KAFKA_BOOTSTRAP_SERVERS,
        'group.id':
        f'sqlserver_plan_regression_monitor_detect_{socket.getfqdn()}',
        'key.deserializer': key_deserializer,
        'value.deserializer': value_deserializer,
        # We manage our own offset seeking and do not use commits in this module:
        'enable.auto.commit': False,
        'error_cb': lambda evt: logger.error('Kafka error: %s', evt),
        'throttle_cb':
        lambda evt: logger.warning('Kafka throttle event: %s', evt)
    }

    kafka_producer = confluent_kafka.SerializingProducer(producer_config)
    kafka_consumer = confluent_kafka.DeserializingConsumer(consumer_config)
    kafka_consumer.subscribe([config.STATS_TOPIC],
                             on_assign=partial(
                                 common.set_offsets_to_time,
                                 config.REFRESH_INTERVAL_MINUTES * 60))

    try:
        while True:
            # Top level: one per query on a particular DB. Next level: plans seen for that SQL query, keyed by the
            # plan_handle. 3rd and innermost level: the dict of stats collected for the plan
            queries = collections.defaultdict(dict)

            memory_flush_deadline = datetime.utcnow() + timedelta(
                minutes=config.REFRESH_INTERVAL_MINUTES)

            while datetime.utcnow() < memory_flush_deadline:
                msg = kafka_consumer.poll(1.0)

                if msg is None:
                    continue

                msg_key = dict(msg.key())
                msg_val = dict(msg.value())

                caught_up = time.time() - (msg_val['stats_query_time'] / 1000) < \
                    config.MAX_ALLOWED_EVALUATION_LAG_SECONDS
                query_key = (msg_key['db_identifier'], msg_key['set_options'],
                             msg_key['sql_handle'])
                queries[query_key][msg_val['plan_handle']] = msg_val
                queries[query_key][msg_val['plan_handle']]['source_stats_message_coordinates'] = \
                    common.msg_coordinates(msg)

                if msg.offset() % 100_000 == 0:
                    logger.info(
                        f'Reached {common.format_msg_info(msg)}. Caught up = {caught_up}. Queries cached: '
                        f'{len(queries):,}')
                if msg.offset() % 1000 == 0:
                    kafka_producer.poll(0)  # serve delivery callbacks if needed

                # Need other plans to compare against before calling one "bad"
                if caught_up and len(queries[query_key]) > 1:
                    bad_plans, prior_plans = find_bad_plans(
                        queries[query_key], msg_val['stats_query_time'])
                    for bad_plan in bad_plans:
                        kafka_producer.poll(0)  # serve delivery callbacks
                        bad_plan['prior_plans'] = prior_plans
                        msg_key = message_schemas.key_from_value(bad_plan)
                        kafka_producer.produce(
                            topic=config.BAD_PLANS_TOPIC,
                            key=msg_key,
                            value=bad_plan,
                            on_delivery=common.kafka_producer_delivery_cb)
                        logger.debug(
                            f'Produced message with key {msg_key} and value {bad_plan}'
                        )

            logger.info(
                'Clearing %s queries from memory and reloading from source Kafka topic...',
                len(queries))
            common.set_offsets_to_time(config.REFRESH_INTERVAL_MINUTES * 60,
                                       kafka_consumer,
                                       kafka_consumer.assignment())
    except KeyboardInterrupt:
        logger.info('Received interrupt request; shutting down...')
    finally:
        kafka_consumer.close()
        kafka_producer.flush(10)
        logger.info('Clean shutdown complete.')
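
# Sketch of what a helper like common.set_offsets_to_time() presumably does
# (assumption: its real implementation is not shown here): rewind each assigned
# partition to the first offset at or after "now minus lookback_seconds" using
# confluent_kafka's offsets_for_times().
import time

from confluent_kafka import TopicPartition

def set_offsets_to_time_sketch(lookback_seconds, consumer, partitions):
    target_ms = int((time.time() - lookback_seconds) * 1000)
    lookup = [TopicPartition(p.topic, p.partition, target_ms)
              for p in partitions]
    consumer.assign(consumer.offsets_for_times(lookup, timeout=10))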
Example n. 47
async def setConfig(ctx, dbQuery, configName, configValue, channelID: str = 'currentChannel'):
    em = discord.Embed(color=embedColor)
    if channelID == 'currentChannel':
        channelID = str(ctx.channel.id)
        em.set_author(name=f'Results for config {configName}')
    else:
        em.set_author(name=f'Results for config {configName} {channelID}')
    # Sets all keys to lowercase, making search arguments case insensitive
    try:
        resultValue = dbQuery['Items'][0]['mpaConfig'][f'{channelID}']
    except KeyError:
        em = discord.Embed(color=failEmbedColor)
        em.add_field(name='Nothing found!', value='Nothing was found.')
        await ctx.send('', embed=em)
        return
    # Probably not the best way to determine if a key is a role list or not
    if 'role' in configName.lower():
        if len(ctx.message.role_mentions) > 0:
            foundRole = ctx.message.role_mentions[0]
            configValue = foundRole.id
        else:
            try:
                foundRole = discord.utils.get(ctx.guild.roles, id=int(configValue))
                if foundRole is None:
                    await sendErrorMessage.invalidArguments(ctx, 'invalidChannelConfigSet', setConfig.__name__, configName)
                    return
            except ValueError:
                await sendErrorMessage.invalidArguments(ctx, 'invalidChannelConfigSet', setConfig.__name__, configName)
                return
        try:
            if str(configValue) in resultValue[f'{configName}']:
                await sendErrorMessage.invalidArguments(ctx, 'ItemAlreadyExists', setConfig.__name__, configName) 
                return
            else:
                updateDB = tonkDB.updateRoleList(ctx.guild.id, channelID, configName, configValue, str(datetime.utcnow()))
            if updateDB is not None:
                em.add_field(name=f'Success:', value=f'added {foundRole.mention} (ID: {configValue}) to {configName}', inline=False)
            else:
                await sendErrorMessage.invalidArguments(ctx, 'invalidChannelConfigSet', setConfig.__name__, configName)
                return
        except KeyError:
            keyExists = "false"
            updateDB = tonkDB.updateRoleList(ctx.guild.id, channelID, configName, configValue, str(datetime.utcnow()), keyExists)
            if updateDB is not None:
                em.add_field(name=f'Success:', value=f'added {foundRole.mention} (ID: {configValue}) to {configName}', inline=False)
            else:
                await sendErrorMessage.invalidArguments(ctx, 'invalidChannelConfigSet', setConfig.__name__, configName)
                return
        await ctx.send('', embed=em)
        return
    else:
        try:
            if str(configValue) in resultValue[f'{configName}']:
                await sendErrorMessage.invalidArguments(ctx, 'ItemAlreadyExists', setConfig.__name__, configName) 
                return
            else:
                if (checkConfigSyntax(ctx, configName, configValue)) is not None:
                    updateDB = tonkDB.updateConfig(ctx.guild.id, channelID, configName, configValue, str(datetime.utcnow()))
                else:
                    updateDB = None
            if updateDB is not None:
                em.add_field(name=f'Success', value=f'Set {configName} to {configValue}', inline=False)
            else:
                await sendErrorMessage.invalidArguments(ctx, 'invalidChannelConfigSet', setConfig.__name__, configName)
                return
        except KeyError:
            if (checkConfigSyntax(ctx, configName, configValue)) is not None:
                keyExists = "false"
                updateDB = tonkDB.updateConfig(ctx.guild.id, channelID, configName, configValue, str(datetime.utcnow()), keyExists)
            else:
                updateDB = None
            if updateDB is not None:
                em.add_field(name=f'Success', value=f'Set {configName} to {configValue}', inline=False)
            else:
                await sendErrorMessage.invalidArguments(ctx, 'invalidChannelConfigSet', setConfig.__name__, configName)
                return
        await ctx.send('', embed=em)
        return
Example n. 48
0
    def test_write_entries_w_extra_properties(self):
        # pylint: disable=too-many-statements
        from datetime import datetime
        from google.logging.type.log_severity_pb2 import WARNING
        from google.cloud.grpc.logging.v2.log_entry_pb2 import LogEntry
        from google.cloud._helpers import UTC, _pb_timestamp_to_datetime
        NOW = datetime.utcnow().replace(tzinfo=UTC)
        TEXT = 'TEXT'
        SEVERITY = 'WARNING'
        LABELS = {
            'foo': 'bar',
        }
        IID = 'IID'
        REQUEST_METHOD = 'GET'
        REQUEST_URL = 'http://example.com/requested'
        STATUS = 200
        REQUEST_SIZE = 256
        RESPONSE_SIZE = 1024
        REFERRER_URL = 'http://example.com/referer'
        USER_AGENT = 'Agent/1.0'
        REMOTE_IP = '1.2.3.4'
        REQUEST = {
            'requestMethod': REQUEST_METHOD,
            'requestUrl': REQUEST_URL,
            'status': STATUS,
            'requestSize': REQUEST_SIZE,
            'responseSize': RESPONSE_SIZE,
            'referer': REFERRER_URL,
            'userAgent': USER_AGENT,
            'remoteIp': REMOTE_IP,
            'cacheHit': False,
        }
        PRODUCER = 'PRODUCER'
        OPID = 'OPID'
        OPERATION = {
            'producer': PRODUCER,
            'id': OPID,
            'first': False,
            'last': True,
        }
        ENTRY = {
            'logName': self.LOG_PATH,
            'resource': {'type': 'global'},
            'textPayload': TEXT,
            'severity': SEVERITY,
            'labels': LABELS,
            'insertId': IID,
            'timestamp': NOW,
            'httpRequest': REQUEST,
            'operation': OPERATION,
        }
        gax_api = _GAXLoggingAPI()
        api = self._make_one(gax_api, None)

        api.write_entries([ENTRY])

        entries, log_name, resource, labels, partial_success, options = (
            gax_api._write_log_entries_called_with)
        self.assertEqual(len(entries), 1)

        entry = entries[0]
        self.assertIsInstance(entry, LogEntry)
        self.assertEqual(entry.log_name, self.LOG_PATH)
        self.assertEqual(entry.resource.type, 'global')
        self.assertEqual(entry.text_payload, TEXT)
        self.assertEqual(entry.severity, WARNING)
        self.assertEqual(entry.labels, LABELS)
        self.assertEqual(entry.insert_id, IID)
        stamp = _pb_timestamp_to_datetime(entry.timestamp)
        self.assertEqual(stamp, NOW)

        request = entry.http_request
        self.assertEqual(request.request_method, REQUEST_METHOD)
        self.assertEqual(request.request_url, REQUEST_URL)
        self.assertEqual(request.status, STATUS)
        self.assertEqual(request.request_size, REQUEST_SIZE)
        self.assertEqual(request.response_size, RESPONSE_SIZE)
        self.assertEqual(request.referer, REFERRER_URL)
        self.assertEqual(request.user_agent, USER_AGENT)
        self.assertEqual(request.remote_ip, REMOTE_IP)
        self.assertEqual(request.cache_hit, False)

        operation = entry.operation
        self.assertEqual(operation.producer, PRODUCER)
        self.assertEqual(operation.id, OPID)
        self.assertFalse(operation.first)
        self.assertTrue(operation.last)

        self.assertIsNone(log_name)
        self.assertIsNone(resource)
        self.assertIsNone(labels)
        self.assertEqual(partial_success, False)
        self.assertIsNone(options)
Example n. 49
0
def now_datetime():
    from datetime import datetime
    return convert_utc_to_user_timezone(datetime.utcnow())
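# Note: convert_utc_to_user_timezone is assumed to be the frappe.utils helper
# that attaches UTC tzinfo and shifts to the site's configured time zone; its
# import sits outside this snippet.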
Example n. 50
0
    async def warn(self,
                   ctx,
                   member: discord.Member,
                   *,
                   reason: Optional[str] = "No Reason Provided"):

        #If a member isn't provided
        if not member:
            await ctx.send("You have to mention a member to warn")
            return

        #If the reason is too long
        if len(reason) > 350:
            await ctx.send("Reason has to be less than 350 Characters")
            return

        #If the author's top role
        #Is under the member's top role
        if ctx.author.top_role.position < member.top_role.position:
            await ctx.send("You don't have permissions to warn this member")
            return

        #If the author tries to warn the owner
        #Of the server (the original checked ctx.author instead of the target)
        if member.id == ctx.guild.owner.id:
            await ctx.send("You can't warn the Owner of the Server")
            return

        #If the author tries to warn
        #Themselves
        if ctx.author == member:
            await ctx.send("You can't warn yourself")
            return

        #Add to member's total warns
        await self.db.add_warns(member.id, ctx.author.id, reason, ctx.guild.id)

        #Embed to try and send to member
        #Wrapped in try/except in case
        #It tries to send to a bot
        try:

            #Get member's total warns
            total_warns = len(await self.db.get_warns(member.id, ctx.guild.id))

            e = discord.Embed(
                color=0x420000,
                title=f"⚠️ **You've been Warned in {ctx.guild}!**")
            #Make fields
            fields = [("__*Warned By*__", ctx.author, True),
                      ("__*Reason*__", reason, True),
                      ("__*Total Warns*__", total_warns, True)]

            #Add fields
            for n, v, i in fields:
                e.add_field(name=n, value=v, inline=i)

            e.timestamp = datetime.utcnow()

            await member.send(embed=e)

        except discord.HTTPException:
            # DMs disabled or the target is a bot; ignore and continue
            pass

        #Get member's total warns
        total_warns = len(await self.db.get_warns(member.id, ctx.guild.id))

        #Make embed
        e = discord.Embed(
            color=0x420000,
            description=
            f"⚠️ **{member}** has been warned. They now have **{total_warns} warn(s)**"
        )

        #Make embed fields
        fields = [("__*Warned by*__", ctx.author, True),
                  ("__*Reason*__", reason, True)]

        for name, value, inline in fields:

            e.add_field(name=name, value=value, inline=inline)

        e.timestamp = datetime.utcnow()

        e.set_footer(text=member, icon_url=member.avatar_url)

        await ctx.send(embed=e)
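# Hypothetical invocation (the prefix and names are placeholders):
#   !warn @SomeUser Spamming in the general channel
# When the reason is omitted it falls back to "No Reason Provided".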
Example n. 51
0
#!/usr/bin/env python

import spidev
import opc
import time
from datetime import datetime, timedelta
import os.path, sys, os
import serial
import signal
from time import sleep

datadir='/var/data/OPC'
filenm='opc'
IDstacji=30100

presentTime=datetime.utcnow()


# Open a SPI connection on CE0
spi = spidev.SpiDev()
spi.open(1, 0)

# Set the SPI mode and clock speed
spi.mode = 1
spi.max_speed_hz = 500000



def date2matlab(dt):
    # Completed using the standard datenum recipe: MATLAB's day 1 falls
    # 366 days before Python's toordinal() day 1
    mdn = dt + timedelta(days=366)
    frac = (dt - datetime(dt.year, dt.month, dt.day)).seconds / (24.0 * 60.0 * 60.0)
    return mdn.toordinal() + frac + dt.microsecond / (24.0 * 60.0 * 60.0 * 1.0e6)
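# Sanity check (an assumption, not part of the original script): MATLAB's
# datenum(2020,1,1) is 737791, so int(date2matlab(datetime(2020, 1, 1)))
# should equal 737791.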
Example n. 52
0
import matplotlib.transforms as transforms
import matplotlib.pyplot as plt
import pickle
from datetime import datetime, timedelta

# Inputs
SECS_IN_MIN = 60
MINS_IN_HOUR = 60
HRS_IN_DAY = 24
missingValue = -999.

# ARCHIVE MODE
#start_date = '20211229'
#stop_date  = '20220121'

# REALTIME MODE
now = datetime.utcnow()
twelve_hrs_ago = now - timedelta(hours=12)
start_date = twelve_hrs_ago.strftime("%Y%m%d")
stop_date = now.strftime("%Y%m%d")

#csv_dir = '/home/disk/funnel/impacts/data_archive/asos_isu'
csv_dir = '/home/disk/bob/impacts/raw/asos_isu'

# Get sitelist
pickle_jar = '/home/disk/bob/impacts/bin/pickle_jar/'
infile = open(pickle_jar + "sitelist.pkl", 'rb')
#infile = open(pickle_jar + "sitelist_test.pkl",'rb')
sitelist = pickle.load(infile)
infile.close()
# FOR TESTING
#sitelist = ['KPIA']
Example n. 53
0
import os
import pytz
from datetime import datetime

def get_current_time():
    utcmoment_naive = datetime.utcnow()
    utcmoment = utcmoment_naive.replace(tzinfo=pytz.utc)
    # pytz zone names are case-sensitive: "Asia/kolkata" would raise UnknownTimeZoneError
    tz = os.environ.get("TZ", "Asia/Kolkata")
    return utcmoment.astimezone(pytz.timezone(tz))
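# Example (assumes the TZ environment variable is unset, so the Asia/Kolkata
# default applies): at 12:00 UTC, get_current_time() returns a datetime at
# 17:30 local time with a +05:30 utcoffset.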
Example n. 54
0
    async def on_command_error(self, ctx, error):

        if isinstance(error, commands.DisabledCommand):
            embed = discord.Embed(
                title=
                f'{Utils.YamlContainerManagement.get_yamlCGL("Embed", "HTitle")}',
                colour=discord.Colour(Utils.Farbe.Red),
                description='Dieser Command ist Deaktiviert.')
            embed.set_thumbnail(url=self.client.user.avatar_url)

            await ctx.message.delete()
            m = await ctx.send(embed=embed)
            await asyncio.sleep(15)
            await m.delete()

        elif isinstance(error, commands.NoPrivateMessage):

            embed = discord.Embed(
                title=
                f'{Utils.YamlContainerManagement.get_yamlCGL("Embed", "HTitle")}',
                colour=discord.Colour(Utils.Farbe.Red),
                description=
                'Du darfst diesen Command nicht in Privatnachrichten nutzen.')
            embed.set_thumbnail(url=self.client.user.avatar_url)

            try:

                await ctx.message.delete()
                m = await ctx.send(embed=embed)
                await asyncio.sleep(15)
                await m.delete()

            except discord.HTTPException:

                m = await ctx.send(embed=embed)
                await asyncio.sleep(15)
                await m.delete()

        # `X or Y` evaluates to the first class, so only BadArgument was
        # ever tested; a tuple checks all three
        elif isinstance(
                error, (commands.BadArgument, commands.ArgumentParsingError,
                        commands.BadBoolArgument)):
            embed = discord.Embed(
                title=
                f'{Utils.YamlContainerManagement.get_yamlCGL("Embed", "HTitle")}',
                colour=discord.Colour(Utils.Farbe.Red),
                description=
                f'Dein angegebenes Argument ist fehlerhaft.\n`{error}`')
            embed.set_thumbnail(url=self.client.user.avatar_url)

            try:

                await ctx.message.delete()
                m = await ctx.send(embed=embed)
                await asyncio.sleep(15)
                await m.delete()

            except discord.HTTPException:

                m = await ctx.send(embed=embed)
                await asyncio.sleep(15)
                await m.delete()

        elif isinstance(
                error, (commands.MissingRequiredArgument,
                        commands.TooManyArguments)):
            embed = discord.Embed(
                title=
                f'{Utils.YamlContainerManagement.get_yamlCGL("Embed", "HTitle")}',
                colour=discord.Colour(Utils.Farbe.Red),
                description=f'Dein angegebenes Argument ist fehlerhaft.')
            embed.set_thumbnail(url=self.client.user.avatar_url)

            try:

                await ctx.message.delete()
                m = await ctx.send(embed=embed)
                await asyncio.sleep(15)
                await m.delete()

            except discord.HTTPException:

                m = await ctx.send(embed=embed)
                await asyncio.sleep(15)
                await m.delete()

        elif isinstance(
                error, (commands.MissingPermissions,
                        commands.BotMissingPermissions)):
            embed = discord.Embed(
                title=
                f'{Utils.YamlContainerManagement.get_yamlCGL("Embed", "HTitle")}',
                colour=discord.Colour(Utils.Farbe.Red),
                description=
                f'Du besitzt nicht die benötigten Rechte ({error.missing_perms}), andernfalls besitze ich nicht die benötigten Rechte!'
            )
            embed.set_thumbnail(url=self.client.user.avatar_url)

            try:

                await ctx.message.delete()
                m = await ctx.send(embed=embed)
                await asyncio.sleep(15)
                await m.delete()

            except discord.HTTPException:

                m = await ctx.send(embed=embed)
                await asyncio.sleep(15)
                await m.delete()

        elif isinstance(error, commands.NotOwner):
            embed = discord.Embed(
                title='Hey! Was machst du da?',
                colour=discord.Colour(Utils.Farbe.Red),
                description=
                'Du kannst mich mit diesem Befehl __stark beschädigen__!')
            embed.set_thumbnail(url=self.client.user.avatar_url)

            try:

                await ctx.message.delete()
                m = await ctx.send(embed=embed)
                await asyncio.sleep(15)
                await m.delete()

            except discord.HTTPException:

                m = await ctx.send(embed=embed)
                await asyncio.sleep(15)
                await m.delete()

        elif isinstance(error, commands.CommandOnCooldown):

            embed = discord.Embed(
                title=
                f'{Utils.YamlContainerManagement.get_yamlCGL("Embed", "HTitle")}',
                colour=discord.Colour(Utils.Farbe.Red),
                description=
                f'Du **musst {"%.2f" % round(error.retry_after, 2)}sek. warten**, bevor du den Command erneut benutzen kannst'
            )
            embed.set_thumbnail(url=self.client.user.avatar_url)

            try:

                await ctx.message.delete()
                m = await ctx.send(embed=embed)
                await asyncio.sleep(15)
                await m.delete()

            except discord.HTTPException:

                m = await ctx.send(embed=embed)
                await asyncio.sleep(15)
                await m.delete()

        elif isinstance(error, commands.CommandNotFound):
            return

        elif isinstance(error, commands.CheckFailure):
            embed = discord.Embed(
                title='Hey! Was machst du da?',
                colour=discord.Colour(Utils.Farbe.Red),
                description=f'Du erfüllst nicht die benötigten Rechte.')
            embed.set_thumbnail(url=self.client.user.avatar_url)

            try:

                await ctx.message.delete()
                m = await ctx.send(embed=embed)
                await asyncio.sleep(15)
                await m.delete()

            except discord.HTTPException:

                m = await ctx.send(embed=embed)
                await asyncio.sleep(15)
                await m.delete()

        elif isinstance(error, commands.CommandInvokeError):

            if isinstance(error.original, Utils.CreditError):

                embed = discord.Embed(title='',
                                      colour=discord.Colour(Utils.Farbe.Red),
                                      description=f'{error.__context__}')
                embed.set_author(name="Credit Bank",
                                 icon_url=Utils.YamlContainerManagement.gce(
                                     "Images", "Credits"))
                embed.set_thumbnail(url=self.client.user.avatar_url)

                try:

                    await ctx.message.delete()
                    m = await ctx.send(embed=embed)
                    await asyncio.sleep(15)
                    await m.delete()

                except discord.HTTPException:

                    m = await ctx.send(embed=embed)
                    await asyncio.sleep(15)
                    await m.delete()
                return

            elif isinstance(error.original, Utils.MusicError):

                embed = discord.Embed(
                    title=
                    f'{Utils.YamlContainerManagement.get_yamlCGL("Embed", "HTitle")}',
                    colour=discord.Colour(Utils.Farbe.Red),
                    description=
                    f'Etwas in der Rubrik: `Enhanced Music` ist schiefgelaufen. Versuche es erneut.\n`{error}`'
                )
                embed.set_thumbnail(url=self.client.user.avatar_url)

                try:

                    await ctx.message.delete()
                    m = await ctx.send(embed=embed)
                    await asyncio.sleep(15)
                    await m.delete()

                except discord.HTTPException:

                    m = await ctx.send(embed=embed)
                    await asyncio.sleep(15)
                    await m.delete()
                return

            elif isinstance(error.original, Utils.UccountError):

                embed = discord.Embed(title='-Uccount-',
                                      colour=discord.Colour(Utils.Farbe.Red),
                                      description=f'{error.__context__}')
                embed.set_thumbnail(url=self.client.user.avatar_url)

                try:

                    await ctx.message.delete()
                    m = await ctx.send(embed=embed)
                    await asyncio.sleep(15)
                    await m.delete()

                except discord.HTTPException:

                    m = await ctx.send(embed=embed)
                    await asyncio.sleep(15)
                    await m.delete()
                return

            else:

                embed = discord.Embed(
                    title='ACHTUNG!',
                    colour=discord.Colour(Utils.Farbe.Red),
                    description=
                    'Der Command ist **korrumpiert**!\nTritt dieser Fehler erneut auf, '
                    'kontaktiere **dringend** den Support: **!s**')
                embed.add_field(name='**LOG:**',
                                value=f'```css\n[{error}]\n```',
                                inline=True)
                embed.set_thumbnail(url=self.client.user.avatar_url)

                async with aiohttp.ClientSession() as session:
                    url = "https://discordapp.com/api/webhooks/815708355371860069/gy3_edx9paMdTg6f-0WL2qOlWnGKalV_10SPwK3jjdWV3f4dPSbvLStyDmClkAVQBRgu"

                    webhook = Webhook.from_url(
                        url, adapter=AsyncWebhookAdapter(session))

                    timestamp = datetime.utcnow()
                    trace = traceback.format_exception(None, error,
                                                       error.__traceback__)

                    erembed = discord.Embed(
                        title=
                        "\u200b\nEin schwerwiegender Fehler ist aufgetreten!\n\u200b",
                        colour=discord.Colour(Utils.Farbe.Red))
                    erembed.set_author(
                        name=f"{timestamp.strftime(r'%I:%M %p')}",
                        icon_url=Utils.YamlContainerManagement.get_yamlCGL(
                            "Bilder", "Clock"))
                    erembed.add_field(
                        name='**OPERATOR:**',
                        value=f'```fix\n[{ctx.author} / {ctx.author.id}]\n```',
                        inline=False)
                    try:
                        erembed.add_field(
                            name='**SERVER:**',
                            value=f'```fix\n[{ctx.guild.name}]\n```',
                            inline=False)
                        erembed.add_field(
                            name='**KANAL:**',
                            value=f'```fix\n[{ctx.channel.name}]\n```',
                            inline=False)
                    except AttributeError:
                        pass
                    erembed.add_field(
                        name='**COMMAND:**',
                        value=
                        f'```fix\n[{self.client.command_prefix}{ctx.command.qualified_name}]\n```',
                        inline=False)
                    erembed.add_field(
                        name='**NACHRICHT:**',
                        value=f'```fix\n[{ctx.message.content}]\n```',
                        inline=False)
                    erembed.add_field(
                        name='**ERROR:**',
                        value=f'```css\n[{error}]\n```\n\n\u200b',
                        inline=False)
                    erembed.add_field(name='**TRACEBACK:**',
                                      value=f'\u200b',
                                      inline=False)
                    erembed.set_thumbnail(url=self.client.user.avatar_url)
                    for frame in trace:
                        erembed.add_field(name='\u200b',
                                          value=f'```python\n{frame}\n```',
                                          inline=False)

                    await webhook.send(
                        username="******",
                        avatar_url=self.client.user.avatar_url,
                        embed=erembed)

                    try:

                        await ctx.message.delete()
                        m = await ctx.send(embed=embed)
                        await asyncio.sleep(15)
                        await m.delete()

                    except discord.HTTPException:

                        m = await ctx.send(embed=embed)
                        await asyncio.sleep(15)
                        await m.delete()
Example n. 55
0
def virality_scraper(USER_ID=None, PASSCODE=None, virality_job=None):

    start_time = time.time()
    # Initialize Mongo DB
    print("Initializing Mongo DB ...")
    initializationSuccess = False
    try:
        coll = s3_mongo_helper.initialize_mongo()
        initializationSuccess = True
        print("Initialized successfully")
    except Exception as e:
        print("Initialization failure")
        print(logging.traceback.format_exc())

    if initializationSuccess:
        updates = 0
        failed = 0
        today = datetime.utcnow()

        if virality_job == 1:
            # Get metrics for t+1 & t+2
            start = today - timedelta(days=2)
            end = today - timedelta(days=1)
            print(
                "# Updating virality metrics for posts 1 & 2 day old posts ..."
            )

        elif virality_job == 2:
            # Get metrics for t+3 ... t+5
            start = today - timedelta(days=5)
            end = today - timedelta(days=3)
            print("# Updating virality metrics for 3, 4 & 5 day old posts ...")

        cursor = coll.find({
            "scraped_date": {
                "$gte": start,
                "$lte": end
            },
            "scraper_type": "fresh"
        })
        for doc in cursor:
            try:
                # Get timestamp for day t
                timestamp = pd.to_datetime(doc["timestamp"])
                # Calculate days since t
                diff = str((today - timestamp).days)
                # If the post is actually less than 5 days old
                if int(diff) <= 5:
                    # Get current virality metrics
                    result = sharechat_helper.get_current_metrics(
                        USER_ID, PASSCODE, doc["post_permalink"])
                    # Update doc
                    coll.update({"_id": doc["_id"]}, {
                        "$set": {
                            "comments_t+" + diff: result[0],
                            "external_shares_t+" + diff: result[1],
                            "likes_t+" + diff: result[2],
                            "reposts_t+" + diff: result[3],
                            "views_t+" + diff: result[4]
                        }
                    })
                    updates += 1

                    # For debugging
                    # print(coll.find_one({"_id": doc["_id"]}))
                    # print("")

                else:
                    # If for some reason the post is older
                    pass

            except Exception:
                failed += 1

        print("Scraping complete")
        print("Updated virality metrics for {} posts".format(updates))
        print("{} updates failed".format(failed))
        print("Time taken: %s seconds" % (time.time() - start_time))
print "START: {0}, STOP: {1}".format(
    time.strftime('%Y-%m-%d %H:%M:%S', start_time),
    time.strftime('%Y-%m-%d %H:%M:%S', stop_time))


def get_timestamp(d):
    # Promote a plain date to midnight so utctimetuple() works uniformly.
    # Note: datetime.time(0, 0, 0, 0) would raise a TypeError here, since
    # datetime.time is an instance method; datetime.min.time() gives 00:00:00.
    if isinstance(d, date) and not isinstance(d, datetime):
        d = datetime.combine(d, datetime.min.time())

    msec = str(d.microsecond).zfill(6)

    return float('%s.%s' % (calendar.timegm(d.utctimetuple()), msec))
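# Quick check (an assumption, not in the original snippet): for
# datetime(1970, 1, 1, 0, 0, 1, 500000) the epoch offset is 1.5 seconds,
# so get_timestamp(...) == 1.5.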


five_min = timedelta(minutes=5)
stop_time2_dt = datetime.utcnow()
start_time2_dt = stop_time2_dt - five_min

print "START: {0}, STOP: {1}".format(
    start_time2_dt.strftime(
        '%Y-%m-%d %H:%M:%S'
    ),  #datetime.strftime('%Y-%m-%d %H:%M:%S', start_time2_dt),
    stop_time2_dt.strftime(
        '%Y-%m-%d %H:%M:%S'
    )  #datetime.strftime('%Y-%m-%d %H:%M:%S', stop_time2_dt)
)

# stop_time2 = time.gmtime(float(get_timestamp(stop_time2_dt)))
# start_time2 = time.gmtime(float(get_timestamp(start_time2_dt)))

# timestamp
Example n. 57
0
parser.add_argument(
    '-save',
    '--save',
    type=int,
    help=
    'Save blocks to disk how often (in seconds) should be multiple of --delay',
    default=180)
parser.add_argument('-delay',
                    '--delay',
                    type=int,
                    help='recorder delay (in seconds)',
                    default=15)
args = parser.parse_args()
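# Hypothetical invocation (the script name is a placeholder; only the flags
# defined above are shown):
#   python recorder.py --delay 15 --save 180
# --save should stay a multiple of --delay so saves land on a polling tick.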

json_data = []
timeString = datetime.utcnow().strftime("%Y-%m-%d")
filename = 'stats_' + timeString + '.json'
#Rename existing file
try:
    os.rename('stats.json',
              'stats.json.' + datetime.utcnow().strftime("%Y%m%d%H%M%S"))
    print('Renaming stats.json ...')
except OSError:
    print('stats.json does not exist, creating a new file ...')


def writeBkup():
    global json_data
    global filename
    print('Writing to ' + filename + ' ...')
    try:
Example n. 58
0
async def main():
    global json_data
    global filename
    global timeString
    try:
        with open(filename) as jsonfile:
            json_data = json.load(jsonfile)
    except (OSError, ValueError):
        print(filename + ' does not exist, creating a new file ...')
    loop_count = 0
    # api-endpoint
    URL = "http://" + args.node_url + ":" + args.node_port
    print("Connecting to: " + URL)

    # defining a params dict for the parameters to be sent to the API
    data1 = {'action': 'active_difficulty', 'include_trend': 'true'}
    data2 = {'action': 'confirmation_active'}
    data3 = {'action': 'stats', 'type': 'objects'}
    data4 = {'action': 'block_count', 'include_cemented': 'true'}
    data5 = {'action': 'confirmation_quorum'}
    data6 = {'action': 'bootstrap_status'}

    while 1:
        filename2 = 'stats_' + datetime.utcnow().strftime("%Y-%m-%d") + '.json'
        if filename2 != filename:
            writeBkup()
            timeString = datetime.utcnow().strftime("%Y-%m-%d")
            json_data = []
            filename = filename2
        loop_count += 1
        currentTime = time.time()
        # sending get request and saving the response as response object
        try:
            r = requests.post(url=URL, json=data1)
            r2 = requests.post(url=URL, json=data2)
            r3 = requests.post(url=URL, json=data3)
            r4 = requests.post(url=URL, json=data4)
            r5 = requests.post(url=URL, json=data5)
            r6 = requests.post(url=URL, json=data6)
            # extracting data in json format
            response = r.json()
            response2 = r2.json()
            response3 = r3.json()
            response4 = r4.json()
            response5 = r5.json()
            response6 = r6.json()
        except requests.exceptions.RequestException:
            print(
                "Error connecting to RPC server. Make sure you have enabled it in ~/Nano/config.json and check "
                "./sample_client.py --help")
            # Skip this iteration: the response objects are unset, and the
            # code below would otherwise raise a NameError on first failure.
            sleep(args.delay)
            continue


#        print(response2)
        try:
            data = {}
            data['timestamp'] = str(time.time())
            data['confirmation_active'] = str(len(response2['confirmations']))
            data['network_minimum'] = response['network_minimum']
            data['network_current'] = response['network_current']
            data['multiplier'] = response['multiplier']
            data['difficulty_trend_min'] = str(
                min(map(float, response['difficulty_trend'])))
            data['difficulty_trend_max'] = str(
                max(map(float, response['difficulty_trend'])))
            data['difficulty_trend_median'] = str(
                statistics.median(map(float, response['difficulty_trend'])))
            data['difficulty_trend_mean'] = str(
                statistics.mean(map(float, response['difficulty_trend'])))
            data['alarm_operations_count'] = response3['node']['alarm'][
                'operations']['count']
            data['ledger_bootstrap_weights_count'] = response3['node'][
                'ledger']['bootstrap_weights']['count']
            data['active_roots_count'] = response3['node']['active']['roots'][
                'count']
            data['active_blocks_count'] = response3['node']['active'][
                'blocks']['count']
            data['active_confirmed_count'] = response3['node']['active'][
                'confirmed']['count']
            data['active_cementable_count'] = response3['node']['active'][
                'priority_cementable_frontiers_count']['count']
            data['tcp_channels_count'] = response3['node']['tcp_channels'][
                'channels']['count']
            data['tcp_channels_attempts_count'] = response3['node'][
                'tcp_channels']['attempts']['count']
            data['response_channels_count'] = response3['node'][
                'response_channels']['channels']['count']
            data['vote_processor_count'] = response3['node']['vote_processor'][
                'votes']['count']
            data['vote_processor_rep1'] = response3['node']['vote_processor'][
                'representatives_1']['count']
            data['vote_processor_rep2'] = response3['node']['vote_processor'][
                'representatives_2']['count']
            data['vote_processor_rep3'] = response3['node']['vote_processor'][
                'representatives_3']['count']
            data['block_processor_state'] = response3['node'][
                'block_processor']['state_blocks']['count']
            data['block_processor_blocks'] = response3['node'][
                'block_processor']['blocks']['count']
            data['block_processor_hashes'] = response3['node'][
                'block_processor']['blocks_hashes']['count']
            data['block_processor_forced'] = response3['node'][
                'block_processor']['forced']['count']
            data['block_processor_rolled_back'] = response3['node'][
                'block_processor']['rolled_back']['count']
            data['block_processor_generator'] = response3['node'][
                'block_processor']['generator']['state_blocks']['count']
            data['block_arrival_count'] = response3['node']['block_arrival'][
                'arrival']['count']
            data['online_reps_arrival_count'] = response3['node'][
                'online_reps']['arrival']['count']
            data['votes_cache_count'] = response3['node']['votes_cache'][
                'cache']['count']
            data['block_uniquer_count'] = response3['node']['block_uniquer'][
                'blocks']['count']
            data['vote_uniquer_count'] = response3['node']['vote_uniquer'][
                'votes']['count']
            data['confirmation_height_count'] = response3['node'][
                'pending_confirmation_height']['pending']['count']
            data['block_count'] = response4['count']
            data['unchecked_count'] = response4['unchecked']
            data['cemented_count'] = response4['cemented']
            data['quorum_delta'] = response5['quorum_delta']
            data['online_weight_minimum'] = response5['online_weight_minimum']
            data['online_stake_total'] = response5['online_stake_total']
            data['peers_stake_total'] = response5['peers_stake_total']
            data['peers_stake_required'] = response5['peers_stake_required']
            if 'clients' in response6:
                data['bootstrap_clients'] = response6['clients']
                data['bootstrap_pulls'] = response6['pulls']
                data['bootstrap_pulling'] = response6['pulling']
                data['bootstrap_connections'] = response6['connections']
                data['bootstrap_target_connections'] = response6[
                    'target_connections']
                data['bootstrap_total_blocks'] = response6['total_blocks']
                data['bootstrap_lazy_pulls'] = response6['lazy_pulls']
            else:
                data['bootstrap_clients'] = '0'
                data['bootstrap_pulls'] = '0'
                data['bootstrap_pulling'] = '0'
                data['bootstrap_connections'] = '0'
                data['bootstrap_target_connections'] = '0'
                data['bootstrap_total_blocks'] = '0'
                data['bootstrap_lazy_pulls'] = '0'
            json_data.append(data)
        except Exception as e:
            print(e)
        #            print('\nAn error occurred getting data')
        if loop_count % (round(args.save / args.delay)) == 0:
            writeBkup()
        endTime = time.time()
        print('{} records - '.format(len(json_data)) + 'Time to Process: ' +
              str(endTime - currentTime) + ' - Active Confirmations: ' +
              str(len(response2['confirmations'])))
        sleep(max(0, args.delay - (endTime - currentTime)))
Example n. 59
0
    def start(self):
        self.started = datetime.utcnow()
        return self.pool.start()
Example n. 60
0
def get_dates(calendar_id, max_number, categories):
    # Credentials for Google Calendar API
    creds = None

    # The file token.pickle stores the user's access and refresh tokens, and is
    # created automatically when the authorization flow completes for the first
    # time.
    if os.path.exists('token.pickle'):
        with open('token.pickle', 'rb') as token:
            creds = pickle.load(token)
    # If there are no (valid) credentials available, let the user log in.
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())
        else:
            flow = InstalledAppFlow.from_client_secrets_file(
                'credentials.json', SCOPES)
            creds = flow.run_local_server(port=0)
        # Save the credentials for the next run
        with open('token.pickle', 'wb') as token:
            pickle.dump(creds, token)

    service = build('calendar', 'v3', credentials=creds)

    # Call the Calendar API
    now = datetime.utcnow().isoformat() + 'Z'  # 'Z' indicates UTC time
    print('Getting the upcoming', max_number, 'events')
    events_result = service.events().list(calendarId=calendar_id,
                                          timeMin=now,
                                          maxResults=max_number,
                                          singleEvents=True,
                                          orderBy='startTime').execute()  # pylint: disable=maybe-no-member
    events = events_result.get('items', [])

    if not events:
        print('No upcoming events found.')
        return

    eventList = []

    for event in events:
        evnt = {}

        # .get() tries to get first argument value from dictionary, second argument is alternative!
        start = event['start'].get('dateTime', event['start'].get('date'))
        start_dttm = datetime.fromisoformat(start)

        end = event['end'].get('dateTime', event['end'].get('date'))
        end_dttm = datetime.fromisoformat(end)

        evnt['start_dttm_iso'] = start
        evnt['start_date'] = start_dttm.date().strftime("%d.%m.%Y")
        evnt['start_day'] = start_dttm.date().strftime("%-d")
        evnt['start_month'] = start_dttm.date().strftime("%b")
        evnt['end_dttm_iso'] = end
        # %A on a bare time object always yields "Monday" (times carry no
        # date); format the full datetime to get the real weekday
        evnt['start_weekday'] = start_dttm.strftime("%A")
        if (start_dttm.time().strftime("%H:%M") == "00:00"
                and end_dttm.time().strftime("%H:%M") == "00:00"):
            evnt['start_time'] = "whole day"
            evnt['end_time'] = ""
        else:
            evnt['start_time'] = start_dttm.time().strftime("%H:%M")
            evnt['end_time'] = end_dttm.time().strftime("%H:%M")

        evnt['title'] = event['summary'].replace(": ", ":\n", 1)

        if 'description' in event.keys():
            desc = event['description']
            evnt['description'] = desc.replace("\n\n\n", "\n\n")
            for category in categories:
                if category in desc:
                    evnt['type'] = category
        else:
            evnt['description'] = event['summary']
            evnt['type'] = ""

        if 'location' not in event:
            evnt['location'] = ""
        else:
            loc = event['location']
            evnt['location'] = loc
            if len(loc.split(", ", 1)) > 1:
                evnt['locationName'] = loc.split(", ", 1)[0]
                evnt['locationAddress'] = loc.split(", ", 1)[1]
            else:
                evnt['locationName'] = loc
                evnt['locationAddress'] = ""
            evnt['locationMapsSearch'] = ("https://google.com/maps/search/" +
                                          evnt['locationName'].replace(" ", "+"))

        eventList.append(evnt)

    return eventList
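# Hypothetical usage (calendar ID, count and categories are placeholders):
#   events = get_dates('primary', 5, ['Workshop', 'Meetup'])
#   for e in events or []:
#       print(e['start_date'], e['start_time'], e['title'])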