def exportWithmeness_wfactors():
	'''
	Produce the file all_withmeness.csv from all the files in attentionLog/ whose names start with "withmeness", joined with the session info.
	'''
	csvfileout = open('all_withmeness.csv', 'w')
	csvheader=("idx","datetime","child_id","child_name","session","fformation","alone","groupid","gender","chouchou","withmeness_value","evol")
	writer = csv.DictWriter(csvfileout, csvheader)
	writer.writeheader()
	
	all_data=[]
	files_in_dir = os.listdir('./attentionLog/')
	for file_in_dir in files_in_dir:
		print(file_in_dir)
		if(file_in_dir.find("withmeness")==0):
			data_w = withmenesslogtoCSV('./attentionLog/'+file_in_dir)
			all_data.append(data_w)

	info=readInfoFile()
	for data in all_data:
		for row in data:
			current_time = row[1]
			for i in info:
				#print(i)
				start_time = datetime.strptime(i[2]+' '+i[3], '%d.%m.%y %H:%M:%S.%f')
				end_time = datetime.strptime(i[2]+' '+i[6], '%d.%m.%y %H:%M:%S.%f')
				if(current_time>= start_time and  current_time<= end_time):
					#print(start_time)
					print(datetime.strftime(current_time,'%Y-%m-%d %H:%M:%S:%f'),i[0], i[1],i[-1],i[5], i[9],i[10],row[2])
					new_row = {"idx":row[0],"datetime":datetime.strftime(current_time,'%Y-%m-%d %H:%M:%S:%f'),"child_id":i[0],"child_name":i[1],"session":i[-5],"fformation":i[5],"alone":i[-4],"groupid":i[-3],"gender":i[-2],"chouchou":i[-1],"withmeness_value":row[2], "evol":row[3]}
					writer.writerow(new_row)
					#print(end_time)
	csvfileout.close()
def get_london_events(page,token):
		now = datetime.now()
		then = now + timedelta(days=7)

		format_s = "%Y-%m-%dT%H:%M:%S"
		start_time=datetime.strftime(now,format_s)
		end_time=datetime.strftime(then,format_s)

		data = {
			"expand":"category,ticket_classes,venue",
			"venue.city": "london",
			"start_date.range_start": start_time,
			"start_date.range_end": end_time,
			"sort_by":"date",
			"page": page , # request page from API
		}

		response = requests.get(  # make an HTTP GET request (other verbs: PUT, POST, DELETE)
			"https://www.eventbriteapi.com/v3/events/search/", # URL
			headers = {  # headers carry request metadata (e.g. cookies); each header is a KEY: VALUE pair
				"Authorization": "Bearer "+token,              # Eventbrite uses OAuth "Bearer" tokens
			},
			params=data,  # the query string after the "?" in the URL (everything folded into one string)
			verify = True,  # verify the SSL certificate (our URL starts with https = secure)
		)

		return response.json()
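
A hypothetical invocation of the helper above (the token value is a placeholder, not a real credential):

token = "YOUR_EVENTBRITE_TOKEN"  # placeholder; use a token from your Eventbrite account
first_page = get_london_events(page=1, token=token)  # dict parsed from the JSON response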
def url_date_intervals(start, end):
	'''
	Takes:
		- start, string representing the starting date to retrieve data, YYYY-MM-DD
		- end, string representing the ending date to retrieve data, YYYY-MM-DD

	Returns:
		- rv, a list with the dates to input in a NOAA API url request
	'''
	rv = []
	start = datetime.strptime(start, "%Y-%m-%d").date()
	end = datetime.strptime(end, "%Y-%m-%d").date()
	new_date = start + relativedelta(years=1)

	date = datetime.strftime(start, "%Y-%m-%d")
	rv.append(date)

	while new_date < end:
		# print(new_date, end)
		date = datetime.strftime(new_date, "%Y-%m-%d")
		rv.append(date)
		new_date = new_date + relativedelta(years=1)

	# print(end)
	end = datetime.strftime(end, "%Y-%m-%d")
	rv.append(end)

	return rv
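
For illustration, a short usage sketch; the expected output below is worked out by hand from the loop above:

dates = url_date_intervals("2010-01-01", "2013-06-15")
print(dates)
# ['2010-01-01', '2011-01-01', '2012-01-01', '2013-01-01', '2013-06-15']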
 def getObsFiles(self, variable, year, maxLeadtime=10, minLeadtime=1):
     """
     Get the observation files from a specified folder.

     :param variable: name of the variable expected in the observation file
     :param year: start year of the decadal experiment
     :return: tmp file with maxLeadtime years of observations
     """
     if not os.path.isfile(self.observation):
         raise NoFilesFoundError, '%s does not exist.' % self.observation
     
     variable_file = cdo.showname(input=self.observation)[0]
     if variable != variable_file:
         print 'WARNING: Variable in observation file is not %s. \n Variable will be renamed.' % variable
         self.observation = cdo.chvar(variable_file+','+variable, input=self.observation,
                                      output=self.tmpDir+self.getFilename(self.observation))
     
     if len(str(year)) == 4:
         year = int(str(year)+'12')    
     start_month = datetime.strftime(datetime.strptime(str(year), "%Y%m") + relativedelta(months=1), '%Y-%m-01')
     end_month = datetime.strftime(datetime.strptime(str(year), "%Y%m") + relativedelta(months=maxLeadtime+1) - relativedelta(days=1),'%Y-%m-31')
     
     tmp = cdo.seldate(','.join([start_month, end_month]), input=self.observation,
                       output=self.tmpDir+'reanalysis_'+experiment+str(year+1)+'-'+str(year+maxLeadtime)+'.nc', options='-f nc')
     return tmp
Example #5
def dateEnClair(dateTraitement):
	dateTemp=datetime.strftime(dateTraitement, "%d-%m-%Y")
	dateDuJour=datetime.strftime(datetime.now(), "%d-%m-%Y")
	if dateTemp == dateDuJour :
		return "Aujourd'hui"
	else:
		return convertDate(dateTraitement)
def format_datetime(datetime, milliseconds):
    """Return a date stamp with milliseconds in the format in which
    GenAV requires it."""
    return (
        datetime.strftime("%Y/%m/%d"),
        datetime.strftime("%H:%M"),
        datetime.strftime("%S") + ("%03d" % (milliseconds,))
    )
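
A quick check of the milliseconds handling above (a sketch; the tuple below follows from the three format strings):

from datetime import datetime as dt
print(format_datetime(dt(2020, 1, 2, 3, 4, 5), 123))
# ('2020/01/02', '03:04', '05123')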
 def __create_donation_report_data(self, start_date, end_date):
     donations = Donation.objects.filter(
         date__gte=start_date, date__lte=end_date).order_by('date', 'donor__name')
     return {
             "donations" : donations,
             "total_donated" : sum([d.amount for d in donations if d.amount]),
             "start_date" : datetime.strftime(start_date, '%Y-%m-%d'),
             "end_date" : datetime.strftime(end_date, '%Y-%m-%d')
            }
    def __create_visit_report_data(self, start_date, end_date):
        all_visits = (Visit.objects.filter(date__gte=start_date, date__lte=end_date)
            .prefetch_related("visitresponse_set", "visitresponse_set__question", "visit_type")).all()

        return {
                "visits" : all_visits, 
                "start_date" : datetime.strftime(start_date, '%Y-%m-%d'),
                "end_date" : datetime.strftime(end_date, '%Y-%m-%d')
               }
Example #9
def random(request):
	username = request.user.username
	random_photo = sample(list(Photo.objects.all().values('title')),1)[0]
	title = random_photo['title']
	photo = Photo.objects.get(title=title)  # fetch the Photo once instead of querying five times
	year = datetime.strftime(photo.date_created, '%Y')
	month = datetime.strftime(photo.date_created, '%B')
	orientation = photo.orientation
	timetaken = photo.time_created
	datetaken = photo.date_created
	
	context = {'title': random_photo['title'],'year': year, 'month': month, 'time': timetaken, 'date': datetaken, 'ori':orientation,'username': username}
	return render(request, 'photos/random.html', context)
Example #10
def buildTileRequestDocument(tileorigin, tilesource, x, y, z, status, datetime, ip):
    r = {
        'ip': ip,
        'origin': tileorigin if tileorigin else "",
        'source': tilesource,
        'location': z+'/'+x+'/'+y,
        'z': z,
        'status': status,
        'year': datetime.strftime('%Y'),
        'month': datetime.strftime('%Y-%m'),
        'date': datetime.strftime('%Y-%m-%d'),
        'date_iso': datetime.isoformat()
    }
    return r
Example #11
 def getDayFolder(self, baseDir):
     tmpdir = baseDir
     # Ensure the subdirectories exist, creating them if they
     # don't. This does not create superfluous directories, since
     # presumably at least one photo is meant to be copied into the
     # folder; otherwise this function wouldn't be called.
     for tmp in toutformat.strip('/').split('/'):
         tmpdir = '/'.join([tmpdir, datetime.strftime(self.time, tmp)])
         if not os.path.exists(tmpdir):
             os.mkdir(tmpdir)
     return [datetime.strftime(self.time, toutformat) + x 
             for x in os.listdir(tmpdir)
             if x[:tformatlen] == datetime.strftime(self.time, tformat) 
             and os.path.isdir(baseDir + datetime.strftime(self.time, toutformat) + x)]
Example #12
def visualize_events():
  global final_db, vis_count
  print "update visualization: " + str(vis_count)
  msg = ""
  for house in houses.find():
     datetime = house['time']
     date = datetime.strftime('%y%m%d') #YYMMDD
     time = datetime.strftime('%H%M%S') #HHMMSS
     msg += str("#" + str(house['lat'])[:-3]) + "," + str(house['lng'])[:-3] + "," + str(date) + "," + str(time) + "," + str(house['outage_cnt'])
  print msg
  final_db.remove({})
  blob = {"msg" : base64.b64encode(msg)}
  final_db.insert(blob)
  vis_count += 1 
def exportTargets_wfactors():
	csvfileout = open('all_targets.csv', 'w')
	csvheader=("idx","datetime","child_id","child_name","session","fformation","alone","groupid","gender","chouchou","target")
	writer = csv.DictWriter(csvfileout, csvheader)
	writer.writeheader()
	info=readInfoFile()
	all_data=[]
	files_in_dir = os.listdir('./attentionLog/')
	for file_in_dir in files_in_dir:
		print(file_in_dir)
		if(file_in_dir.find("attention")==0):
			data_w = targetslogtoCSV('./attentionLog/'+file_in_dir)
			all_data.append(data_w)
	
	for data in all_data:
		for row in data:
			current_time = row[1]
			for i in info:
				start_time = datetime.strptime(i[2]+' '+i[3], '%d.%m.%y %H:%M:%S.%f')
				end_time = datetime.strptime(i[2]+' '+i[6], '%d.%m.%y %H:%M:%S.%f')
				
				if(current_time>= start_time and  current_time<= end_time):
					new_row = {"idx":row[0],"datetime":datetime.strftime(current_time,'%Y-%m-%d %H:%M:%S:%f'),"child_id":i[0],"child_name":i[1],"session":i[-5],"fformation":i[5],"alone":i[-4],"groupid":i[-3],"gender":i[-2],"chouchou":i[-1],"target":row[2]}
					writer.writerow(new_row)
	csvfileout.close()
def resample_withUF(wfname="withme_sorted.csv", uffname ="uf_sorted.csv"):
	'''
	takes the closest value of withmeness regarding the time of the user_feedback
	'''
	wcsvheader=("X","idx","datetime","child_id","child_name","session","fformation","alone","groupid","gender","chouchou","withmeness_value","evol","new_w")
	csvheader=("idx","datetime","child_id","child_name","session","fformation","alone","groupid","gender","chouchou","withmeness_value","evol","new_w","feedback")
	fcsvheader=("X","idx","datetime","child_id","child_name","session","fformation","alone","groupid","gender","chouchou","feedback")
	
	wcsvfilein = open(wfname, 'r')
	wreader = csv.DictReader(wcsvfilein, wcsvheader)
	fcsvfilein = open(uffname, 'r')
	freader = csv.DictReader(fcsvfilein, fcsvheader)
	next(freader, None)  # skip the headers
	next(wreader, None)  # skip the headers
	
	prev_time = datetime(1985, 7, 2, 12, 30) 
	ufrow = next(freader)  # Python 3: csv readers have no .next() method
	u_time =  datetime.strptime(ufrow["datetime"], '%Y-%m-%d %H:%M:%S:%f')
	csvfileout = open('uf_withmeness.csv', 'w')
	writer = csv.DictWriter(csvfileout, csvheader)
	writer.writeheader()
	for wrow in wreader:
		print(wrow["datetime"])
		c_time = datetime.strptime(wrow["datetime"], '%Y-%m-%d %H:%M:%S:%f')
		while((u_time> prev_time) and (u_time<= c_time)):
			new_row = {"idx":ufrow["idx"],"datetime":datetime.strftime(c_time,'%Y-%m-%d %H:%M:%S:%f'),"child_id":wrow["child_id"],"child_name":wrow["child_name"],"session":wrow["session"],"fformation":wrow["fformation"],"alone":wrow["alone"],"groupid":wrow["groupid"],"gender":wrow["gender"],"chouchou":wrow["chouchou"],"withmeness_value":wrow["withmeness_value"], "evol":wrow["evol"],"new_w":wrow["new_w"],"feedback":ufrow["feedback"]}
			writer.writerow(new_row)
			ufrow = next(freader)
			u_time =  datetime.strptime(ufrow["datetime"], '%Y-%m-%d %H:%M:%S:%f')
		prev_time = c_time
	
	wcsvfilein.close()
	fcsvfilein.close()
	csvfileout.close()
Example #15
	def select_records_by_date_and_hour(data, datetime):
		selected_records = []
		for rec_id in data:
			if DataExtractor.date_time_of_record(data[rec_id]).strftime('%Y-%m-%d %H')==datetime.strftime('%Y-%m-%d %H'):
				selected_records.append(data[rec_id])
		
		return selected_records
def formatHeader(header, color, datetime):
    bgColor = getattr(Back, color)
    fgColor = getattr(Fore, color)
    msg = Style.DIM + datetime.strftime("%H:%M:%S.%f") + "  " + bgColor  + Style.BRIGHT + Fore.WHITE + header + fgColor + Back.RESET + Style.NORMAL
    # Pad to a fixed width so the headers line up
    msg += " " * (55 - (len(msg)))
    return msg
Example #17
    def get_donate_records(self):
        raw_out = db.execute('SELECT name, amount, time, status FROM tb_donate_record where (status=\'1\') or (sid=\'%s\' and uid=\'%s\')' % (session.sid, session['uid']))
        selected_data = []
        # UTC Zone
        from_zone = tz.gettz('UTC')
        # China Zone
        to_zone = tz.gettz('CST')
        STATUS_DESCRIPTION = [u"等待确认", u"打赏成功", u"打赏失败"]  # "awaiting confirmation", "reward succeeded", "reward failed"
        for i in xrange(len(raw_out)):
            row_item = []
            for k in xrange(len(raw_out[i])):
                item = raw_out[i][k]
                if k == 3:
                    item = STATUS_DESCRIPTION[int(item)]

                # Convert datetime.datetime to a date string
                if type(item) == datetime:
                    item = item.replace(tzinfo=from_zone)
                    # Convert time zone
                    local = item.astimezone(to_zone)
                    row_item.append(datetime.strftime(local, "%Y-%m-%d %H:%M:%S"))
                else:
                    row_item.append(item)

            selected_data.append(row_item)

        return JSONEncoder().encode(selected_data)
def print_header(file_report):
    '''Given the path of the report file, (over)write to the file the header
    of the report. The header should be similar to below except the date and
    time. It should be close to the date and time you run the script.

    ************************************
    *        APACHE LOG REPORT         *
    * Generated on 2015-04-19 09:04 PM *
    ************************************

    Note:
        - See datetime module reference:
            https://docs.python.org/3.4/library/datetime.html
        - To get the current date and time, see datetime.now function
        - To return formatted date and time, see datetime.strftime function
    '''
    # +++your code here+++
    from datetime import datetime

    now = datetime.now()

    f = open(file_report, 'w')

    f.write('\
************************************\n\
*        APACHE LOG REPORT         *\n\
* Generated on ' + now.strftime("%Y-%m-%d %I:%M %p") + ' *\n\
************************************\n')

    f.close()
Example #19
def format_time(datetime):
	format = "%I:%M %p"
	if datetime != "skip":
		date_time_formatted = datetime.strftime(format)
	else:
		date_time_formatted = "--"
	return date_time_formatted
Example #20
    def get_city_yyb(self):
        url = "http://data.eastmoney.com/stock/yybcx.html"
        _data = self.sGet(url)
        _urls = self.sMatch('href="/Stock/lhb/city/', '\.html"', _data, 0)
        for x in xrange(0, len(_urls)):
            #_urls[x] = 440000
            detail = "http://data.eastmoney.com/DataCenter_V3/stock2016/yybSearch.ashx?pagesize=1000&page=1&js=var+fguIHta&param=&sortRule=-1&sortType=UpCount&city=%s&typeCode=2&gpfw=0&code=%s&rt=24462162" % (_urls[x], _urls[x])
            a = self.sGet(detail)
            a = a.replace("var fguIHta=", "")
            result = json.loads(a)  # avoid shadowing the re module
            for k in range(0, len(result['data'])):
                _tmp = result['data'][k]

                indata = {
                    'province': _tmp['Province'],
                    'codex': _tmp['SalesCode'],
                    'name': _tmp['SalesName'],
                    'SumActMoney': _tmp['SumActMoney'],
                    'SumActBMoney': _tmp['SumActBMoney'],
                    'SumActSMoney': _tmp['SumActSMoney'],
                    'UpCount': _tmp['UpCount'],
                    'BCount': _tmp['BCount'],
                    'SCount': _tmp['SCount']
                }
                print indata
                _has = self.mysql.fetch_one("select * from  s_lhb where codex=%s" % _tmp['SalesCode'])
                _where = "codex=%s" % _tmp['SalesCode']

                if _has is not None:
                    self.mysql.dbUpdate('s_lhb', indata, _where)
                else:
                    indata['last_dateline'] = datetime.strftime(date.today(), "%Y%m%d")
                    self.mysql.dbInsert('s_lhb', indata)
Example #21
 def service_periods(self, datetime):
     datetimestr = datetime.strftime( "%Y%m%d" ) #datetime to string like "20081225"
     datetimeint = int(datetimestr)              #int like 20081225. These ints have the same ordering as regular dates, so comparison operators work
     
     # Get the gtfs date range. If the datetime is out of the range, no service periods are in effect
     start_date, end_date = self.date_range()
     if datetime < start_date or datetime > end_date:
         return []
     
     # Use the day-of-week name to query for all service periods that run on that day
     dow_name = self.DOW_INDEX[datetime.weekday()]
     service_periods = list( self.execute( "SELECT service_id, start_date, end_date FROM calendar WHERE %s=1"%dow_name ) )
      
     # Exclude service periods whose range does not include this datetime
     service_periods = [x for x in service_periods if (int(x[1]) <= datetimeint and int(x[2]) >= datetimeint)]
     
     # Cut service periods down to service IDs
     sids = set( [x[0] for x in service_periods] )
         
     # For each exception on the given datetime, add or remove service_id to the accumulating list
     
     for exception_sid, exception_type in self.execute( "select service_id, exception_type from calendar_dates WHERE date = ?", (datetimestr,) ):
         if exception_type == 1:
             sids.add( exception_sid )
         elif exception_type == 2:
             if exception_sid in sids:
                 sids.remove( exception_sid )
             
     return list(sids)
def exportFeedback_wfactors():
	csvfileout = open('all_userfeeback.csv', 'w')
	csvheader=("idx","datetime","child_id","child_name","session","fformation","alone","groupid","gender","chouchou","feedback")
	writer = csv.DictWriter(csvfileout, csvheader)
	writer.writeheader()
	info=readInfoFile()
	all_data=[]
	files_in_dir = os.listdir('./visionLog_activity/')
	for file_in_dir in files_in_dir:
		print(file_in_dir)
		data_w = userfeedbacktoCSV('./visionLog_activity/'+file_in_dir)
		all_data.append(data_w)
	
	for data in all_data:
		for row in data:
			current_time = row[1]
			for i in info:
				start_time = datetime.strptime(i[2]+' '+i[3], '%d.%m.%y %H:%M:%S.%f')
				end_time = datetime.strptime(i[2]+' '+i[6], '%d.%m.%y %H:%M:%S.%f')
				
				if(current_time>= start_time and  current_time<= end_time):
					feedback = -1 if  row[-1]=='-' else 1
					new_row = {"idx":row[0],"datetime":datetime.strftime(current_time,'%Y-%m-%d %H:%M:%S:%f'),"child_id":i[0],"child_name":i[1],"session":i[-5],"fformation":i[5],"alone":i[-4],"groupid":i[-3],"gender":i[-2],"chouchou":i[-1],"feedback":feedback}
					writer.writerow(new_row)
	csvfileout.close()
def format_value(v):
    if type(v) is str:
        return v
    elif type(v) is datetime:
        return datetime.strftime(v, "%Y-%m-%d %H:%M:%S")
    else:
        return str(v)
	def generate_parameters(self, cr, uid, ids, data, context):
		val={}		
		as_on_date = data['form']['as_on_date']
		if data['form']['division_id']:				
			division_id = data['form']['division_id'][0]
			division_name = data['form']['division_id'][1] 							
		printed = data['form']['printed_by'][1]
		p_user= str(printed)		
		printed_date = data['form']['print_date']
		
		date_print =  printed_date.encode('utf-8')
		d1 = datetime.strptime(date_print,'%Y-%m-%d %H:%M:%S')
		p_date = d1.strftime( '%d-%m-%Y %H:%M:%S')
		
		as_date =  as_on_date.encode('utf-8')
		as_d1 = datetime.strptime(as_date,'%Y-%m-%d')
		as_d2 = datetime.strftime(as_d1, '%d-%m-%Y')
		
		val['as_date_range'] = as_d2	
		
		val['as_on_date'] = as_on_date
		val['user_id'] = uid		
		val['printed_by'] = str(printed)	
		val['print_date'] = p_date
		if data['form']['division_id']:	
			val['division_id'] = division_id
			val['division_name'] = division_name
		else:
			val['division_id'] = 0
			val['division_name'] = 'ALL'
		
		return val
Example #25
def get_datetime_string(datetime):
    try:
        if datetime is not None:
            return datetime.strftime("%m/%d/%Y %I:%M %p")
        return None
    except:
        return None
Example #26
def random_date(startYear, endYear):
    year = random.randint(startYear, endYear)
    month = random.randint(1, 12)
    day = random.randint(1, 28)
    date = datetime(year, month, day)
    birth_date = datetime.strftime(date, "%Y-%m-%d")
    return birth_date
Example #27
    def refresh(self):
        snapshot = self.get_image(self.surface)
        if not snapshot:
            return
        # Change the position of the rectangular window here. Or position your webcam accordingly
        rect = pygame.draw.rect(snapshot, (255, 0, 0), (10, 40, 100, 100), 2)

        if not self.reference_color:
            if time.time() - self.starttime < self.waitfor:
                return
            self.reference_color = pygame.transform.average_color(snapshot, rect)

        # Play a sound and pop up a desktop notification if any motion is detected within the rect window
        if self.check_movement(rect):
            print "Movement detected at ", datetime.strftime(datetime.now(), "%Y-%m-%d %H:%M:%S")
            if not pygame.mixer.get_busy() and self.sound:
                self.sound.play(loops=-1)
            if NOTIFICATIONS:
                self.notify.show()
            snapshot.fill((255, 0, 0), rect)
            ren = self.font.render("Boss Alert", 30, (255, 0, 0))
            snapshot.blit(ren, (400, 200))
        else:
            if self.sound:
                self.sound.fadeout(500)
        self.display.blit(snapshot, (0, 0))
        self.surface = snapshot
        pygame.display.flip()
Example #28
def fileAnalyzer(fname):
    print "Analyzing", fname

    f = open(fname, "r")
    newFile = open(fname + ".intermediate", "w")

    for l in f:
        if not l[0].isdigit():
            newFile.write(l)
            continue

        # R-dls.out includes dates like this, which the date parsing
        # library does not like.
        if l.startswith("00-00-0000  00:00:00"):
            newFile.write(l)
            continue

        dateTimeString = l[:20]
        dateTime = datetime.strptime(dateTimeString, dateFormatString)
        dateTime = sourceTimeZone.localize(dateTime).astimezone(pytz.utc)
        newString = datetime.strftime(dateTime, dateFormatString)

        newFile.write(newString + l[20:])

    f.close()
    newFile.close()
    os.rename(fname + ".intermediate", fname)
Example #29
def latest(request):
	username = request.user.username
	latest_photos_list = Photo.objects.order_by('-date_created')[:20]
	phototitles = []
	photoyears = []
	photomonths = []

	for i in latest_photos_list:
		phototitles.append(i.title)
		photomonths.append(datetime.strftime(i.date_created,'%b'))
		photoyears.append(datetime.strftime(i.date_created,'%Y'))

	photodetails = zip(phototitles,photoyears,photomonths)

	context = {'latest_photos': photodetails,'username': username}
	return render(request, 'photos/latest.html', context)
def convert_string_date_for_timeline(input, format):
    string_date = str(input)
    date_list = string_date.split("T")
    date_list = date_list[0].split("-")
    date_object = date(int(date_list[0]), int(date_list[1]), int(date_list[2]))
    output = datetime.strftime(date_object, format)
    return output
Example #31
    def loading_data(self, trade_date):
        """
        获取基础数据
        按天获取当天交易日所有股票的基础数据
        :param trade_date: 交易日
        :return:
        """
        # Convert the date format
        time_array = datetime.strptime(trade_date, "%Y-%m-%d")
        trade_date = datetime.strftime(time_array, '%Y%m%d')
        # Load the factors currently involved
        trade_date_pre_year = self.get_trade_date(trade_date, 1)
        trade_date_pre_year_2 = self.get_trade_date(trade_date, 2)
        trade_date_pre_year_3 = self.get_trade_date(trade_date, 3)
        trade_date_pre_year_4 = self.get_trade_date(trade_date, 4)
        trade_date_pre_year_5 = self.get_trade_date(trade_date, 5)

        engine = sqlEngine()
        columns = ['COMPCODE', 'PUBLISHDATE', 'ENDDATE', 'symbol', 'company_id', 'trade_date']

        # Report Data
        cash_flow_sets = engine.fetch_fundamentals_pit_extend_company_id(CashFlowReport,
                                                                         [CashFlowReport.LABORGETCASH,
                                                                          CashFlowReport.FINALCASHBALA,
                                                                          ], dates=[trade_date])
        for column in columns:
            if column in list(cash_flow_sets.keys()):
                cash_flow_sets = cash_flow_sets.drop(column, axis=1)
        cash_flow_sets = cash_flow_sets.rename(
            columns={'LABORGETCASH': 'goods_sale_and_service_render_cash',  # 销售商品、提供劳务收到的现金
                     'FINALCASHBALA': 'cash_and_equivalents_at_end',  # 期末现金及现金等价物余额
                     })

        income_sets = engine.fetch_fundamentals_pit_extend_company_id(IncomeReport,
                                                                      [IncomeReport.BIZTOTINCO,
                                                                       IncomeReport.BIZINCO,
                                                                       IncomeReport.PERPROFIT,
                                                                       IncomeReport.PARENETP,
                                                                       IncomeReport.NETPROFIT,
                                                                       ], dates=[trade_date])
        for column in columns:
            if column in list(income_sets.keys()):
                income_sets = income_sets.drop(column, axis=1)
        income_sets = income_sets.rename(columns={'NETPROFIT': 'net_profit',  # 净利润
                                                  'BIZTOTINCO': 'total_operating_revenue',  # 营业总收入
                                                  'BIZINCO': 'operating_revenue',  # 营业收入
                                                  'PERPROFIT': 'operating_profit',  # 营业利润
                                                  'PARENETP': 'np_parent_company_owners',  # 归属于母公司所有者的净利润
                                                  })

        indicator_sets = engine.fetch_fundamentals_pit_extend_company_id(IndicatorReport,
                                                                         [IndicatorReport.NETPROFITCUT, # 扣除非经常损益后的净利润
                                                                          IndicatorReport.MGTEXPRT, # 管理费用率
                                                                         ], dates=[trade_date])
        for column in columns:
            if column in list(indicator_sets.keys()):
                indicator_sets = indicator_sets.drop(column, axis=1)
        indicator_sets = indicator_sets.rename(columns={'NETPROFITCUT': 'adjusted_profit',  # 扣除非经常损益后的净利润
                                                        })

        balance_sets = engine.fetch_fundamentals_pit_extend_company_id(BalanceReport,
                                                                       [BalanceReport.PARESHARRIGH,
                                                                        ], dates=[trade_date])
        for column in columns:
            if column in list(balance_sets.keys()):
                balance_sets = balance_sets.drop(column, axis=1)
        balance_sets = balance_sets.rename(columns={'PARESHARRIGH': 'equities_parent_company_owners',  # 归属于母公司股东权益合计
                                                    })

        income_sets_pre_year_1 = engine.fetch_fundamentals_pit_extend_company_id(IncomeReport,
                                                                                 [IncomeReport.BIZINCO,  # 营业收入
                                                                                  IncomeReport.NETPROFIT,  # 净利润
                                                                                  ], dates=[trade_date_pre_year])
        for column in columns:
            if column in list(income_sets_pre_year_1.keys()):
                income_sets_pre_year_1 = income_sets_pre_year_1.drop(column, axis=1)
        income_sets_pre_year_1 = income_sets_pre_year_1.rename(columns={'NETPROFIT': 'net_profit_pre_year_1',  # 净利润
                                                                        'BIZINCO': 'operating_revenue_pre_year_1',
                                                                        # 营业收入
                                                                        })

        income_sets_pre_year_2 = engine.fetch_fundamentals_pit_extend_company_id(IncomeReport,
                                                                                 [IncomeReport.BIZINCO,
                                                                                  IncomeReport.NETPROFIT,
                                                                                  ], dates=[trade_date_pre_year_2])
        for column in columns:
            if column in list(income_sets_pre_year_2.keys()):
                income_sets_pre_year_2 = income_sets_pre_year_2.drop(column, axis=1)
        income_sets_pre_year_2 = income_sets_pre_year_2.rename(columns={'NETPROFIT': 'net_profit_pre_year_2',  # 净利润
                                                                        'BIZINCO': 'operating_revenue_pre_year_2',
                                                                        # 营业收入
                                                                        })

        income_sets_pre_year_3 = engine.fetch_fundamentals_pit_extend_company_id(IncomeReport,
                                                                                 [IncomeReport.BIZINCO,
                                                                                  IncomeReport.NETPROFIT,
                                                                                  ], dates=[trade_date_pre_year_3])
        for column in columns:
            if column in list(income_sets_pre_year_3.keys()):
                income_sets_pre_year_3 = income_sets_pre_year_3.drop(column, axis=1)
        income_sets_pre_year_3 = income_sets_pre_year_3.rename(columns={'NETPROFIT': 'net_profit_pre_year_3',  # 净利润
                                                                        'BIZINCO': 'operating_revenue_pre_year_3',
                                                                        # 营业收入
                                                                        })

        income_sets_pre_year_4 = engine.fetch_fundamentals_pit_extend_company_id(IncomeReport,
                                                                                 [IncomeReport.BIZINCO,
                                                                                  IncomeReport.NETPROFIT,
                                                                                  ], dates=[trade_date_pre_year_4])
        for column in columns:
            if column in list(income_sets_pre_year_4.keys()):
                income_sets_pre_year_4 = income_sets_pre_year_4.drop(column, axis=1)
        income_sets_pre_year_4 = income_sets_pre_year_4.rename(columns={'NETPROFIT': 'net_profit_pre_year_4',  # 净利润
                                                                        'BIZINCO': 'operating_revenue_pre_year_4',
                                                                        # 营业收入
                                                                        })

        tp_earning = pd.merge(cash_flow_sets, income_sets, how='outer', on='security_code')
        tp_earning = pd.merge(indicator_sets, tp_earning, how='outer', on='security_code')
        tp_earning = pd.merge(balance_sets, tp_earning, how='outer', on='security_code')
        tp_earning = pd.merge(income_sets_pre_year_1, tp_earning, how='outer', on='security_code')
        tp_earning = pd.merge(income_sets_pre_year_2, tp_earning, how='outer', on='security_code')
        tp_earning = pd.merge(income_sets_pre_year_3, tp_earning, how='outer', on='security_code')
        tp_earning = pd.merge(income_sets_pre_year_4, tp_earning, how='outer', on='security_code')

        # MRQ
        balance_mrq_sets = engine.fetch_fundamentals_pit_extend_company_id(BalanceMRQ,
                                                                           [BalanceMRQ.TOTASSET,  # 资产总计
                                                                            BalanceMRQ.PARESHARRIGH,  # 归属于母公司股东权益合计
                                                                            BalanceMRQ.RIGHAGGR,  # 所有者权益(或股东权益)合计
                                                                            BalanceMRQ.LONGBORR,  # 长期借款
                                                                            ], dates=[trade_date])
        for column in columns:
            if column in list(balance_mrq_sets.keys()):
                balance_mrq_sets = balance_mrq_sets.drop(column, axis=1)
        balance_mrq_sets = balance_mrq_sets.rename(columns={'TOTASSET': 'total_assets_mrq',
                                                            'PARESHARRIGH': 'equities_parent_company_owners_mrq',
                                                            # 归属于母公司股东权益合计
                                                            'RIGHAGGR': 'total_owner_equities_mrq',  # 所有者权益(或股东权益)合计
                                                            'LONGBORR': 'longterm_loan_mrq',  # 长期借款
                                                            })

        balance_mrq_sets_pre = engine.fetch_fundamentals_pit_extend_company_id(BalanceMRQ,
                                                                               [BalanceMRQ.TOTASSET,  # 资产总计
                                                                                BalanceMRQ.RIGHAGGR,  # 所有者权益(或股东权益)合计
                                                                                BalanceMRQ.LONGBORR,  # 长期借款
                                                                                ], dates=[trade_date])
        for column in columns:
            if column in list(balance_mrq_sets_pre.keys()):
                balance_mrq_sets_pre = balance_mrq_sets_pre.drop(column, axis=1)
        balance_mrq_sets_pre = balance_mrq_sets_pre.rename(columns={'TOTASSET': 'total_assets_mrq_pre',
                                                                    'RIGHAGGR': 'total_owner_equities_mrq_pre',
                                                                    # 所有者权益(或股东权益)合计
                                                                    'LONGBORR': 'longterm_loan_mrq_pre',  # 长期借款
                                                                    })

        # TTM Data
        cash_flow_ttm_sets = engine.fetch_fundamentals_pit_extend_company_id(CashFlowTTM,
                                                                             [CashFlowTTM.FINNETCFLOW,
                                                                              ], dates=[trade_date])
        for column in columns:
            if column in list(cash_flow_ttm_sets.keys()):
                cash_flow_ttm_sets = cash_flow_ttm_sets.drop(column, axis=1)
        cash_flow_ttm_sets = cash_flow_ttm_sets.rename(columns={'FINNETCFLOW': 'net_finance_cash_flow'})

        income_ttm_sets = engine.fetch_fundamentals_pit_extend_company_id(IncomeTTM,
                                                                          [IncomeTTM.BIZINCO,  # 营业收入
                                                                           IncomeTTM.NETPROFIT,  # 净利润
                                                                           IncomeTTM.MANAEXPE,  # 管理费用
                                                                           IncomeTTM.BIZTOTINCO,  # 营业总收入
                                                                           IncomeTTM.TOTPROFIT,  # 利润总额
                                                                           IncomeTTM.FINEXPE,  # 财务费用
                                                                           # IncomeTTM.INTEINCO,  # 利息收入
                                                                           IncomeTTM.SALESEXPE,  # 销售费用
                                                                           IncomeTTM.BIZTOTCOST,  # 营业总成本
                                                                           IncomeTTM.PERPROFIT,  # 营业利润
                                                                           IncomeTTM.PARENETP,  # 归属于母公司所有者的净利润
                                                                           IncomeTTM.BIZCOST,  # 营业成本
                                                                           # IncomeTTM.ASSOINVEPROF,  # 对联营企业和合营企业的投资收益
                                                                           IncomeTTM.BIZTAX,  # 营业税金及附加
                                                                           IncomeTTM.ASSEIMPALOSS,  # 资产减值损失
                                                                           ], dates=[trade_date])
        for column in columns:
            if column in list(income_ttm_sets.keys()):
                income_ttm_sets = income_ttm_sets.drop(column, axis=1)
        income_ttm_sets = income_ttm_sets.rename(columns={'BIZINCO': 'operating_revenue',  # 营业收入
                                                          'NETPROFIT': 'net_profit',  # 净利润
                                                          'MANAEXPE': 'administration_expense',  # 管理费用
                                                          'BIZTOTINCO': 'total_operating_revenue',  # 营业总收入
                                                          'TOTPROFIT': 'total_profit',  # 利润总额
                                                          'FINEXPE': 'financial_expense',  # 财务费用
                                                          # 'INTEINCO': 'interest_income',  # 利息收入
                                                          'SALESEXPE': 'sale_expense',  # 销售费用
                                                          'BIZTOTCOST': 'total_operating_cost',  # 营业总成本
                                                          'PERPROFIT': 'operating_profit',  # 营业利润
                                                          'PARENETP': 'np_parent_company_owners',  # 归属于母公司所有者的净利润
                                                          'BIZCOST': 'operating_cost',  # 营业成本
                                                          # 'ASSOINVEPROF': 'invest_income_associates',  # 对联营企业和合营企业的投资收益
                                                          'BIZTAX': 'operating_tax_surcharges',  # 营业税金及附加
                                                          'ASSEIMPALOSS': 'asset_impairment_loss',  # 资产减值损失
                                                          })

        balance_ttm_sets = engine.fetch_fundamentals_pit_extend_company_id(BalanceTTM,
                                                                           [BalanceTTM.TOTASSET,  # 资产总计
                                                                            BalanceTTM.RIGHAGGR,  # 所有者权益(或股东权益)合计
                                                                            BalanceTTM.PARESHARRIGH,  # 归属于母公司股东权益合计
                                                                            ], dates=[trade_date])
        for column in columns:
            if column in list(balance_ttm_sets.keys()):
                balance_ttm_sets = balance_ttm_sets.drop(column, axis=1)
        balance_ttm_sets = balance_ttm_sets.rename(
            columns={'PARESHARRIGH': 'equities_parent_company_owners',  # 归属于母公司股东权益合计
                     'RIGHAGGR': 'total_owner_equities',  # 所有者权益(或股东权益)合计
                     'TOTASSET': 'total_assets',  # 资产总计
                     })

        income_ttm_sets_pre_year_1 = engine.fetch_fundamentals_pit_extend_company_id(IncomeTTM,
                                                                                     [IncomeTTM.BIZINCO,
                                                                                      IncomeTTM.NETPROFIT,
                                                                                      ], dates=[trade_date_pre_year])
        for column in columns:
            if column in list(income_ttm_sets_pre_year_1.keys()):
                income_ttm_sets_pre_year_1 = income_ttm_sets_pre_year_1.drop(column, axis=1)
        income_ttm_sets_pre_year_1 = income_ttm_sets_pre_year_1.rename(
            columns={'BIZINCO': 'operating_revenue_pre_year_1',  # 营业收入
                     'NETPROFIT': 'net_profit_pre_year_1',  # 净利润
                     })

        income_ttm_sets_pre_year_2 = engine.fetch_fundamentals_pit_extend_company_id(IncomeTTM,
                                                                                     [IncomeTTM.BIZINCO,
                                                                                      IncomeTTM.NETPROFIT,
                                                                                      ], dates=[trade_date_pre_year_2])
        for column in columns:
            if column in list(income_ttm_sets_pre_year_2.keys()):
                income_ttm_sets_pre_year_2 = income_ttm_sets_pre_year_2.drop(column, axis=1)
        income_ttm_sets_pre_year_2 = income_ttm_sets_pre_year_2.rename(
            columns={'BIZINCO': 'operating_revenue_pre_year_2',  # 营业收入
                     'NETPROFIT': 'net_profit_pre_year_2',  # 净利润
                     })

        income_ttm_sets_pre_year_3 = engine.fetch_fundamentals_pit_extend_company_id(IncomeTTM,
                                                                                     [IncomeTTM.BIZINCO,
                                                                                      IncomeTTM.NETPROFIT,
                                                                                      ], dates=[trade_date_pre_year_3])
        for column in columns:
            if column in list(income_ttm_sets_pre_year_3.keys()):
                income_ttm_sets_pre_year_3 = income_ttm_sets_pre_year_3.drop(column, axis=1)
        income_ttm_sets_pre_year_3 = income_ttm_sets_pre_year_3.rename(
            columns={'BIZINCO': 'operating_revenue_pre_year_3',  # 营业收入
                     'NETPROFIT': 'net_profit_pre_year_3',  # 净利润
                     })

        income_ttm_sets_pre_year_4 = engine.fetch_fundamentals_pit_extend_company_id(IncomeTTM,
                                                                                     [IncomeTTM.BIZINCO,
                                                                                      IncomeTTM.NETPROFIT,
                                                                                      ], dates=[trade_date_pre_year_4])
        for column in columns:
            if column in list(income_ttm_sets_pre_year_4.keys()):
                income_ttm_sets_pre_year_4 = income_ttm_sets_pre_year_4.drop(column, axis=1)
        income_ttm_sets_pre_year_4 = income_ttm_sets_pre_year_4.rename(
            columns={'BIZINCO': 'operating_revenue_pre_year_4',  # 营业收入
                     'NETPROFIT': 'net_profit_pre_year_4',  # 净利润
                     })

        # indicator_ttm_sets = engine.fetch_fundamentals_pit_extend_company_id(IndicatorTTM,
        #                                                                      [IndicatorTTM.ROIC,   # 投入资本回报率
        #                                                                       ], dates=[trade_date]).drop(columns, axis=1)
        #
        # indicator_ttm_sets = indicator_ttm_sets.rename(columns={'ROIC': '',
        #                                                         })

        ttm_earning = pd.merge(income_ttm_sets, balance_ttm_sets, how='outer', on='security_code')
        ttm_earning = pd.merge(ttm_earning, cash_flow_ttm_sets, how='outer', on='security_code')
        ttm_earning = pd.merge(ttm_earning, income_ttm_sets_pre_year_1, how='outer', on='security_code')
        ttm_earning = pd.merge(ttm_earning, income_ttm_sets_pre_year_2, how='outer', on='security_code')
        ttm_earning = pd.merge(ttm_earning, income_ttm_sets_pre_year_3, how='outer', on='security_code')
        ttm_earning = pd.merge(ttm_earning, income_ttm_sets_pre_year_4, how='outer', on='security_code')
        ttm_earning = pd.merge(ttm_earning, balance_mrq_sets, how='outer', on='security_code')
        ttm_earning = pd.merge(ttm_earning, balance_mrq_sets_pre, how='outer', on='security_code')

        balance_con_sets = engine.fetch_fundamentals_pit_extend_company_id(BalanceTTM,
                                                                           [BalanceTTM.TOTASSET,  # 资产总计
                                                                            BalanceTTM.RIGHAGGR,  # 所有者权益(或股东权益)合计
                                                                            ],
                                                                           dates=[trade_date,
                                                                                  trade_date_pre_year,
                                                                                  trade_date_pre_year_2,
                                                                                  trade_date_pre_year_3,
                                                                                  trade_date_pre_year_4,
                                                                                  ])
        for column in columns:
            if column in list(balance_con_sets.keys()):
                balance_con_sets = balance_con_sets.drop(column, axis=1)
        balance_con_sets = balance_con_sets.groupby(['security_code'])
        balance_con_sets = balance_con_sets.sum()
        balance_con_sets = balance_con_sets.rename(columns={'TOTASSET': 'total_assets',
                                                            'RIGHAGGR': 'total_owner_equities'})

        # cash_flow_con_sets = engine.fetch_fundamentals_pit_extend_company_id(CashFlowReport,
        #                                                                      [CashFlowReport.FINALCASHBALA,
        #                                                                   ],
        #                                                                  dates=[trade_date,
        #                                                                         trade_date_pre_year,
        #                                                                         trade_date_pre_year_2,
        #                                                                         trade_date_pre_year_3,
        #                                                                         trade_date_pre_year_4,
        #                                                                         trade_date_pre_year_5,
        #                                                                         ]).drop(columns, axis=1)
        # cash_flow_con_sets = cash_flow_con_sets.groupby(['security_code'])
        # cash_flow_con_sets = cash_flow_con_sets.sum()
        # cash_flow_con_sets = cash_flow_con_sets.rename(columns={'FINALCASHBALA':'cash_and_equivalents_at_end'})

        income_con_sets = engine.fetch_fundamentals_pit_extend_company_id(IncomeReport,
                                                                          [IncomeReport.NETPROFIT,
                                                                           ],
                                                                          dates=[trade_date,
                                                                                 trade_date_pre_year,
                                                                                 trade_date_pre_year_2,
                                                                                 trade_date_pre_year_3,
                                                                                 trade_date_pre_year_4,
                                                                                 trade_date_pre_year_5,
                                                                                 ])
        for column in columns:
            if column in list(income_con_sets.keys()):
                income_con_sets = income_con_sets.drop(column, axis=1)
        income_con_sets = income_con_sets.groupby(['security_code'])
        income_con_sets = income_con_sets.sum()
        income_con_sets = income_con_sets.rename(columns={'NETPROFIT': 'net_profit'}).reset_index()
        ttm_earning_5y = pd.merge(balance_con_sets, income_con_sets, how='outer', on='security_code')

        return tp_earning, ttm_earning, ttm_earning_5y
Example #32
    def get_courses(self):
        print(f"\n-----------------\n{bcolors.OKCYAN}Getting EDT...{bcolors.ENDC}")

        list_of_dates_raw = self.driver.find_elements_by_class_name(
            "fc-day-header")
        list_of_dates = (x.text for x in list_of_dates_raw)

        list_of_colonnes = self.driver.find_elements_by_class_name('fc-content-col')
        
        dico = {}
        passe = False

        for jour, colonne_raw, in zip(list_of_dates, list_of_colonnes):
            dico[jour] = []
            try:
                raw_day = re.match(
                    r".* (?P<jour>.*)/(?P<mois>.*)[ ]*", jour).groupdict()
                true_day = f'2021-{raw_day["mois"]}-{raw_day["jour"]}'
            
            except:
                print(
                    f"{bcolors.WARNING}WARNING: Date deducted manually{bcolors.ENDC}")
                true_day = f'2021-{jour[7:9]}-{jour[4:6]}'

            list_of_cours_raw = colonne_raw.find_elements_by_class_name("fc-content")
            
            list_of_cours = (x.text for x in list_of_cours_raw)
            
            list_of_hours_raw = colonne_raw.find_elements_by_class_name(
                "fc-time")
            list_of_hours = [x.get_attribute("data-full") for x in list_of_hours_raw]
            cours_dic = {}
            for heure, cours_infos in zip(list_of_hours, list_of_cours):
                passe = True
                try:
                    cours_dic = re.match(
                        r"-* -* (?P<module>.*) - (?P<prof>.*)[ /.*]* - (?P<salle>.*) - (?P<UE>.*)", cours_infos).groupdict()
                    cours_dic['type'] = "CONF"
                    
                except:
                    try:
                        cours_dic = re.match(
                            r"(?P<module>.*) - (?P<UE>.*) -.*(FISE)*[_, ](?P<type>.*) - (?P<prof>.*) - (?P<salle>.*)[ /.]*.*[,-, ]", cours_infos).groupdict()
                    except:
                        print(f"{bcolors.WARNING}\nWARNING: Impossible to parse data\n{bcolors.ENDC}")
                        cours_dic['infos'] = cours_infos
                
                true_hours = re.match(
                    r"(?P<sth>.*) - (?P<eth>.*)", heure).groupdict()

                tmp = datetime.strptime(true_hours['sth'], "%I:%M %p")
                true_hours['sth'] = datetime.strftime(tmp, "%H:%M")

                tmp = datetime.strptime(true_hours['eth'], "%I:%M %p")
                true_hours['eth'] = datetime.strftime(tmp, "%H:%M")

                cours_dic['startdate'] = true_day + f"T{true_hours['sth']}:00"
                cours_dic['enddate'] = true_day + f"T{true_hours['eth']}:00"
                dico[jour].append(cours_dic)
                
        self.raw_EDT = dico
        print(f"{bcolors.OKGREEN}Schedule saved!{bcolors.ENDC}")
        #only used for debug
        #self.save_results(dico)

        if(not passe):
            print(
                f"{bcolors.WARNING}\nWARNING: No lessons this week!{bcolors.ENDC} ❤ {bcolors.WARNING} Exiting...\n{bcolors.ENDC}")
            if(not(self.headless)):
                self.close()
            print(f"\n{bcolors.WARNING}Program closing...{bcolors.ENDC}")
            delete_tmp_files()
            sys.exit(0)
ck_app_data['FinalAPR'] = ck_app_data['FinalAPR'].astype(float)
ck_app_data['SmallAPR'] = ck_app_data['SmallAPR'].astype(float)
ck_app_data['LargeAPR'] = ck_app_data['LargeAPR'].astype(float)
ck_app_data['SmallTerm'] = ck_app_data['SmallTerm'].astype(float)
ck_app_data['LargeTerm'] = ck_app_data['LargeTerm'].astype(float)
ck_app_data['SSNo'] = ck_app_data['SSNo'].astype(int)

# Create app table identifier
ck_app_data['lead_app'] = 'app'

# Add current date to ck_app_data file formatted as YYYY_MM_DD
# Use pandas to address the issue
from datetime import datetime
os.chdir('E:\\cepps\\Web_Report\\Credit_Karma\\ck_app_data')
datestring = datetime.strftime(datetime.now(), ' %Y_%m_%d')
#Fill in your path
ck_app_data.to_excel(
    excel_writer=r"E:\cepps\Web_Report\Credit_Karma\ck_app_data\{0}".format(
        'ck_app_data_' + datestring + '.xls'))

# Drop duplicates again
leads = leads.sort_values(['LEADDATE', 'LEAD_SCORE'], ascending=[False, False])
leads = leads.drop_duplicates(subset=['ssn'], keep="first")

# Sort by application date, de-dup, keep latest applications
ck_app_data = ck_app_data.sort_values(['ApplicationEnterDate'],
                                      ascending=[False])
apps = ck_app_data.drop_duplicates(["ssn"], keep='first')

# merge leads and apps
Example #34
def _format_date(datetime):
    return datetime.strftime(
        "%A, %b %d, %Y (week {week})".format(week=datetime.isocalendar()[1]))
Example #35
def get_date_list(beginDate, endDate):
    date_list = [
        datetime.strftime(x, '%Y-%m-%d')
        for x in list(pd.date_range(start=beginDate, end=endDate))
    ]
    return date_list
import pandas as pd
import plotly.graph_objs as go
import plotly.express as px
from plotly.subplots import make_subplots
import plotly.io as pio
import numpy as np
import dash_table
import sidetable as stb
import datetime
from datetime import datetime, timedelta
from datetime import date
import geopandas as gpd
import flask
import os
yesterday = datetime.now() - timedelta(1)
yea = datetime.strftime(yesterday, '%Y%m%d')

today = date.today()
d2 = today.strftime("Fecha de actualización : %d-%m-%Y")

###############################
# DATABASES
############################### Open the data files

base = pd.read_csv(
    'https://raw.githubusercontent.com/fdealbam/CamaraDiputados/main/application/mun_p1_cvegeo.csv',
    encoding='latin-1',
    usecols=['Nom_Ent', 'nom_mun', 'cve_ent_mun1', 'cve_ent_mun2'])
contagios = pd.read_csv(
    "https://datos.covid-19.conacyt.mx/Downloads/Files/Casos_Diarios_Municipio_Confirmados_%s.csv"
    % (yea))
Example #37
 def convert_day(self, date_str):
     flag = datetime.strptime(date_str, '%Y-%m-%d')
     date_str = datetime.strftime(flag, '%d-%m-%Y')
     return date_str
Example #38
def iso_8601_datetime_with_nanoseconds(datetime):
    return datetime.strftime("%Y-%m-%dT%H:%M:%S.%f000Z")
Example #39
def iso_8601_datetime_with_milliseconds(datetime):
    return datetime.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3] + "Z"
Example #40
def format_datetime(date_x: datetime):
    return datetime.strftime(date_x, '%d.%m.%Y %H:%M:%S')
Example #41
import numpy as np
import datetime
from dateutil import parser
dt = datetime.datetime(year=2020, month=6, day=25,
                       hour=11, minute=23, second=59)
print(dt)  # 2020-06-25 11:23:59
print(dt.timestamp())  # 1593055439.0
dt = datetime.datetime.fromtimestamp(1593055439.0)
print(dt)  # 2020-06-25 11:23:59
print(type(dt))  # <class 'datetime.datetime'>
dt = datetime.datetime.now()
print(dt)  # 2020-06-25 11:11:03.877853
print(type(dt))  # <class 'datetime.datetime'>


'''
datetime.strftime(fmt) formats a datetime object.

%a  Abbreviated local weekday name (e.g. Mon)
%A  Full local weekday name (e.g. Monday)
%b  Abbreviated local month name (e.g. Jan)
%B  Full local month name (e.g. January)
%c  Local date and time representation
%d  Day of the month (01-31)
%H  Hour, 24-hour clock (00-23)
%I  Hour, 12-hour clock (01-12)
%j  Day of the year (001-366)
%m  Month (01-12)
%M  Minute (00-59)
%p  Local equivalent of A.M. or P.M.
%S  Second (00-59)
'''
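
A short demonstration of a few of these codes (a sketch; the weekday and month names assume the default English locale):

d = datetime.datetime(2020, 6, 25, 11, 23, 59)
print(d.strftime('%A %d %B %Y'))  # Thursday 25 June 2020
print(d.strftime('%I:%M:%S %p'))  # 11:23:59 AM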
Example #42
def _local_strftime(datetime: datetime.datetime, fmt_char: str, be,
                    ctx) -> str:

    _abbr_weekdays = [
        ctx.bot.ss('Date', '_Monday'),
        ctx.bot.ss('Date', '_Tuesday'),
        ctx.bot.ss('Date', '_Wednesday'),
        ctx.bot.ss('Date', '_Thursday'),
        ctx.bot.ss('Date', '_Friday'),
        ctx.bot.ss('Date', '_Saturday'),
        ctx.bot.ss('Date', '_Sunday')
    ]
    _full_weekdays = [
        ctx.bot.ss('Date', 'Monday'),
        ctx.bot.ss('Date', 'Tuesday'),
        ctx.bot.ss('Date', 'Wednesday'),
        ctx.bot.ss('Date', 'Thursday'),
        ctx.bot.ss('Date', 'Friday'),
        ctx.bot.ss('Date', 'Saturday'),
        ctx.bot.ss('Date', 'Sunday')
    ]

    _full_months = [
        ctx.bot.ss('Date', 'January'),
        ctx.bot.ss('Date', 'February'),
        ctx.bot.ss('Date', 'March'),
        ctx.bot.ss('Date', 'April'),
        ctx.bot.ss('Date', 'May'),
        ctx.bot.ss('Date', 'June'),
        ctx.bot.ss('Date', 'July'),
        ctx.bot.ss('Date', 'August'),
        ctx.bot.ss('Date', 'September'),
        ctx.bot.ss('Date', 'October'),
        ctx.bot.ss('Date', 'November'),
        ctx.bot.ss('Date', 'December')
    ]
    _abbr_months = [
        ctx.bot.ss('Date', '_January'),
        ctx.bot.ss('Date', '_February'),
        ctx.bot.ss('Date', '_March'),
        ctx.bot.ss('Date', '_April'),
        ctx.bot.ss('Date', '_May'),
        ctx.bot.ss('Date', '_June'),
        ctx.bot.ss('Date', '_July'),
        ctx.bot.ss('Date', '_August'),
        ctx.bot.ss('Date', '_September'),
        ctx.bot.ss('Date', '_October'),
        ctx.bot.ss('Date', '_November'),
        ctx.bot.ss('Date', '_December')
    ]

    str_ = ""
    if fmt_char == "A":
        str_ = _full_weekdays[datetime.weekday()]
    elif fmt_char == "a":
        str_ = _abbr_weekdays[datetime.weekday()]
    elif fmt_char == "B":
        str_ = _full_months[datetime.month - 1]
    elif fmt_char == "b":
        str_ = _abbr_months[datetime.month - 1]
    elif fmt_char == "C":
        str_ = str(int((datetime.year + (543 if be else 0)) / 100) + 1)
    elif fmt_char == "c":
        str_ = "{:<2} {:>2} {} {} {}".format(
            _abbr_weekdays[datetime.weekday()],
            datetime.day,
            _abbr_months[datetime.month - 1],
            datetime.strftime("%H:%M:%S"),
            datetime.year + (543 if be else 0),
        )
    elif fmt_char == "D":
        str_ = "{}/{}".format(
            datetime.strftime("%m/%d"),
            str(datetime.year + (543 if be else 0))[-2:])
    elif fmt_char == "F":
        str_ = "{}-{}".format(str(datetime.year + (543 if be else 0)),
                              datetime.strftime("%m-%d"))
    elif fmt_char == "G":
        str_ = str(int(datetime.strftime("%G")) + (543 if be else 0))
    elif fmt_char == "g":
        str_ = str(int(datetime.strftime("%G")) + (543 if be else 0))[-2:]
    elif fmt_char == "v":
        str_ = "{:>2}-{}-{}".format(datetime.day,
                                    _abbr_months[datetime.month - 1],
                                    datetime.year + (543 if be else 0))
    elif fmt_char == "X":
        str_ = datetime.strftime("%H:%M:%S")
    elif fmt_char == "x":
        str_ = "{}/{}/{}".format(_padding(datetime.day),
                                 _padding(datetime.month),
                                 datetime.year + (543 if be else 0))
    elif fmt_char == "Y":
        str_ = str(datetime.year + (543 if be else 0))
    elif fmt_char == "y":
        str_ = str(datetime.year + (543 if be else 0))[2:4]
    elif fmt_char == "+":
        str_ = "{:<2} {:>2} {} {} {}".format(
            _abbr_weekdays[datetime.weekday()],
            datetime.day,
            _abbr_months[datetime.month - 1],
            datetime.year + (543 if be else 0),
            datetime.strftime("%H:%M:%S"),
        )
    else:
        str_ = datetime.strftime(f"%{fmt_char}")

    return str_
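The recurring `+ 543` in the branches above is the offset from the Common Era to the Thai Buddhist Era; a standalone illustration, independent of the bot context:

import datetime
d = datetime.datetime(2020, 6, 25)
print(f"{d.day:02d}/{d.month:02d}/{d.year + 543}")  # 25/06/2563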
Example #43
def iso_8601_datetime_without_milliseconds_s3(datetime):
    return None if datetime is None else datetime.strftime("%Y-%m-%dT%H:%M:%S.000Z")
Example #44
def current_fundamental_scoring(stats_table,current_values_list,table_list,constituent_list):
    #table_list = [ROCE_table,sales_table,profit_margin_table,PER_table,EPS_table,EBITDA_table]
    
    m=len(table_list)
    n=len(constituent_list)
    current_fundamental_board = pd.DataFrame()
    current_fundamental_array=np.zeros((n,m))
    #date = "{:%Y-%m-%dT%H:%M:%S}".format(datetime.datetime.now().date())
    date = datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M:%S')
    constituent_name_list = []
    constituent_id_list = []
    
    for j in range(m): ##loop through fundamental quantities
        #array = np.zeros(n) #this array stores scores of one particular fundamental quantity for each constituent. 
        table = table_list[j]
        top_lower = float(stats_table['Top_lower_bound'].loc[stats_table['Fundamental_quantity']==current_values_list[j]])
        good_lower = float(stats_table['Good_lower_bound'].loc[stats_table['Fundamental_quantity']==current_values_list[j]])
        fair_lower = float(stats_table['Fair_lower_bound'].loc[stats_table['Fundamental_quantity']==current_values_list[j]])
        
        for i in range(n): ##loop through constituents
            constituent = constituent_list[i]
            ##Taking care of the special German characters
            print(constituent)

            if constituent.encode('utf-8') == b'M\xc3\xbcnchener R\xc3\xbcckversicherungs-Gesellschaft':
                constituent = 'Münchener Rückversicherungs-Gesellschaft'
            elif constituent.encode('utf-8') == b'Deutsche B\xc3\xb6rse':
                constituent = 'Deutsche Börse'


            constituent_name_list.append(get_constituent_id_name(constituent)[1])
            constituent_id_list.append(get_constituent_id_name(constituent)[0])
            
            
            #print constituent
            if table[current_values_list[j]].loc[table['Constituent']==constituent].empty==False: 
                #print table[current_values_list[j]].loc[table['Constituent']==constituent]
                value = table[current_values_list[j]].loc[table['Constituent']==constituent]
                value = value.iloc[0]
                value = float(value)

                #print top_lower
                if value > top_lower:
                    score = 4 #top-performing
                elif value > good_lower:
                    score = 2 #well-performing
                elif value> fair_lower:
                    score = 1 #fair-performing
                else: 
                    score = 0 #poorly-performing
                current_fundamental_array[i,j]=score
            else: 
                print(current_values_list[j] + '=N/A for ' + constituent)
                score=0
                current_fundamental_array[i,j]=score

            
        
        #current_fundamental_array stores all the info needed to calculate scores
    for i in range(n): ## loop constituents
        temp = {'Constituent':constituent_list[i],'Constituent_name':constituent_name_list[i],'Constituent_id':constituent_id_list[i],'Table':'Current_fundamental_ranking', 'Current_fundamental_total_score':sum(current_fundamental_array[i,:]),'Status':'active','Date_of_analysis':date}
        score_dict = {str(current_values_list[j]):int(current_fundamental_array[i,j]) for j in range(m)}
        score_dict.update(temp.copy())
        current_fundamental_board = pd.concat(
            [current_fundamental_board, pd.DataFrame(score_dict, index=[0])],
            ignore_index=True)
        
    return current_fundamental_board
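The scoring above reduces to a small threshold function; a standalone sketch with made-up bounds:

def bucket_score(value, top_lower, good_lower, fair_lower):
    # Same 4/2/1/0 mapping used in current_fundamental_scoring.
    if value > top_lower:
        return 4   # top-performing
    elif value > good_lower:
        return 2   # well-performing
    elif value > fair_lower:
        return 1   # fair-performing
    return 0       # poorly-performing

print(bucket_score(0.18, top_lower=0.15, good_lower=0.10, fair_lower=0.05))  # 4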
Example #45
rows = read[0].shape[0]
colms = read[0].shape[1]
AOI = read[5] > 0
AOI_true = np.nonzero(AOI)
AOI_a_true = AOI_true[0]
AOI_b_true = AOI_true[1]
# ASPECT=dem_dict['aspect'][AOI_a_true, AOI_b_true].reshape(rows, colms)
Slope_true = dem_dict['slope'][AOI_a_true, AOI_b_true]
Slope_true[Slope_true < 0] = 0
IC_R = IC[AOI_a_true, AOI_b_true]
# Matriks_true[a_true,b_true]= dem_dict['slope'][a_true,b_true]
slope_sample = dem_dict['slope'][a_true, b_true]
IC_sample = IC[a_true, b_true]
piksel = int(rows * colms)
today = date.today()
today_par = datetime.strftime(today, '%Y-%m-%d')

# Report CSV
with open(
        "D:/FORESTS2020/TRAINING/PyQgis/RESULT/REPORT/050918/Piksel/8/" +
        filename[0][10:25] + "_PIKSEL_REPORT.csv", 'w', newline='') as csvfile:
    filewriter = csv.writer(csvfile,
                            delimiter=',',
                            quotechar='|',
                            quoting=csv.QUOTE_MINIMAL)
    filewriter.writerow([
        'Tanggal', 'Kondisi Awan', 'Path/Row', 'Slope1_min', 'Slope1_max',
        'Slope1_mean', 'Slope1_std', 'Slope2_min', 'Slope2_max', 'Slope2_mean',
        'Slope2_std', 'IC1_min', 'IC1_max', 'IC1_mean', 'IC1_std', 'IC2_min',
        'IC2_max', 'IC2_mean', 'IC2_std', 'Jml_piksel', 'Jml_Sampel',
        'Bheta_B1', 'Bheta_B2', 'Bheta_B3', 'Bheta_B4', 'Bheta_B5', 'Bheta_B6',
Example #46
def rfc_1123_datetime(datetime):
    return datetime.strftime(RFC1123)
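`RFC1123` is a constant defined elsewhere in the original module; presumably the usual HTTP date layout, which would behave like this:

from datetime import datetime as _dt
RFC1123 = "%a, %d %b %Y %H:%M:%S GMT"  # assumed definition
print(rfc_1123_datetime(_dt(2020, 6, 25, 11, 23, 59)))
# Thu, 25 Jun 2020 11:23:59 GMT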
Example #47
 def _transfer_time2str(datetime):
     if datetime is not None:
         return datetime.strftime("%Y-%m-%dT%H:%M:%SZ")
     else:
         return datetime
Example #48
 def test_init_accepts_instance_locale(self):
     datetime = jdatetime.datetime(1397, 4, 23, locale=jdatetime.FA_LOCALE)
     self.assertEqual(datetime.strftime('%A'), u'شنبه')
Example #49
df_json_raw = pd.read_json('./source/data.json')

# Add variables as desired e.g. 'source'
#df_json = df_json_raw.apply( lambda x: pd.Series([x[0]['title'],x[0]['description'],x[0]['publishedAt'],x[0]['source']]), axis = 1 )
#df_json = df_json_raw.apply( lambda x: pd.Series([x[0]['title'],x[0]['description']]), axis = 1 )

df_json = df_json_raw.apply(
    lambda x: pd.Series([x[2]['description'], x[2]['title']]), axis=1)
datadf = pd.DataFrame(data['articles'])  # 'data' is the raw API response, defined elsewhere
print(datadf)
# Label columns for csv file
#df_json.columns=['Title','Description','Published At','Source']
df_json.columns = ['Description', 'Title']

#export as csv, respect location and time conventions:
datestring = datetime.strftime(datetime.now(), '%Y-%m-%d-%H-%M-%S')

archive = './archive'
df_json.to_csv(os.path.join(archive, r'newsfeed.txt'))
#path = 'C:/Users/BELUSA/Documents/Projects/alt/feed'
#df_json.to_csv(os.path.join(path,r'feed_'+datestring+'.csv'))

# Show file in workspace
print(data)
print(df_json)

print('')

print('Word Frequency Count')
import common
Example #50
    def parse_results(self, url, au):
        url = f"{url}/results"
        t, e = self.get_html(url)
        rstb = t.xpath('.//div[@id="fs-results"]//tbody/tr')
        now = datetime.datetime.now()
        dates = []
        games = []
        if "/team/" in url:
            team = "".join(t.xpath('.//div[@class="team-name"]/text()'))
            e.title = f"≡ Results for {team}"
            for i in rstb:
                d = "".join(i.xpath('.//td[contains(@class,"time")]//text()'))
                # Skip header rows.
                if not d:
                    continue

                # Get match date
                try:
                    d = datetime.datetime.strptime(d, "%d.%m. %H:%M")
                    d = d.replace(year=now.year)
                    d = datetime.datetime.strftime(d, "%a %d %b: %H:%M")
                except ValueError:
                    # Fix older than a year games.
                    d = datetime.datetime.strptime(d, "%d.%m.%Y")
                    d = datetime.datetime.strftime(d, "%d/%m/%Y")

                # Score
                sc = i.xpath('.//td[contains(@class,"score")]/text()')
                sc = "".join(sc).replace('\xa0', '').split(':')
                h = sc[0]
                a = sc[1]
                sc = "-".join(sc)
                # Assume we're playing at home.
                op = "".join(
                    i.xpath('.//span[@class="padr"]/text()'))  # PADR IS HOME.
                wh = "A" if team in op else "H"
                w = "L" if h > a else "D" if h == a else "W"

                if team in op:
                    # if we're actually the away team.
                    op = "".join(i.xpath('.//span[@class="padl"]/text()'))
                    w = "W" if h > a else "D" if h == a else "L"
                dates.append(f"`{wh}: {d}`")
                games.append(f"`{w}: {sc} v {op}`")
        else:
            comp = "".join(t.xpath('.//div[@class="tournament-name"]/text()'))
            e.title = f"≡ Fixtures for {comp}"
            for i in rstb:
                d = "".join(i.xpath('.//td[contains(@class,"time")]//text()'))
                # Skip header rows.
                if not d:
                    continue
                d = datetime.datetime.strptime(d, "%d.%m. %H:%M")
                d = d.replace(year=now.year)
                d = datetime.datetime.strftime(d, "%a %d %b: %H:%M")
                dates.append(f"`{d}`")
                sc = i.xpath('.//td[contains(@class,"score")]/text()')
                sc = "".join(sc).replace('\xa0', '').split(':')
                hos = sc[0]
                aws = sc[1]

                h = "".join(i.xpath('.//span[@class="padr"]/text()'))
                a = "".join(i.xpath('.//span[@class="padl"]/text()'))
                sc = f"`{hos}-{aws}`"
                games.append(f"{h} {sc} {a}")
        if not games:
            return  # Rip
        z = list(zip(dates, games))
        embeds = self.build_embeds(au, e, z, "Result")
        return embeds
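The try/except in the team branch distinguishes the two date layouts the page serves; a stripped-down sketch of that fallback:

import datetime

def parse_result_date(d, year):
    # Recent games carry "dd.mm. HH:MM" (no year); older ones carry "dd.mm.yyyy".
    try:
        parsed = datetime.datetime.strptime(d, "%d.%m. %H:%M").replace(year=year)
        return parsed.strftime("%a %d %b: %H:%M")
    except ValueError:
        return datetime.datetime.strptime(d, "%d.%m.%Y").strftime("%d/%m/%Y")

print(parse_result_date("25.06. 18:30", 2020))  # Thu 25 Jun: 18:30
print(parse_result_date("25.06.2019", 2020))    # 25/06/2019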
Example #51
        if h == 0:
            stname = x['name']
            fl = 0
            for i in stname:
                if ord(i) > 127:  # non-ASCII check; ord() is never negative
                    fl = 1
            if fl == 0:
                posix_timestamp_1 = x['startTimeSeconds']
                unix_timestamp = float(posix_timestamp_1)
                local_timezone = tzlocal.get_localzone()  # get pytz timezone
                local_time = datetime.fromtimestamp(unix_timestamp,
                                                    local_timezone)
                dt = datetime.fromtimestamp(posix_timestamp_1, local_timezone)
                gog = local_time.strftime('%z')
                gog = gog[:3] + ":" + gog[3:]
                en = dt.strftime('%Y-%m-%d' + "T" + '%H:%M:%S' + str(gog))

                posix_timestamp_2 = x['startTimeSeconds'] + x['durationSeconds']
                unix_timestamp2 = float(posix_timestamp_2)
                local_timezone2 = tzlocal.get_localzone()  # get pytz timezone
                local_time2 = datetime.fromtimestamp(unix_timestamp2,
                                                     local_timezone2)
                dt2 = datetime.fromtimestamp(posix_timestamp_2,
                                             local_timezone2)
                gog2 = local_time2.strftime('%z')
                gog2 = gog2[:3] + ":" + gog2[3:]
                en2 = dt2.strftime('%Y-%m-%d' + "T" + '%H:%M:%S' + str(gog2))

                EVENT = {
                    'summary': x['name'],
                    'location': 'Codeforces',
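The snippet is cut off above; the offset handling it performs (slicing a colon into the `%z` string) is easier to see in isolation, with a fixed zone instead of tzlocal:

from datetime import datetime, timezone, timedelta
tz = timezone(timedelta(hours=5, minutes=30))
off = datetime(2020, 6, 25, tzinfo=tz).strftime('%z')  # '+0530'
print(off[:3] + ':' + off[3:])                         # '+05:30'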
Example #52
def convertDate(item):
    theDate = item[-1]
    dateObj = datetime.strptime(theDate,'%Y-%m-%d')
    dateStr = datetime.strftime(dateObj,'%m/%d/%Y')
    item[-1] = dateStr
    return item
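Usage, assuming a row whose last field is an ISO date:

print(convertDate(['id-1', '2020-06-25']))  # ['id-1', '06/25/2020']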
Example #53
        strftime = time.strftime('%X')

        if strfmin30 == strftime:
            count += 1

    counts = counts + [count]
    strmin30s = strmin30s + [strfmin30]
    # counts = counts + {min30, count}

mydict = {"min30s": min30s, "apply": counts}
my_df = pd.DataFrame.from_dict(mydict)

print("=================")
print(mydict)

print("=================")
print(my_df)

nDF = pd.DataFrame(my_df)
# nDF.plot.bar()

plt.plot(min30s, counts, label="apply", color='r')
dt_labels = [dt.strftime('%H:%M') for dt in nDF['min30s']]
print(dt_labels)
plt.xticks(min30s[::1], dt_labels[::1], rotation=90, size='small')
# nDF.plot.show()
# plt.bar(min30s, counts)
plt.show()
# print(dfdates)

# ndf = pd.DataFrame({'counts', counts})
Example #54
import os
import datetime as dt

# Pull the order digit and the date out of a file name shaped like
# "3200625_speech.pdf" (hypothetical example), then re-render the date as ISO.
file_name = os.path.basename(files[0]).replace("_speech.pdf", "")
order = file_name[0]
date = file_name[1:]
date = dt.datetime.strptime(date, "%y%m%d")
date = dt.datetime.strftime(date, "%Y-%m-%d")
print(date)  # e.g. 2020-06-25
Example #55
 def datetime_to_str(self, datetime):
     # Fall back to an empty string when the value cannot be formatted.
     try:
         result = datetime.strftime('%Y-%m-%d %H:%M:%S')
     except (AttributeError, ValueError):
         result = ""
     return result
Example #56
 def _datetime_in_ES5_compatible_iso_format(datetime):
     return datetime.strftime('%Y-%m-%dT%H:%M:%S.%f')
Example #57
nd = {}
nd = ENVdicts.ENVdicts(localtag)
for var in nd.keys():
    ##    print var, nd[var]
    locals()[var] = nd[var]
##################
global recentlimit, time_format, timedate_format, nextorderID
from time import sleep, strftime, localtime
import rpu_rp, rpInd, ibutiles, TicksUtile, RP_Snapshot, glob, csv, subprocess, datetime, shutil, time
from datetime import datetime
import ctypes
global date
date = yesterday  # today  ######## <<<<<<<

style = ''
now = datetime.strftime(datetime.now(), spaceYtime_format)
current_time = datetime.now().time()
#######################
btmode = 'BACKTEST'
btestlimit = 1000
#########################
######################
cpfname = EXE + 'signalcontroller.txt'
symbol_list = ['ES']  #symbol_list2
print(symbol_list)
prevsigid = ''


##########################################
def Mbox(title, text, style):
    ctypes.windll.user32.MessageBoxA(0, text, title, style)
Example #58
 def __init__(self, datetime, desc: str):
     self.dt = datetime
     self.str = datetime.strftime("%Hh%M %d/%m/%Y")
     self.desc = desc
Example #59
def parse_xml_to_flat_dict(request, datetime, lat, lon, a_dt, dist):
    response_dict = xmltodict.parse(request.content)

    parsed_result = {}
    parsed_list = [
        'location_lat', 'location_long', 'temperature', 'dew_point',
        'wind_speed', 'wind_direction', 'cloud_cover_amount', 'snow_amount',
        'humidity'
    ]

    for para in parsed_list:
        parsed_result.setdefault(para, 0)

    parsed_result['day'] = datetime.weekday()
    parsed_result['day_of_month'] = datetime.day
    parsed_result['dep_time'] = datetime.strftime('%H%M')
    parsed_result['arr_time'] = a_dt.strftime('%H%M')
    parsed_result['elapsed_time'] = 60
    parsed_result['distance'] = dist
    parsed_result['location_lat'] = lat
    parsed_result['location_long'] = lon

    try:
        response_data = response_dict['dwml']['data']
        response_params = response_data['parameters']
    except KeyError:
        print('Exception {}'.format(parsed_result))
        return parsed_result

    def check_if_value_exists(d):
        return 'value' in d

    try:
        if 'temperature' in response_params:
            t = response_params['temperature']
            if t[0] and check_if_value_exists(t[0]):
                parsed_result['temperature'] = response_params['temperature'][
                    0]['value']
            if t[1] and check_if_value_exists(t[1]):
                parsed_result['dew_point'] = response_params['temperature'][1][
                    'value']
    except (KeyError, IndexError, TypeError):
        pass

    if 'wind-speed' in response_params and check_if_value_exists(
            response_params['wind-speed']):
        parsed_result['wind_speed'] = response_params['wind-speed']['value']

    if 'direction' in response_params and check_if_value_exists(
            response_params['direction']):
        parsed_result['wind_direction'] = response_params['direction']['value']

    if 'cloud-amount' in response_params and check_if_value_exists(
            response_params['cloud-amount']):
        parsed_result['cloud_cover_amount'] = response_params['cloud-amount'][
            'value']

    if 'precipitation' in response_params and check_if_value_exists(
            response_params['precipitation']):
        parsed_result['snow_amount'] = response_params['precipitation'][
            'value']

    # if 'convective-hazard' in response_params:
    #     ch = response_params['convective-hazard']
    #     if ch[0] and ch[0]['severe-component'] and check_if_value_exists(ch[0]['severe-component']):
    #         parsed_result['probability_severe_thunderstorm'] = response_params['convective-hazard'][0]['severe-component']['value']
    #     if ch[1] and ch[1]['severe-component'] and check_if_value_exists(ch[1]['severe-component']):
    #         parsed_result['probability_extreme_severe_thunderstorm'] = response_params['convective-hazard'][1]['severe-component'][
    #             'value']

    if 'humidity' in response_params and check_if_value_exists(
            response_params['humidity']):
        parsed_result['humidity'] = response_params['humidity']['value']

    print(parsed_result)
    return parsed_result
Example #60
 def convert_time_stamp(datetime):
     return datetime.strftime(DATE_TIME_FORMAT)